From 9b69fa369df296b18be65eebec33b356be803b6a Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 27 Aug 2019 14:35:04 +0200 Subject: [PATCH 01/26] added axes scales --- src/uncertainpy/plotting/plot_uncertainty.py | 97 +++++++++++++++++--- 1 file changed, 83 insertions(+), 14 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 77f6b511..7b55b24d 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -142,6 +142,10 @@ def evaluations(self, feature=None, foldername="", **plot_kwargs): foldername : str, optional Name of folder where to save all plots. The folder is created if it does not exist. Default folder is named "featurename_evaluations". + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -241,7 +245,7 @@ def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): reset_style() - def evaluations_1d(self, feature=None, foldername="", **plot_kwargs): + def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='linear', **plot_kwargs): """ Plot all 1D evaluations for a specific model/feature. @@ -309,6 +313,8 @@ def evaluations_1d(self, feature=None, foldername="", **plot_kwargs): palette="husl", **plot_kwargs) ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) plt.tight_layout() plt.savefig(os.path.join(save_folder, "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat)) @@ -403,6 +409,8 @@ def attribute_feature_1d(self, attribute_name="mean", hardcopy=True, show=False, + xscale="linear", + yscale="linear", **plot_kwargs): """ Plot a 1 dimensional attribute for a specific model/feature. @@ -421,6 +429,10 @@ def attribute_feature_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -468,6 +480,8 @@ def attribute_feature_1d(self, nr_colors=3, palette="husl", **plot_kwargs) + ax.set_xscale(xscale) + ax.set_yscale(yscale) ax.set_xlim([min(time), max(time)]) @@ -591,7 +605,7 @@ def attribute_feature_2d(self, - def mean_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): + def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', **plot_kwargs): """ Plot the mean for a specific 1 dimensional model/feature. @@ -603,6 +617,10 @@ def mean_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. **plot_kwargs, optional Matplotlib plotting arguments. 
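The xscale/yscale keywords added in this patch are passed straight through to Matplotlib's set_xscale/set_yscale. A minimal usage sketch of the extended API; the results file name below is an illustrative placeholder for any HDF5 Data file written by uncertainpy, not something shipped with the patch:

    from uncertainpy.plotting.plot_uncertainty import PlotUncertainty

    # "uq_results.h5" is a placeholder for a previously computed uncertainpy Data file.
    plot = PlotUncertainty(filename="uq_results.h5", folder="figures/")

    # Mean of the model output plotted with logarithmic axes.
    plot.mean_1d(feature=plot.data.model_name, xscale="log", yscale="log")

    # The individual 1D evaluations accept the same keywords.
    plot.evaluations_1d(feature=plot.data.model_name, xscale="log", yscale="linear")
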
@@ -618,11 +636,13 @@ def mean_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): attribute_name="mean", hardcopy=hardcopy, show=show, + xscale=xscale, + yscale=yscale, color=0, **plot_kwargs) - def variance_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): + def variance_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', **plot_kwargs): """ Plot the variance for a specific 1 dimensional model/feature. @@ -634,6 +654,10 @@ def variance_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -649,6 +673,8 @@ def variance_1d(self, feature, hardcopy=True, show=False, **plot_kwargs): attribute_name="variance", hardcopy=hardcopy, show=show, + xscale=xscale, + yscale=yscale, color=2, **plot_kwargs) @@ -717,6 +743,8 @@ def mean_variance_1d(self, new_figure=True, hardcopy=True, show=False, + xscale='linear', + yscale='linear', **plot_kwargs): """ Plot the mean and variance for a specific 1 dimensional model/feature. @@ -730,6 +758,10 @@ def mean_variance_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -776,7 +808,8 @@ def mean_variance_1d(self, palette="husl", **plot_kwargs) - + ax.set_yscale(yscale) + ax.set_xscale(xscale) colors = get_current_colormap() ax2 = ax.twinx() @@ -806,7 +839,8 @@ def mean_variance_1d(self, ax2.set_xlim([min(time), max(time)]) ax.set_xlim([min(time), max(time)]) - + ax2.set_yscale(yscale) + ax2.set_xscale(xscale) plt.tight_layout() @@ -830,6 +864,8 @@ def prediction_interval_1d(self, feature=None, hardcopy=True, show=False, + xscale='linear', + yscale='linear', **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. @@ -843,6 +879,10 @@ def prediction_interval_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -897,6 +937,8 @@ def prediction_interval_1d(self, linewidth=0) ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) plt.legend(["Mean", "90% prediction interval"], loc="best") plt.tight_layout() @@ -918,6 +960,8 @@ def sensitivity_1d(self, sensitivity="first", hardcopy=True, show=False, + xscale='linear', + yscale='linear', **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -936,6 +980,10 @@ def sensitivity_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. **plot_kwargs, optional Matplotlib plotting arguments. 
@@ -988,6 +1036,8 @@ def sensitivity_1d(self, nr_colors=len(self.data.uncertain_parameters), **plot_kwargs) # plt.ylim([0, 1.05]) ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) plt.tight_layout() @@ -1010,6 +1060,8 @@ def sensitivity_1d_grid(self, sensitivity="first", hardcopy=True, show=False, + xscale="linear", + yscale="linear", **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -1029,6 +1081,10 @@ def sensitivity_1d_grid(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -1091,6 +1147,8 @@ def sensitivity_1d_grid(self, ax.tick_params(labelcolor="w", top=False, bottom=False, left=False, right=False) ax.set_xlabel(xlabel.capitalize(), labelpad=8) ax.set_ylabel(title.capitalize()) + ax.set_xscale(xscale) + ax.set_yscale(yscale) for i in range(0, grid_x_size*grid_y_size): nx = i % grid_x_size @@ -1141,6 +1199,8 @@ def sensitivity_1d_combined(self, sensitivity="first", hardcopy=True, show=False, + xscale="linear", + yscale="linear", **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -1159,6 +1219,10 @@ def sensitivity_1d_combined(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. **plot_kwargs, optional Matplotlib plotting arguments. @@ -1217,6 +1281,8 @@ def sensitivity_1d_combined(self, plt.ylim([0, 1.05]) plt.xlim([min(time), max(time)]) + plt.xscale(xscale) + plt.yscale(yscale) if len(self.data[feature][sensitivity]) > 4: plt.xlim([time[0], 1.3*time[-1]]) @@ -1235,7 +1301,7 @@ def sensitivity_1d_combined(self, reset_style() - def features_1d(self, sensitivity="first"): + def features_1d(self, sensitivity="first", xscale='linear', yscale='linear'): """ Plot all data for all 1 dimensional model/features. @@ -1251,6 +1317,10 @@ def features_1d(self, sensitivity="first"): order Sobol indices, while "sobol_total" and "total" are the total order Sobol indices. If None, no sensitivity is plotted. Default is "first". + xscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for x axis. + yscale: {"linear", "log", "symlog", "logit", ...}, optional + Choose scale for y axis. 
Raises ------ @@ -1282,16 +1352,15 @@ def features_1d(self, sensitivity="first"): for feature in self.data: if self.data.ndim(feature) == 1: - self.mean_1d(feature=feature) - self.variance_1d(feature=feature) - self.mean_variance_1d(feature=feature) - self.prediction_interval_1d(feature=feature) + self.mean_1d(feature=feature, xscale=xscale, yscale=yscale) + self.variance_1d(feature=feature, xscale=xscale, yscale=yscale) + self.mean_variance_1d(feature=feature, xscale=xscale, yscale=yscale) + self.prediction_interval_1d(feature=feature, xscale=xscale, yscale=yscale) if sensitivity in self.data[feature]: - self.sensitivity_1d(feature=feature, sensitivity=sensitivity) - self.sensitivity_1d_combined(feature=feature, sensitivity=sensitivity) - self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity) - + self.sensitivity_1d(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale) + self.sensitivity_1d_combined(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale) + self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale) def convert_sensitivity(self, sensitivity): From 479e4868a73bb3b29fdc2ad4901b6ac042253ac6 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Sat, 5 Oct 2019 18:33:47 +0200 Subject: [PATCH 02/26] added access to samples --- src/uncertainpy/core/uncertainty_calculations.py | 4 +++- src/uncertainpy/data.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/uncertainpy/core/uncertainty_calculations.py b/src/uncertainpy/core/uncertainty_calculations.py index 09258bd6..b23102e1 100644 --- a/src/uncertainpy/core/uncertainty_calculations.py +++ b/src/uncertainpy/core/uncertainty_calculations.py @@ -1051,6 +1051,7 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): else: U_mc[feature] = U_hat[feature](samples) + data[feature].samples = U_mc[feature] data[feature].percentile_5 = np.percentile(U_mc[feature], 5, -1) data[feature].percentile_95 = np.percentile(U_mc[feature], 95, -1) @@ -1529,6 +1530,7 @@ def monte_carlo(self, logger = get_logger(self) if (np.all(mask) or allow_incomplete) and sum(mask) > 0: + data[feature].samples = masked_evaluations data[feature].mean = np.mean(masked_evaluations, 0) data[feature].variance = np.var(masked_evaluations, 0) @@ -1701,4 +1703,4 @@ def average_sensitivity(self, data, sensitivity="sobol_first"): data[feature][sensitivity + "_average"] = np.array(total_sense) - return data \ No newline at end of file + return data diff --git a/src/uncertainpy/data.py b/src/uncertainpy/data.py index f8b43f8a..68d55f1c 100644 --- a/src/uncertainpy/data.py +++ b/src/uncertainpy/data.py @@ -696,7 +696,6 @@ def add_group(group, values, name="evaluation"): for feature in self.data: group = f.create_group(feature) - for statistical_metric in self[feature]: if statistical_metric in ["evaluations", "time"]: if is_regular(self[feature][statistical_metric]): @@ -801,6 +800,9 @@ def append_evaluations(evaluations, group): if "model ignore" in f.attrs: self.model_ignore = f.attrs["model ignore"] + if "samples" in f.attrs: + self.samples = f.attrs["samples"] + for feature in f: self.add_features(str(feature)) From 72a5c7984ebd01ef82ef2e8dd1377ccf1df0bd51 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Mon, 2 Mar 2020 22:41:09 +0100 Subject: [PATCH 03/26] make labels more flexible --- src/uncertainpy/plotting/plot_uncertainty.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git 
a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 7b55b24d..67d306d5 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -59,7 +59,8 @@ def __init__(self, filename=None, folder="figures/", figureformat=".png", - logger_level="info"): + logger_level="info", + uncertain_names=[]): self._folder = None @@ -69,11 +70,15 @@ def __init__(self, self.features_in_combined_plot = 3 self.data = None + self.uncertain_names = [] self._logger_level = logger_level if filename is not None: self.load(filename) + if self.uncertain_names == []: + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") setup_module_logger(class_instance=self, level=logger_level) @@ -1028,7 +1033,7 @@ def sensitivity_1d(self, for i in range(len(self.data[feature][sensitivity])): ax = prettyPlot(time, self.data[feature][sensitivity][i], - title=title.capitalize() + ", " + feature.replace("_", " ") + " - " + self.data.uncertain_parameters[i], + title=title.capitalize() + ", " + feature.replace("_", " ") + " - " + self.uncertain_names[i], xlabel=xlabel.capitalize(), ylabel=title.capitalize(), color=i, @@ -1125,7 +1130,7 @@ def sensitivity_1d_grid(self, else: time = self.data[feature].time - parameter_names = self.data.uncertain_parameters + parameter_names = self.uncertain_names # get size of the grid in x and y directions nr_plots = len(parameter_names) @@ -1276,7 +1281,7 @@ def sensitivity_1d_combined(self, color=i, palette="husl", nr_colors=len(self.data.uncertain_parameters), - label=self.data.uncertain_parameters[i], + label=self.uncertain_names[i], **plot_kwargs) plt.ylim([0, 1.05]) @@ -1537,7 +1542,7 @@ def feature_0d(self, location = (0.5, 1.01 + legend_width*0.095) plt.legend(legend_bars, - self.data.uncertain_parameters, + self.uncertain_names, loc="upper center", bbox_to_anchor=location, ncol=legend_size) @@ -1635,7 +1640,7 @@ def average_sensitivity(self, prettyBar(self.data[feature][sensitivity + "_average"], title="Average of " + title + ", " + feature.replace("_", " "), - xlabels=self.data.uncertain_parameters, + xlabels=self.uncertain_names, ylabel="Average of " + title, nr_colors=len(self.data.uncertain_parameters), palette="husl", @@ -1983,7 +1988,7 @@ def average_sensitivity_grid(self, prettyBar(self.data[features[i]][sensitivity + "_average"], title=features[i].replace("_", " "), - xlabels=self.data.uncertain_parameters, + xlabels=self.uncertain_names, nr_colors=len(self.data.uncertain_parameters), index=index, palette="husl", From 0e4f8f0964bfd563692b356dbb92c85c7bfb72f7 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 3 Mar 2020 15:35:12 +0100 Subject: [PATCH 04/26] fixed bug and remove unphysical capitalize --- src/uncertainpy/plotting/plot_uncertainty.py | 60 ++++++++++---------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 67d306d5..b6c3d6bf 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -1,6 +1,5 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import glob import os import matplotlib.pyplot as plt @@ -8,10 +7,9 @@ from .prettyplot import prettyPlot, prettyBar from .prettyplot import spines_color, get_current_colormap -from .prettyplot 
import get_colormap_tableu20, set_style, get_colormap, reset_style +from .prettyplot import set_style, get_colormap, reset_style from .prettyplot import axis_grey, labelsize, fontsize, titlesize, linewidth -import seaborn as sns from ..data import Data from ..utils.logger import setup_module_logger, get_logger @@ -21,7 +19,6 @@ # TODO Change the use of **plot_kwargs to use a dict for specific plotting commands? - class PlotUncertainty(object): """ Plotting the results from the uncertainty quantification and sensitivity @@ -70,19 +67,21 @@ def __init__(self, self.features_in_combined_plot = 3 self.data = None - self.uncertain_names = [] + self.uncertain_names = uncertain_names self._logger_level = logger_level if filename is not None: self.load(filename) - if self.uncertain_names == []: - self.uncertain_names = [i for i in self.data.uncertain_parameters] - assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") setup_module_logger(class_instance=self, level=logger_level) + logger = get_logger(self) - + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") + logger.info("uncertain names are: {}".format(" ".join(self.uncertain_names))) def load(self, filename): """ @@ -96,7 +95,6 @@ def load(self, filename): self.data = Data(filename, logger_level=self._logger_level) - @property def folder(self): """ @@ -311,8 +309,8 @@ def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='l time = self.data[feature].time[i] ax = prettyPlot(time, evaluation, - xlabel=xlabel.capitalize(), - ylabel=ylabel.capitalize(), + xlabel=xlabel, + ylabel=ylabel, title="{}, evaluation {:d}".format(feature.replace("_", " "), i), new_figure=True, palette="husl", @@ -396,8 +394,8 @@ def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): cbar = fig.colorbar(iax) cbar.ax.set_ylabel(zlabel) - ax.set_xlabel(xlabel.capitalize()) - ax.set_ylabel(ylabel.capitalize()) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) plt.tight_layout() plt.savefig(os.path.join(save_folder, "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat)) @@ -481,7 +479,7 @@ def attribute_feature_1d(self, title = feature + ", " + attribute_name ax = prettyPlot(time, self.data[feature][attribute], - title.replace("_", " "), xlabel.capitalize(), ylabel.capitalize(), + title.replace("_", " "), xlabel, ylabel, nr_colors=3, palette="husl", **plot_kwargs) @@ -590,8 +588,8 @@ def attribute_feature_2d(self, # cbar.ax.set_title(zlabel) cbar.ax.set_ylabel(zlabel) - ax.set_xlabel(xlabel.capitalize(), fontsize=labelsize) - ax.set_ylabel(ylabel.capitalize(), fontsize=labelsize) + ax.set_xlabel(xlabel, fontsize=labelsize) + ax.set_ylabel(ylabel, fontsize=labelsize) save_name = feature + "_" + attribute_name @@ -807,7 +805,7 @@ def mean_variance_1d(self, style="seaborn-white" title = feature + ", mean and variance" ax = prettyPlot(time, self.data[feature].mean, - title.replace("_", " "), xlabel.capitalize(), ylabel.capitalize() + ", mean", + title.replace("_", " "), xlabel, ylabel + ", mean", style=style, nr_colors=3, palette="husl", @@ -825,7 +823,7 @@ def mean_variance_1d(self, "right": colors[color_2], "left": "None"}) ax2.tick_params(axis="y", which="both", right=False, left=False, labelright=True, 
color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) - ax2.set_ylabel(ylabel.capitalize() + r"$^2$, variance", color=colors[color_2], fontsize=labelsize) + ax2.set_ylabel(ylabel + r"$^2$, variance", color=colors[color_2], fontsize=labelsize) # ax2.set_ylim([min(self.data.variance[feature]), max(self.data.variance[feature])]) @@ -928,7 +926,7 @@ def prediction_interval_1d(self, title = feature.replace("_", " ") + ", 90% prediction interval" ax = prettyPlot(time, self.data[feature].mean, title=title, - xlabel=xlabel.capitalize(), ylabel=ylabel.capitalize(), + xlabel=xlabel, ylabel=ylabel, color=0, nr_colors=3, palette="husl", @@ -1033,9 +1031,9 @@ def sensitivity_1d(self, for i in range(len(self.data[feature][sensitivity])): ax = prettyPlot(time, self.data[feature][sensitivity][i], - title=title.capitalize() + ", " + feature.replace("_", " ") + " - " + self.uncertain_names[i], - xlabel=xlabel.capitalize(), - ylabel=title.capitalize(), + title=title + ", " + feature.replace("_", " ") + " - " + self.uncertain_names[i], + xlabel=xlabel, + ylabel=title, color=i, palette="husl", nr_colors=len(self.data.uncertain_parameters), **plot_kwargs) @@ -1150,8 +1148,8 @@ def sensitivity_1d_grid(self, spines_color(ax, edges={"top": "None", "bottom": "None", "right": "None", "left": "None"}) ax.tick_params(labelcolor="w", top=False, bottom=False, left=False, right=False) - ax.set_xlabel(xlabel.capitalize(), labelpad=8) - ax.set_ylabel(title.capitalize()) + ax.set_xlabel(xlabel, labelpad=8) + ax.set_ylabel(title) ax.set_xscale(xscale) ax.set_yscale(yscale) @@ -1180,7 +1178,7 @@ def sensitivity_1d_grid(self, else: ax.set_axis_off() - title = title.capitalize() + ", " + feature.replace("_", " ") + title = title + ", " + feature.replace("_", " ") plt.suptitle(title, fontsize=titlesize) plt.tight_layout() plt.subplots_adjust(top=0.9) @@ -1274,9 +1272,9 @@ def sensitivity_1d_combined(self, for i in range(len(self.data[feature][sensitivity])): prettyPlot(time, self.data[feature][sensitivity][i], - title=title.capitalize() + ", " + feature.replace("_", " "), - xlabel=xlabel.capitalize(), - ylabel=title.capitalize(), + title=title + ", " + feature.replace("_", " "), + xlabel=xlabel, + ylabel=title, new_figure=False, color=i, palette="husl", @@ -1502,7 +1500,7 @@ def feature_0d(self, ax = prettyBar(values, index=xticks, xlabels=xlabels, - ylabel=ylabel.capitalize(), + ylabel=ylabel, palette="Paired", style="seaborn-white") @@ -1515,7 +1513,7 @@ def feature_0d(self, "right": axis_grey, "left": "None"}) ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, color=axis_grey, labelcolor="black", labelsize=labelsize) - ax2.set_ylabel(label.capitalize(), fontsize=labelsize) + ax2.set_ylabel(label, fontsize=labelsize) ax2.set_ylim([0, 1.05]) From 77dfbf85df97bb6be4b5340e9f3ecfb828564737 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 3 Mar 2020 16:15:05 +0100 Subject: [PATCH 05/26] flake8 fixes and custom titles added to a lot of functions --- src/uncertainpy/plotting/plot_uncertainty.py | 223 ++++++++----------- 1 file changed, 97 insertions(+), 126 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index b6c3d6bf..3a8c244a 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -108,7 +108,6 @@ def folder(self): """ return self._folder - @folder.setter def folder(self, new_folder): self._folder = new_folder @@ -116,9 +115,6 @@ def folder(self, 
new_folder): if new_folder is not None and not os.path.isdir(new_folder): os.makedirs(new_folder) - - - def all_evaluations(self, foldername="evaluations"): """ Plot all evaluations for all model and features. @@ -132,7 +128,6 @@ def all_evaluations(self, foldername="evaluations"): for feature in self.data.data: self.evaluations(feature=feature, foldername=foldername) - def evaluations(self, feature=None, foldername="", **plot_kwargs): """ Plot all evaluations for a specific model/feature. @@ -190,8 +185,6 @@ def evaluations(self, feature=None, foldername="", **plot_kwargs): else: raise AttributeError("Dimension of evaluations is not valid: dim {}".format(dimension)) - - def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): """ Plot all 0D evaluations for a specific model/feature. @@ -247,7 +240,6 @@ def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): reset_style() - def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='linear', **plot_kwargs): """ Plot all 1D evaluations for a specific model/feature. @@ -278,7 +270,7 @@ def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='l if feature is None: feature = self.data.model_name - if feature not in self.data or "evaluations" not in self.data[feature]: + if feature not in self.data or "evaluations" not in self.data[feature]: logger.warning("No model evaluations to plot.") return @@ -298,7 +290,6 @@ def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='l else: time = self.data[feature].time - padding = len(str(len(self.data[feature].evaluations) + 1)) for i, evaluation in enumerate(self.data[feature].evaluations): @@ -325,8 +316,6 @@ def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='l reset_style() - - def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): """ Plot all 2D evaluations for a specific model/feature. @@ -403,9 +392,6 @@ def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): reset_style() - - - def attribute_feature_1d(self, feature=None, attribute="mean", @@ -414,6 +400,7 @@ def attribute_feature_1d(self, show=False, xscale="linear", yscale="linear", + title=None, **plot_kwargs): """ Plot a 1 dimensional attribute for a specific model/feature. @@ -436,6 +423,8 @@ def attribute_feature_1d(self, Choose scale for x axis. yscale: {"linear", "log", "symlog", "logit", ...}, optional Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -473,11 +462,11 @@ def attribute_feature_1d(self, else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels - title = feature + ", " + attribute_name + if title is None: + title = feature + ", " + attribute_name ax = prettyPlot(time, self.data[feature][attribute], title.replace("_", " "), xlabel, ylabel, nr_colors=3, @@ -502,8 +491,6 @@ def attribute_feature_1d(self, reset_style() - - def attribute_feature_2d(self, feature=None, attribute="mean", @@ -555,7 +542,6 @@ def attribute_feature_2d(self, if attribute not in ["mean", "variance"]: raise ValueError("{} is not a supported attribute".format(attribute)) - if attribute not in self.data[feature]: msg = " Unable to plot {attribute_name}. {attribute_name} of {feature} does not exist." 
logger.warning(msg.format(attribute_name=attribute, feature=feature)) @@ -564,10 +550,8 @@ def attribute_feature_2d(self, if self.data[feature].time is None or np.all(np.isnan(self.data[feature].time)): extent = None else: - extent=[self.data[feature].time[0], self.data[feature].time[-1], - 0, self.data[feature][attribute].shape[0]] - - + extent = [self.data[feature].time[0], self.data[feature].time[-1], + 0, self.data[feature][attribute].shape[0]] title = feature + ", " + attribute_name labels = self.data.get_labels(feature) @@ -579,7 +563,6 @@ def attribute_feature_2d(self, ax = fig.add_subplot(111) ax.set_title(title.replace("_", " ")) - iax = ax.imshow(self.data[feature][attribute], cmap="viridis", aspect="auto", extent=extent, **plot_kwargs) @@ -606,9 +589,7 @@ def attribute_feature_2d(self, reset_style() - - - def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', **plot_kwargs): + def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', title=None, **plot_kwargs): """ Plot the mean for a specific 1 dimensional model/feature. @@ -624,6 +605,8 @@ def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='l Choose scale for x axis. yscale: {"linear", "log", "symlog", "logit", ...}, optional Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -641,11 +624,11 @@ def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='l show=show, xscale=xscale, yscale=yscale, + title=title, color=0, **plot_kwargs) - - def variance_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', **plot_kwargs): + def variance_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', title=None, **plot_kwargs): """ Plot the variance for a specific 1 dimensional model/feature. @@ -661,6 +644,8 @@ def variance_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscal Choose scale for x axis. yscale: {"linear", "log", "symlog", "logit", ...}, optional Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -678,6 +663,7 @@ def variance_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscal show=show, xscale=xscale, yscale=yscale, + title=title, color=2, **plot_kwargs) @@ -710,7 +696,6 @@ def mean_2d(self, feature, hardcopy=True, show=False, **plot_kwargs): show=show, **plot_kwargs) - def variance_2d(self, feature, hardcopy=True, show=False, **plot_kwargs): """ Plot the variance for a specific 2 dimensional model/feature. @@ -740,7 +725,6 @@ def variance_2d(self, feature, hardcopy=True, show=False, **plot_kwargs): show=show, **plot_kwargs) - def mean_variance_1d(self, feature=None, new_figure=True, @@ -748,6 +732,7 @@ def mean_variance_1d(self, show=False, xscale='linear', yscale='linear', + title=None, **plot_kwargs): """ Plot the mean and variance for a specific 1 dimensional model/feature. @@ -765,6 +750,8 @@ def mean_variance_1d(self, Choose scale for x axis. yscale: {"linear", "log", "symlog", "logit", ...}, optional Choose scale for y axis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -788,7 +775,7 @@ def mean_variance_1d(self, if "mean" not in self.data[feature] or "variance" not in self.data[feature]: msg = "Mean and/or variance of {feature} does not exist. 
".format(feature=feature) \ - + "Unable to plot mean and variance" + + "Unable to plot mean and variance" logger.warning(msg) return @@ -797,13 +784,12 @@ def mean_variance_1d(self, else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels - - style="seaborn-white" - title = feature + ", mean and variance" + style = "seaborn-white" + if title is None: + title = feature + ", mean and variance" ax = prettyPlot(time, self.data[feature].mean, title.replace("_", " "), xlabel, ylabel + ", mean", style=style, @@ -823,7 +809,7 @@ def mean_variance_1d(self, "right": colors[color_2], "left": "None"}) ax2.tick_params(axis="y", which="both", right=False, left=False, labelright=True, color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) - ax2.set_ylabel(ylabel + r"$^2$, variance", color=colors[color_2], fontsize=labelsize) + ax2.set_ylabel("(" + ylabel + r")$^2$, variance", color=colors[color_2], fontsize=labelsize) # ax2.set_ylim([min(self.data.variance[feature]), max(self.data.variance[feature])]) @@ -861,12 +847,11 @@ def mean_variance_1d(self, # if not show or not hardcopy: # return ax, ax2 - - def prediction_interval_1d(self, feature=None, hardcopy=True, show=False, + title=None, xscale='linear', yscale='linear', **plot_kwargs): @@ -882,6 +867,8 @@ def prediction_interval_1d(self, If the plot should be saved to file. Default is True. show : bool, optional If the plot should be shown on screen. Default is False. + title: string, optional + Choose a customized title xscale: string, optional Choose the axis scale for the xaxis. yscale: string, optional @@ -914,17 +901,16 @@ def prediction_interval_1d(self, logger.warning(msg.format(feature=feature)) return - if self.data[feature].time is None or np.all(np.isnan(self.data[feature].time)): time = np.arange(0, len(self.data[feature].mean)) else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels - title = feature.replace("_", " ") + ", 90% prediction interval" + if title is None: + title = feature.replace("_", " ") + ", 90% prediction interval" ax = prettyPlot(time, self.data[feature].mean, title=title, xlabel=xlabel, ylabel=ylabel, color=0, @@ -934,10 +920,10 @@ def prediction_interval_1d(self, colors = get_current_colormap() ax.fill_between(time, - self.data[feature].percentile_5, - self.data[feature].percentile_95, - alpha=0.5, color=colors[0], - linewidth=0) + self.data[feature].percentile_5, + self.data[feature].percentile_95, + alpha=0.5, color=colors[0], + linewidth=0) ax.set_xlim([min(time), max(time)]) ax.set_xscale(xscale) @@ -957,7 +943,6 @@ def prediction_interval_1d(self, reset_style() - def sensitivity_1d(self, feature=None, sensitivity="first", @@ -965,6 +950,7 @@ def sensitivity_1d(self, show=False, xscale='linear', yscale='linear', + title=None, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -987,6 +973,8 @@ def sensitivity_1d(self, Choose the axis scale for the xaxis. yscale: string, optional Choose the axis scale for the yaxis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. 
@@ -1005,7 +993,7 @@ def sensitivity_1d(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") @@ -1030,8 +1018,10 @@ def sensitivity_1d(self, xlabel, ylabel = labels for i in range(len(self.data[feature][sensitivity])): + if title is None: + title = title_tmp + ", " + feature.replace("_", " ") + " - " + self.uncertain_names[i] ax = prettyPlot(time, self.data[feature][sensitivity][i], - title=title + ", " + feature.replace("_", " ") + " - " + self.uncertain_names[i], + title=title, xlabel=xlabel, ylabel=title, color=i, @@ -1056,8 +1046,6 @@ def sensitivity_1d(self, reset_style() - - def sensitivity_1d_grid(self, feature=None, sensitivity="first", @@ -1065,6 +1053,7 @@ def sensitivity_1d_grid(self, show=False, xscale="linear", yscale="linear", + title=None, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -1088,6 +1077,8 @@ def sensitivity_1d_grid(self, Choose the axis scale for the xaxis. yscale: string, optional Choose the axis scale for the yaxis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -1106,8 +1097,7 @@ def sensitivity_1d_grid(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) - + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") @@ -1134,7 +1124,7 @@ def sensitivity_1d_grid(self, nr_plots = len(parameter_names) grid_size = np.ceil(np.sqrt(nr_plots)) grid_x_size = int(grid_size) - grid_y_size = int(np.ceil(nr_plots/float(grid_x_size))) + grid_y_size = int(np.ceil(nr_plots / float(grid_x_size))) set_style("seaborn-darkgrid") fig, axes = plt.subplots(nrows=grid_y_size, ncols=grid_x_size, squeeze=False, sharex="col", sharey="row") @@ -1142,6 +1132,9 @@ def sensitivity_1d_grid(self, labels = self.data.get_labels(feature) xlabel, ylabel = labels + if title is None: + title = title_tmp + # Add a larger subplot to use to set a common xlabel and ylabel set_style("seaborn-white") ax = fig.add_subplot(111, zorder=-10) @@ -1153,9 +1146,9 @@ def sensitivity_1d_grid(self, ax.set_xscale(xscale) ax.set_yscale(yscale) - for i in range(0, grid_x_size*grid_y_size): + for i in range(0, grid_x_size * grid_y_size): nx = i % grid_x_size - ny = int(np.floor(i/float(grid_x_size))) + ny = int(np.floor(i / float(grid_x_size))) ax = axes[ny][nx] @@ -1183,7 +1176,6 @@ def sensitivity_1d_grid(self, plt.tight_layout() plt.subplots_adjust(top=0.9) - if hardcopy: plt.savefig(os.path.join(self.folder, feature + "_" + sensitivity + "_grid" + self.figureformat)) @@ -1195,8 +1187,6 @@ def sensitivity_1d_grid(self, reset_style() - - def sensitivity_1d_combined(self, feature=None, sensitivity="first", @@ -1204,6 +1194,7 @@ def sensitivity_1d_combined(self, show=False, xscale="linear", yscale="linear", + title=None, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional model/feature. The @@ -1226,6 +1217,8 @@ def sensitivity_1d_combined(self, Choose the axis scale for the xaxis. 
yscale: string, optional Choose the axis scale for the yaxis. + title: string, optional + Choose a customized title **plot_kwargs, optional Matplotlib plotting arguments. @@ -1244,7 +1237,7 @@ def sensitivity_1d_combined(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") @@ -1265,14 +1258,15 @@ def sensitivity_1d_combined(self, else: time = self.data[feature].time - labels = self.data.get_labels(feature) xlabel, ylabel = labels for i in range(len(self.data[feature][sensitivity])): + if title is None: + title = title_tmp + ", " + feature.replace("_", " ") prettyPlot(time, self.data[feature][sensitivity][i], - title=title + ", " + feature.replace("_", " "), + title=title, xlabel=xlabel, ylabel=title, new_figure=False, @@ -1287,7 +1281,7 @@ def sensitivity_1d_combined(self, plt.xscale(xscale) plt.yscale(yscale) if len(self.data[feature][sensitivity]) > 4: - plt.xlim([time[0], 1.3*time[-1]]) + plt.xlim([time[0], 1.3 * time[-1]]) plt.legend() plt.tight_layout() @@ -1303,8 +1297,7 @@ def sensitivity_1d_combined(self, reset_style() - - def features_1d(self, sensitivity="first", xscale='linear', yscale='linear'): + def features_1d(self, sensitivity="first", xscale='linear', yscale='linear', title=None): """ Plot all data for all 1 dimensional model/features. @@ -1324,6 +1317,8 @@ def features_1d(self, sensitivity="first", xscale='linear', yscale='linear'): Choose scale for x axis. yscale: {"linear", "log", "symlog", "logit", ...}, optional Choose scale for y axis. 
+ title: string, optional + Choose a customized title Raises ------ @@ -1355,16 +1350,15 @@ def features_1d(self, sensitivity="first", xscale='linear', yscale='linear'): for feature in self.data: if self.data.ndim(feature) == 1: - self.mean_1d(feature=feature, xscale=xscale, yscale=yscale) - self.variance_1d(feature=feature, xscale=xscale, yscale=yscale) - self.mean_variance_1d(feature=feature, xscale=xscale, yscale=yscale) - self.prediction_interval_1d(feature=feature, xscale=xscale, yscale=yscale) + self.mean_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) + self.variance_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) + self.mean_variance_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) + self.prediction_interval_1d(feature=feature, xscale=xscale, yscale=yscale, title=title) if sensitivity in self.data[feature]: - self.sensitivity_1d(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale) - self.sensitivity_1d_combined(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale) - self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale) - + self.sensitivity_1d(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale, title=title) + self.sensitivity_1d_combined(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale, title=title) + self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity, xscale=xscale, yscale=yscale, title=title) def convert_sensitivity(self, sensitivity): """ @@ -1400,7 +1394,6 @@ def convert_sensitivity(self, sensitivity): return sensitivity, full_text - def features_2d(self): """ Plot all implemented plots for all 2 dimensional model/features. @@ -1419,7 +1412,6 @@ def features_2d(self): self.mean_2d(feature=feature) self.variance_2d(feature=feature) - # TODO not finished, missing correct label placement # TODO test that plotting with no sensitivity works def feature_0d(self, @@ -1464,7 +1456,6 @@ def feature_0d(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total", None]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total or None, not {}".format(sensitivity)) - sensitivity, label = self.convert_sensitivity(sensitivity) if self.data is None: @@ -1476,7 +1467,7 @@ def feature_0d(self, for data_type in ["mean", "variance", "percentile_5", "percentile_95"]: if data_type not in self.data[feature]: msg = "{data_type} for {feature} does not exist. Unable to plot." 
- logger.warning(msg.format(data_type=data_type,feature=feature)) + logger.warning(msg.format(data_type=data_type, feature=feature)) return if len(self.data.uncertain_parameters) > max_legend_size: @@ -1484,13 +1475,13 @@ def feature_0d(self, else: legend_size = len(self.data.uncertain_parameters) - legend_width = np.ceil(len(self.data.uncertain_parameters)/float(max_legend_size)) + legend_width = np.ceil(len(self.data.uncertain_parameters) / float(max_legend_size)) width = 0.2 distance = 0.5 xlabels = ["Mean", "Variance", "$P_5$", "$P_{95}$"] - xticks = [0, width, distance + width, distance + 2*width] + xticks = [0, width, distance + width, distance + 2 * width] values = [self.data[feature].mean, self.data[feature].variance, self.data[feature].percentile_5, self.data[feature].percentile_95] @@ -1505,7 +1496,7 @@ def feature_0d(self, style="seaborn-white") if sensitivity in self.data[feature]: - pos = 2*distance + 2*width + pos = 2 * distance + 2 * width ax2 = ax.twinx() @@ -1516,29 +1507,25 @@ def feature_0d(self, ax2.set_ylabel(label, fontsize=labelsize) ax2.set_ylim([0, 1.05]) - ax2.spines["right"].set_visible(True) ax2.spines["right"].set_edgecolor(axis_grey) - i = 0 legend_bars = [] colors = get_colormap(palette="husl", nr_colors=len(self.data.uncertain_parameters)) for parameter in self.data.uncertain_parameters: - - l = ax2.bar(pos, self.data[feature][sensitivity][i], width=width, - align="center", color=colors[i], linewidth=0) - - legend_bars.append(l) + ll = ax2.bar(pos, self.data[feature][sensitivity][i], width=width, + align="center", color=colors[i], linewidth=0) + legend_bars.append(ll) i += 1 pos += width - xticks.append(pos - (i/2. + 0.5)*width) + xticks.append(pos - (i / 2. + 0.5) * width) xlabels.append(sensitivity.split("_")[0] + " " + sensitivity.split("_")[1]) - location = (0.5, 1.01 + legend_width*0.095) + location = (0.5, 1.01 + legend_width * 0.095) plt.legend(legend_bars, self.uncertain_names, loc="upper center", @@ -1548,8 +1535,7 @@ def feature_0d(self, # lgd.get_frame().set_edgecolor(axis_grey) fig = plt.gcf() - fig.subplots_adjust(top=(0.91 - legend_width*0.053)) - + fig.subplots_adjust(top=(0.91 - legend_width * 0.053)) ax.set_xticks(xticks) ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) @@ -1558,7 +1544,6 @@ def feature_0d(self, for tick in ax.get_xticklabels()[:2]: tick.set_rotation(-25) - plt.suptitle(feature.replace("_", " "), fontsize=titlesize) if sensitivity is None or sensitivity not in self.data[feature]: @@ -1581,7 +1566,6 @@ def feature_0d(self, # return ax - def average_sensitivity(self, feature, sensitivity="first", @@ -1634,7 +1618,7 @@ def average_sensitivity(self, width = 0.2 - index = np.arange(1, len(self.data.uncertain_parameters)+1)*width + index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width prettyBar(self.data[feature][sensitivity + "_average"], title="Average of " + title + ", " + feature.replace("_", " "), @@ -1645,7 +1629,6 @@ def average_sensitivity(self, index=index, style="seaborn-darkgrid") - plt.ylim([0, 1]) save_name = feature + "_" + sensitivity + "_average" + self.figureformat @@ -1662,10 +1645,10 @@ def average_sensitivity(self, reset_style() - def average_sensitivity_all(self, sensitivity="first", hardcopy=True, + title=None, show=False): """ Plot the average of the sensitivity for all model/features. @@ -1678,6 +1661,8 @@ def average_sensitivity_all(self, order Sobol indices. Default is "first". hardcopy : bool, optional If the plot should be saved to file. Default is True. 
+ title: string, optional + Choose a customized title show : bool, optional If the plot should be shown on screen. Default is False. @@ -1695,17 +1680,16 @@ def average_sensitivity_all(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, _ = self.convert_sensitivity(sensitivity) for feature in self.data: if sensitivity + "_average" in self.data[feature]: self.average_sensitivity(feature=feature, sensitivity=sensitivity, hardcopy=hardcopy, + title=title, show=show) - def features_0d(self, sensitivity="first", hardcopy=True, show=False): """ Plot the results for all 0 dimensional model/features. @@ -1736,8 +1720,6 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): if self.data.ndim(feature) == 0: self.feature_0d(feature, sensitivity=sensitivity, hardcopy=hardcopy, show=show) - - # # TODO Not Tested # def plot_folder(self, data_dir): # self.logger.info("Plotting all data in folder") @@ -1747,7 +1729,6 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): # self.plot_all() - # def plot_allNoSensitivity(self, sensitivity="first"): # if self.data is None: # raise ValueError("Datafile must be loaded.") @@ -1756,7 +1737,6 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): # self.features_1d(sensitivity=sensitivity) # self.features_0d(sensitivity=sensitivity) - def plot_all(self, sensitivity="first"): """ Plot the results for all model/features, with the chosen sensitivity. @@ -1788,8 +1768,6 @@ def plot_all(self, sensitivity="first"): self.average_sensitivity_all(sensitivity=sensitivity) self.average_sensitivity_grid(sensitivity=sensitivity) - - # TODO find a more descriptive name def plot_all_sensitivities(self): """ @@ -1816,7 +1794,6 @@ def plot_all_sensitivities(self): self.average_sensitivity_all(sensitivity="total") self.average_sensitivity_grid(sensitivity="total") - def plot_condensed(self, sensitivity="first"): """ Plot the subset of data that shows all information in the most concise @@ -1857,9 +1834,6 @@ def plot_condensed(self, sensitivity="first"): if sensitivity is not None: self.average_sensitivity_grid(sensitivity=sensitivity) - - - def plot(self, condensed=True, sensitivity="first"): """ Plot the subset of data that shows all information in the most concise @@ -1892,12 +1866,12 @@ def plot(self, condensed=True, sensitivity="first"): else: self.plot_all(sensitivity) - def average_sensitivity_grid(self, - sensitivity="first", - hardcopy=True, - show=False, - **plot_kwargs): + sensitivity="first", + hardcopy=True, + title=None, + show=False, + **plot_kwargs): """ Plot the average of the sensitivity for all model/features in their own plots in the same figure. 
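The batch helpers forward these keywords, so a consistent set of figures can be produced in a couple of calls. A sketch under the same assumptions as the earlier snippets (placeholder file name, Sobol indices present in the data):

    from uncertainpy.plotting.plot_uncertainty import PlotUncertainty

    plot = PlotUncertainty(filename="uq_results.h5")  # placeholder file name

    # Mean, variance, prediction-interval and sensitivity plots for every
    # 1D feature, all with a logarithmic x axis.
    plot.features_1d(sensitivity="first", xscale="log", yscale="linear")

    # Grid of averaged first-order Sobol indices with a custom overall title.
    plot.average_sensitivity_grid(sensitivity="first",
                                  title="Averaged first-order Sobol indices")
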
@@ -1931,7 +1905,7 @@ def average_sensitivity_grid(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) no_sensitivity = True for feature in self.data: @@ -1947,7 +1921,7 @@ def average_sensitivity_grid(self, nr_plots = len(self.data) grid_size = np.ceil(np.sqrt(nr_plots)) grid_x_size = int(grid_size) - grid_y_size = int(np.ceil(nr_plots/float(grid_x_size))) + grid_y_size = int(np.ceil(nr_plots / float(grid_x_size))) # plt.close("all") @@ -1955,7 +1929,6 @@ def average_sensitivity_grid(self, fig, axes = plt.subplots(nrows=grid_y_size, ncols=grid_x_size, squeeze=False, sharex="col", sharey="row") set_style("seaborn-white") - # Add a larger subplot to use to set a common xlabel and ylabel ax = fig.add_subplot(111, zorder=-10) @@ -1963,19 +1936,18 @@ def average_sensitivity_grid(self, "right": "None", "left": "None"}) ax.tick_params(labelcolor="w", top=False, bottom=False, left=False, right=False) ax.set_xlabel("Parameters") - ax.set_ylabel("Average of " + title) + ax.set_ylabel("Average of " + title_tmp) width = 0.2 - index = np.arange(1, len(self.data.uncertain_parameters)+1)*width + index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width features = list(self.data.keys()) - for i in range(0, grid_x_size*grid_y_size): + for i in range(0, grid_x_size * grid_y_size): nx = i % grid_x_size - ny = int(np.floor(i/float(grid_x_size))) + ny = int(np.floor(i / float(grid_x_size))) ax = axes[ny][nx] - if i < nr_plots: if sensitivity + "_average" not in self.data[features[i]]: msg = " Unable to plot {sensitivity}_average_grid. {sensitivity}_average of {feature} does not exist." @@ -1993,7 +1965,6 @@ def average_sensitivity_grid(self, ax=ax, **plot_kwargs) - for tick in ax.get_xticklabels(): tick.set_rotation(-30) @@ -2003,12 +1974,12 @@ def average_sensitivity_grid(self, else: ax.set_axis_off() - title = "Average of " + title + if title is None: + title = "Average of " + title_tmp plt.suptitle(title, fontsize=titlesize) plt.tight_layout() plt.subplots_adjust(top=0.88) - if hardcopy: plt.savefig(os.path.join(self.folder, sensitivity + "_average_grid" + self.figureformat)) From c4f318f44e3cb02e6bcca88eb553f9c17df40193 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 3 Mar 2020 17:55:11 +0100 Subject: [PATCH 06/26] fixed a few plotting bugs --- src/uncertainpy/plotting/plot_uncertainty.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 3a8c244a..fc909d9a 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -1268,7 +1268,7 @@ def sensitivity_1d_combined(self, self.data[feature][sensitivity][i], title=title, xlabel=xlabel, - ylabel=title, + ylabel=title_tmp, new_figure=False, color=i, palette="husl", @@ -1567,9 +1567,10 @@ def feature_0d(self, # return ax def average_sensitivity(self, - feature, + feature=None, sensitivity="first", hardcopy=True, + title=None, show=False): """ Plot the average of the sensitivity for a specific model/feature. @@ -1585,6 +1586,8 @@ def average_sensitivity(self, order Sobol indices. Default is "first". hardcopy : bool, optional If the plot should be saved to file. Default is True. 
+ title: string, optional + Choose a customized title show : bool, optional If the plot should be shown on screen. Default is False. @@ -1603,11 +1606,14 @@ def average_sensitivity(self, if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) - sensitivity, title = self.convert_sensitivity(sensitivity) + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) if self.data is None: raise ValueError("Datafile must be loaded.") + if feature is None: + feature = self.data.model_name + if feature not in self.data: raise ValueError("{} is not a feature".format(feature)) @@ -1620,10 +1626,12 @@ def average_sensitivity(self, index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width + if title is None: + title = "Average of " + title + ", " + feature.replace("_", " ") prettyBar(self.data[feature][sensitivity + "_average"], - title="Average of " + title + ", " + feature.replace("_", " "), + title=title, xlabels=self.uncertain_names, - ylabel="Average of " + title, + ylabel="Average of " + title_tmp, nr_colors=len(self.data.uncertain_parameters), palette="husl", index=index, From 9d475ccc1b30eb36607580041e9c5a27b214ce2e Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 3 Mar 2020 17:55:33 +0100 Subject: [PATCH 07/26] use new version to distinguish --- src/uncertainpy/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/uncertainpy/_version.py b/src/uncertainpy/_version.py index 42cf7cd5..b7aafca9 100644 --- a/src/uncertainpy/_version.py +++ b/src/uncertainpy/_version.py @@ -1 +1 @@ -__version__ = "1.2.1" \ No newline at end of file +__version__ = "1.2.1a" From 2c1a85ed0e9106a75a169267129a5af841340b3c Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 3 Mar 2020 18:43:22 +0100 Subject: [PATCH 08/26] fixed bug --- src/uncertainpy/plotting/plot_uncertainty.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index fc909d9a..753b038e 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -73,14 +73,15 @@ def __init__(self, if filename is not None: self.load(filename) + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") + setup_module_logger(class_instance=self, level=logger_level) logger = get_logger(self) - if not self.uncertain_names: - logger.warning("no uncertain names passed for labels - will use parameter names") - self.uncertain_names = [i for i in self.data.uncertain_parameters] - assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") logger.info("uncertain names are: {}".format(" ".join(self.uncertain_names))) def load(self, filename): From 621017f66af91123ca0d127f65f4f1e07f20c039 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Wed, 4 Mar 2020 16:54:02 +0100 Subject: [PATCH 09/26] added plot to compare --- src/uncertainpy/plotting/plot_uncertainty.py | 141 +++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py 
b/src/uncertainpy/plotting/plot_uncertainty.py index 753b038e..9ce41e76 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import numpy as np +import seaborn as sns from .prettyplot import prettyPlot, prettyBar from .prettyplot import spines_color, get_current_colormap from .prettyplot import set_style, get_colormap, reset_style @@ -2000,6 +2001,146 @@ def average_sensitivity_grid(self, reset_style() + def prediction_interval_sensitivity_1d(self, + feature=None, + hardcopy=True, + show=False, + sensitivity="first", + title=None, + xscale='linear', + yscale='linear', + **plot_kwargs): + """ + Plot the prediction interval for a specific 1 dimensional model/feature. + + Parameters + ---------- + feature : {None, str}, optional + The name of the model/feature. If None, the name of the model is + used. Default is None. + hardcopy : bool, optional + If the plot should be saved to file. Default is True. + show : bool, optional + If the plot should be shown on screen. Default is False. + title: string, optional + Choose a customized title + xscale: string, optional + Choose the axis scale for the xaxis. + yscale: string, optional + Choose the axis scale for the yaxis. + **plot_kwargs, optional + Matplotlib plotting arguments. + + Raises + ------ + ValueError + If a Datafile is not loaded. + ValueError + If the model/feature is not 1 dimensional. + """ + logger = get_logger(self) + + if self.data is None: + raise ValueError("Datafile must be loaded.") + + if feature is None: + feature = self.data.model_name + + if self.data.ndim(feature) != 1: + raise ValueError("{} is not a 1D feature".format(feature)) + + if "mean" not in self.data[feature] \ + or "percentile_5" not in self.data[feature] \ + or "percentile_95" not in self.data[feature]: + msg = "E, percentile_5 and/or percentile_95 of {feature} does not exist. Unable to plot prediction interval" + logger.warning(msg.format(feature=feature)) + return + + if sensitivity not in ["sobol_first", "first", "sobol_total", "total"]: + raise ValueError("Sensitivity must be either: sobol_first, first, sobol_total, total, not {}".format(sensitivity)) + + sensitivity, title_tmp = self.convert_sensitivity(sensitivity) + + if sensitivity not in self.data[feature]: + msg = "{sensitivity} of {feature} does not exist. 
Unable to plot {sensitivity}" + logger.warning(msg.format(sensitivity=sensitivity, feature=feature)) + return + + if self.data[feature].time is None or np.all(np.isnan(self.data[feature].time)): + time = np.arange(0, len(self.data[feature].mean)) + else: + time = self.data[feature].time + + labels = self.data.get_labels(feature) + xlabel, ylabel = labels + + # plot predicition interval + if title is None: + title = feature.replace("_", " ") + ", 90% prediction interval and sensitivities" + ax = prettyPlot(time, self.data[feature].mean, title=title, + xlabel=xlabel, ylabel=ylabel, + color=0, + nr_colors=3, + palette="husl", + **plot_kwargs) + + colors = get_current_colormap() + ax.fill_between(time, + self.data[feature].percentile_5, + self.data[feature].percentile_95, + alpha=0.5, color=colors[0], + linewidth=0) + + ax.set_xlim([min(time), max(time)]) + ax.set_xscale(xscale) + ax.set_yscale(yscale) + + # plot sensitivity + ax2 = ax.twinx() + colors = sns.color_palette("husl", n_colors=len(self.data[feature][sensitivity]) + 2) + color = 0 + color_2 = 2 + + spines_color(ax2, edges={"top": "None", "bottom": "None", + "right": colors[color_2], "left": "None"}) + ax2.grid(False) + ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, + color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) + ax2.set_ylabel(title_tmp, color=colors[color_2], fontsize=labelsize) + for i in range(len(self.data[feature][sensitivity])): + ax2.plot(time, self.data[feature][sensitivity][i], color=colors[i+2], + linewidth=linewidth, antialiased=True, label=self.uncertain_names[i]) + ax2.legend(loc="lower right") + ax2.yaxis.offsetText.set_fontsize(fontsize) + ax2.yaxis.offsetText.set_color(colors[color_2]) + + ax2.spines["right"].set_visible(True) + ax2.spines["right"].set_edgecolor(colors[color_2]) + + #plt.ylim([0, 1.05]) + ax.tick_params(axis="y", color=colors[color], labelcolor=colors[color]) + ax.spines["left"].set_edgecolor(colors[color]) + ax.set_ylabel(ylabel + ", mean", color=colors[color], fontsize=labelsize) + + ax2.set_xlim([min(time), max(time)]) + ax.set_xlim([min(time), max(time)]) + + ax.legend(["Mean", "90% prediction interval"], loc="best") + plt.tight_layout() + + if hardcopy: + plt.savefig(os.path.join(self.folder, + feature + "_prediction-interval_sensitivity" + self.figureformat)) + + if show: + plt.show() + else: + plt.close() + + reset_style() + + + # if __name__ == "__main__": # parser = argparse.ArgumentParser(description="Plot data") From 5aa888a20b9b703a03fb57e8a842361b9673b35f Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Thu, 5 Mar 2020 14:37:04 +0100 Subject: [PATCH 10/26] bug fix due to new uncertain_names parameter --- src/uncertainpy/plotting/plot_uncertainty.py | 11 +++++++++++ src/uncertainpy/uncertainty.py | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 9ce41e76..6fe6f609 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -96,6 +96,17 @@ def load(self, filename): """ self.data = Data(filename, logger_level=self._logger_level) + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") + + def 
set_data(self, data): + self.data = data + if not self.uncertain_names: + logger.warning("no uncertain names passed for labels - will use parameter names") + self.uncertain_names = [i for i in self.data.uncertain_parameters] + assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") @property def folder(self): diff --git a/src/uncertainpy/uncertainty.py b/src/uncertainpy/uncertainty.py index 58488d2a..96c65936 100644 --- a/src/uncertainpy/uncertainty.py +++ b/src/uncertainpy/uncertainty.py @@ -1323,13 +1323,13 @@ def plot(type): tmp_folder = os.path.join(folder, uncertain_parameter) self.plotting.folder = tmp_folder - self.plotting.data = self.data[uncertain_parameter] + self.plotting.set_data(self.data[uncertain_parameter]) plot(type) else: self.plotting.folder = folder - self.plotting.data = self.data + self.plotting.set_data(self.data) plot(type) From fddbec50a1f51c8fb8e886466735e5243ad784d1 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Thu, 5 Mar 2020 14:58:57 +0100 Subject: [PATCH 11/26] now all tests are passed --- src/uncertainpy/plotting/plot_uncertainty.py | 42 +++++++++++--------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 6fe6f609..727182f2 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -72,16 +72,15 @@ def __init__(self, self._logger_level = logger_level + logger = get_logger(self) if filename is not None: self.load(filename) if not self.uncertain_names: logger.warning("no uncertain names passed for labels - will use parameter names") self.uncertain_names = [i for i in self.data.uncertain_parameters] assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") - setup_module_logger(class_instance=self, level=logger_level) - logger = get_logger(self) logger.info("uncertain names are: {}".format(" ".join(self.uncertain_names))) @@ -94,6 +93,7 @@ def load(self, filename): filename : str Name of the file to load data from. """ + logger = get_logger(self) self.data = Data(filename, logger_level=self._logger_level) if not self.uncertain_names: @@ -102,6 +102,15 @@ def load(self, filename): assert (len(self.uncertain_names) == len(self.data.uncertain_parameters)), print("You must provide a name for each parameter") def set_data(self, data): + """ + Set data from dictionary. + + Parameters + ---------- + data : dict + Data dictionary + """ + logger = get_logger(self) self.data = data if not self.uncertain_names: logger.warning("no uncertain names passed for labels - will use parameter names") @@ -2013,14 +2022,14 @@ def average_sensitivity_grid(self, reset_style() def prediction_interval_sensitivity_1d(self, - feature=None, - hardcopy=True, - show=False, - sensitivity="first", - title=None, - xscale='linear', - yscale='linear', - **plot_kwargs): + feature=None, + hardcopy=True, + show=False, + sensitivity="first", + title=None, + xscale='linear', + yscale='linear', + **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. 
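For orientation, the combined plot introduced in this part of the series can be driven roughly as follows. This is a minimal usage sketch, not part of the patch: it assumes the plotting class is PlotUncertainty as in upstream uncertainpy, and the data file name and parameter labels are placeholders.

    from uncertainpy.plotting.plot_uncertainty import PlotUncertainty

    # Load a previously saved uncertainty-quantification result;
    # the file name and the parameter labels are placeholders.
    plot = PlotUncertainty(filename="data/my_model.h5",
                           folder="figures/",
                           uncertain_names=["sigma", "kappa"])

    # Mean, 90% prediction interval and first-order Sobol indices in one
    # figure, using the axis-scale keywords added earlier in this series.
    plot.prediction_interval_sensitivity_1d(sensitivity="first",
                                            xscale="linear",
                                            yscale="log")
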
@@ -2110,7 +2119,7 @@ def prediction_interval_sensitivity_1d(self, ax2 = ax.twinx() colors = sns.color_palette("husl", n_colors=len(self.data[feature][sensitivity]) + 2) color = 0 - color_2 = 2 + color_2 = 2 spines_color(ax2, edges={"top": "None", "bottom": "None", "right": colors[color_2], "left": "None"}) @@ -2119,16 +2128,16 @@ def prediction_interval_sensitivity_1d(self, color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) ax2.set_ylabel(title_tmp, color=colors[color_2], fontsize=labelsize) for i in range(len(self.data[feature][sensitivity])): - ax2.plot(time, self.data[feature][sensitivity][i], color=colors[i+2], - linewidth=linewidth, antialiased=True, label=self.uncertain_names[i]) - ax2.legend(loc="lower right") + ax2.plot(time, self.data[feature][sensitivity][i], color=colors[i + 2], + linewidth=linewidth, antialiased=True, label=self.uncertain_names[i]) + ax2.legend(loc="lower right") ax2.yaxis.offsetText.set_fontsize(fontsize) ax2.yaxis.offsetText.set_color(colors[color_2]) ax2.spines["right"].set_visible(True) ax2.spines["right"].set_edgecolor(colors[color_2]) - #plt.ylim([0, 1.05]) + # plt.ylim([0, 1.05]) ax.tick_params(axis="y", color=colors[color], labelcolor=colors[color]) ax.spines["left"].set_edgecolor(colors[color]) ax.set_ylabel(ylabel + ", mean", color=colors[color], fontsize=labelsize) @@ -2150,9 +2159,6 @@ def prediction_interval_sensitivity_1d(self, reset_style() - - - # if __name__ == "__main__": # parser = argparse.ArgumentParser(description="Plot data") # parser.add_argument("-d", "--data_dir", From b7c89fb98f75059e51bd5abc33d511afa5c6d40c Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Fri, 6 Mar 2020 10:51:04 +0100 Subject: [PATCH 12/26] file name updated --- src/uncertainpy/plotting/plot_uncertainty.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 727182f2..91c46fc0 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -2150,7 +2150,7 @@ def prediction_interval_sensitivity_1d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_prediction-interval_sensitivity" + self.figureformat)) + feature + "_prediction-interval_sensitivity_" + sensitivity + self.figureformat)) if show: plt.show() From dcc11a44c27795cd6c1c61b6a02dd537afc04b35 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Thu, 12 Mar 2020 13:52:00 +0100 Subject: [PATCH 13/26] added labels --- src/uncertainpy/plotting/plot_uncertainty.py | 30 ++++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 91c46fc0..ac8a19c5 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -1,6 +1,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import os +import itertools import matplotlib.pyplot as plt import numpy as np @@ -2029,6 +2030,8 @@ def prediction_interval_sensitivity_1d(self, title=None, xscale='linear', yscale='linear', + use_markers=False, + legend_locs=None, **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. @@ -2048,6 +2051,8 @@ def prediction_interval_sensitivity_1d(self, Choose the axis scale for the xaxis. yscale: string, optional Choose the axis scale for the yaxis. 
+ use_markers: bool, optional + Use markers for the sensitivity data. **plot_kwargs, optional Matplotlib plotting arguments. @@ -2127,10 +2132,29 @@ def prediction_interval_sensitivity_1d(self, ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) ax2.set_ylabel(title_tmp, color=colors[color_2], fontsize=labelsize) + + # use markers for secondary axis + marker_list = itertools.cycle(('+', 'o', '*', 'v', '<', '>', 's', 'x')) + for i in range(len(self.data[feature][sensitivity])): + if use_markers is False: + marker = None + else: + marker = next(marker_list) + ax2.plot(time, self.data[feature][sensitivity][i], color=colors[i + 2], - linewidth=linewidth, antialiased=True, label=self.uncertain_names[i]) - ax2.legend(loc="lower right") + linewidth=linewidth, antialiased=True, label=self.uncertain_names[i], + marker=marker, **plot_kwargs) + + if legend_locs is not None: + assert (len(legend_locs) == 2, "you must provide two legend locations") + loc1 = legend_locs[0] + loc2 = legend_locs[1] + else: + loc1 = "best" + loc2 = "lower right" + + ax2.legend(loc=loc2) ax2.yaxis.offsetText.set_fontsize(fontsize) ax2.yaxis.offsetText.set_color(colors[color_2]) @@ -2145,7 +2169,7 @@ def prediction_interval_sensitivity_1d(self, ax2.set_xlim([min(time), max(time)]) ax.set_xlim([min(time), max(time)]) - ax.legend(["Mean", "90% prediction interval"], loc="best") + ax.legend(["Mean", "90% prediction interval"], loc=loc1) plt.tight_layout() if hardcopy: From cfab357c0327ec6f7160b5c4ffb9d038b3b60dff Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Mon, 23 Mar 2020 22:30:20 +0100 Subject: [PATCH 14/26] fixed bug --- src/uncertainpy/plotting/plot_uncertainty.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index ac8a19c5..1d060d96 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -1650,7 +1650,7 @@ def average_sensitivity(self, index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width if title is None: - title = "Average of " + title + ", " + feature.replace("_", " ") + title = "Average of " + title_tmp + ", " + feature.replace("_", " ") prettyBar(self.data[feature][sensitivity + "_average"], title=title, xlabels=self.uncertain_names, From b8457a5dbeb165f7e484694e867b0db1639baaf9 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 28 Apr 2020 20:03:53 +0200 Subject: [PATCH 15/26] changes to default plots --- src/uncertainpy/plotting/plot_uncertainty.py | 226 +++++++++++++----- .../plotting/prettyplot/prettyplot.py | 43 +++- 2 files changed, 191 insertions(+), 78 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 1d060d96..a9c4066c 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -59,7 +59,8 @@ def __init__(self, folder="figures/", figureformat=".png", logger_level="info", - uncertain_names=[]): + uncertain_names=[], + **kwargs): self._folder = None @@ -83,7 +84,21 @@ def __init__(self, setup_module_logger(class_instance=self, level=logger_level) - logger.info("uncertain names are: {}".format(" ".join(self.uncertain_names))) + font_settings = ["fontsize", "linewidth", "titlesize", "labelsize", "dpi"] + for k in font_settings: + if k in kwargs: + setattr(self, k, kwargs[k]) + else: + if k 
== "linewidth": + self.linewidth = linewidth + elif k == "titlesize": + self.titlesize = titlesize + elif k == "fontsize": + self.fontsize = fontsize + elif k == "labelsize": + self.labelsize = labelsize + elif k == "dpi": + self.dpi = None def load(self, filename): """ @@ -258,7 +273,7 @@ def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): **plot_kwargs) plt.tight_layout() - plt.savefig(os.path.join(save_folder, "evaluations" + self.figureformat)) + plt.savefig(os.path.join(save_folder, "evaluations" + self.figureformat), dpi=self.dpi) plt.close() reset_style() @@ -334,7 +349,7 @@ def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='l ax.set_yscale(yscale) plt.tight_layout() plt.savefig(os.path.join(save_folder, - "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat)) + "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat), dpi=self.dpi) plt.close() reset_style() @@ -377,7 +392,7 @@ def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): if self.data.ndim(feature) != 2: raise ValueError("{} is not a 2 dimensional feature.".format(feature)) - set_style("seaborn-dark") + set_style("classic") save_folder = os.path.join(self.folder, foldername, feature + "_evaluations") if not os.path.isdir(save_folder): @@ -410,7 +425,7 @@ def evaluations_2d(self, feature=None, foldername="", **plot_kwargs): ax.set_ylabel(ylabel) plt.tight_layout() plt.savefig(os.path.join(save_folder, - "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat)) + "evaluation_{0:0{1}d}".format(i, padding) + self.figureformat), dpi=self.dpi) plt.close() reset_style() @@ -505,7 +520,7 @@ def attribute_feature_1d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - save_name + self.figureformat)) + save_name + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -580,7 +595,7 @@ def attribute_feature_2d(self, labels = self.data.get_labels(feature) xlabel, ylabel, zlabel = labels - set_style("seaborn-dark") + set_style("classic") fig = plt.figure() ax = fig.add_subplot(111) @@ -594,8 +609,8 @@ def attribute_feature_2d(self, # cbar.ax.set_title(zlabel) cbar.ax.set_ylabel(zlabel) - ax.set_xlabel(xlabel, fontsize=labelsize) - ax.set_ylabel(ylabel, fontsize=labelsize) + ax.set_xlabel(xlabel, fontsize=self.labelsize) + ax.set_ylabel(ylabel, fontsize=self.labelsize) save_name = feature + "_" + attribute_name @@ -603,7 +618,7 @@ def attribute_feature_2d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - save_name + self.figureformat)) + save_name + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -755,6 +770,7 @@ def mean_variance_1d(self, show=False, xscale='linear', yscale='linear', + color_axes=True, title=None, **plot_kwargs): """ @@ -810,7 +826,7 @@ def mean_variance_1d(self, labels = self.data.get_labels(feature) xlabel, ylabel = labels - style = "seaborn-white" + style = "classic" if title is None: title = feature + ", mean and variance" ax = prettyPlot(time, self.data[feature].mean, @@ -818,6 +834,8 @@ def mean_variance_1d(self, style=style, nr_colors=3, palette="husl", + labelfontsize=self.labelsize, + ffontsize=self.fontsize, **plot_kwargs) ax.set_yscale(yscale) @@ -828,26 +846,31 @@ def mean_variance_1d(self, color = 0 color_2 = 2 - spines_color(ax2, edges={"top": "None", "bottom": "None", + axescolor = "black" + if color_axes: + spines_color(ax2, edges={"top": "None", "bottom": "None", "right": colors[color_2], "left": "None"}) + axescolor = colors[color_2] ax2.tick_params(axis="y", which="both", right=False, 
left=False, labelright=True, - color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) - ax2.set_ylabel("(" + ylabel + r")$^2$, variance", color=colors[color_2], fontsize=labelsize) + color=axescolor, labelcolor=axescolor, labelsize=self.labelsize) + ax2.set_ylabel("(" + ylabel + r")$^2$, variance", color=colors[color_2], fontsize=self.labelsize) # ax2.set_ylim([min(self.data.variance[feature]), max(self.data.variance[feature])]) ax2.plot(time, self.data[feature].variance, - color=colors[color_2], linewidth=linewidth, antialiased=True) + color=colors[color_2], linewidth=self.linewidth, antialiased=True) - ax2.yaxis.offsetText.set_fontsize(fontsize) - ax2.yaxis.offsetText.set_color(colors[color_2]) + ax2.yaxis.offsetText.set_fontsize(self.fontsize) + ax2.yaxis.offsetText.set_color(axescolor) ax2.spines["right"].set_visible(True) - ax2.spines["right"].set_edgecolor(colors[color_2]) - - ax.tick_params(axis="y", color=colors[color], labelcolor=colors[color]) - ax.spines["left"].set_edgecolor(colors[color]) - ax.set_ylabel(ylabel + ", mean", color=colors[color], fontsize=labelsize) + ax2.spines["right"].set_edgecolor(axescolor) + + if color_axes: + axescolor = colors[color] + ax.tick_params(axis="y", color=axescolor, labelcolor=axescolor) + ax.spines["left"].set_edgecolor(axescolor) + ax.set_ylabel(ylabel + ", mean", color=axescolor, fontsize=self.labelsize) ax2.set_xlim([min(time), max(time)]) ax.set_xlim([min(time), max(time)]) @@ -858,7 +881,7 @@ def mean_variance_1d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_mean-variance" + self.figureformat)) + feature + "_mean-variance" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -877,6 +900,10 @@ def prediction_interval_1d(self, title=None, xscale='linear', yscale='linear', + xmin=None, + xmax=None, + ymin=None, + ymax=None, **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. 
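prediction_interval_1d now also accepts explicit axis limits next to the scale keywords, and the constructor forwards font, linewidth and dpi settings via **kwargs. A hedged usage sketch; the file name, feature name, limits and dpi value are illustrative and not taken from the patch.

    from uncertainpy.plotting.plot_uncertainty import PlotUncertainty

    plot = PlotUncertainty(filename="data/my_model.h5", dpi=300, labelsize=12)

    # Zoom into part of the time axis and force a logarithmic y axis.
    plot.prediction_interval_1d(feature="my_feature",   # hypothetical feature name
                                xscale="linear", yscale="log",
                                xmin=10.0, xmax=100.0, ymin=1e-3)
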
@@ -932,10 +959,13 @@ def prediction_interval_1d(self, labels = self.data.get_labels(feature) xlabel, ylabel = labels + plot_kwargs["label"] = "Mean" if title is None: title = feature.replace("_", " ") + ", 90% prediction interval" ax = prettyPlot(time, self.data[feature].mean, title=title, xlabel=xlabel, ylabel=ylabel, + labelfontsize=self.labelsize, + ffontsize=self.fontsize, color=0, nr_colors=3, palette="husl", @@ -946,9 +976,21 @@ def prediction_interval_1d(self, self.data[feature].percentile_5, self.data[feature].percentile_95, alpha=0.5, color=colors[0], - linewidth=0) + linewidth=0, label="90% prediction interval") + + if xmin is None: + ax.set_xlim(min(time)) + else: + ax.set_xlim(xmin) + if xmax is None: + ax.set_xlim(right=max(time)) + else: + ax.set_xlim(right=xmax) + if ymin is not None: + ax.set_ylim(ymin) + if ymax is not None: + ax.set_ylim(top=ymax) - ax.set_xlim([min(time), max(time)]) ax.set_xscale(xscale) ax.set_yscale(yscale) plt.legend(["Mean", "90% prediction interval"], loc="best") @@ -957,7 +999,7 @@ def prediction_interval_1d(self, if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_prediction-interval" + self.figureformat)) + feature + "_prediction-interval" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1049,6 +1091,8 @@ def sensitivity_1d(self, ylabel=title, color=i, palette="husl", + labelfontsize=self.labelsize, + ffontsize=self.fontsize, nr_colors=len(self.data.uncertain_parameters), **plot_kwargs) # plt.ylim([0, 1.05]) ax.set_xlim([min(time), max(time)]) @@ -1060,7 +1104,7 @@ def sensitivity_1d(self, if hardcopy: plt.savefig(os.path.join(self.folder, feature + "_" + sensitivity + "_" - + self.data.uncertain_parameters[i] + self.figureformat)) + + self.data.uncertain_parameters[i] + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1149,7 +1193,7 @@ def sensitivity_1d_grid(self, grid_x_size = int(grid_size) grid_y_size = int(np.ceil(nr_plots / float(grid_x_size))) - set_style("seaborn-darkgrid") + set_style("classic") fig, axes = plt.subplots(nrows=grid_y_size, ncols=grid_x_size, squeeze=False, sharex="col", sharey="row") labels = self.data.get_labels(feature) @@ -1159,7 +1203,7 @@ def sensitivity_1d_grid(self, title = title_tmp # Add a larger subplot to use to set a common xlabel and ylabel - set_style("seaborn-white") + set_style("classic") ax = fig.add_subplot(111, zorder=-10) spines_color(ax, edges={"top": "None", "bottom": "None", "right": "None", "left": "None"}) @@ -1182,6 +1226,9 @@ def sensitivity_1d_grid(self, nr_colors=nr_plots, ax=ax, palette="husl", + labelfontsize=self.labelsize, + ffontsize=self.fontsize, + **plot_kwargs) # for tick in ax.get_xticklabels(): @@ -1190,18 +1237,18 @@ def sensitivity_1d_grid(self, ax.set_ylim([0, 1.05]) ax.set_xlim([min(time), max(time)]) # ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) - ax.tick_params(labelsize=10) + ax.tick_params(labelsize=self.labelsize) else: ax.set_axis_off() title = title + ", " + feature.replace("_", " ") - plt.suptitle(title, fontsize=titlesize) + plt.suptitle(title, fontsize=self.titlesize) plt.tight_layout() plt.subplots_adjust(top=0.9) if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_" + sensitivity + "_grid" + self.figureformat)) + feature + "_" + sensitivity + "_grid" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1218,6 +1265,7 @@ def sensitivity_1d_combined(self, xscale="linear", yscale="linear", title=None, + sobol_limit=.0, **plot_kwargs): """ Plot the sensitivity for a specific 1 dimensional 
model/feature. The @@ -1284,7 +1332,22 @@ def sensitivity_1d_combined(self, labels = self.data.get_labels(feature) xlabel, ylabel = labels + # plot sensitivity + number_colors = 0 + if not np.isclose(sobol_limit, 0.0): + for i in self.data[feature][sensitivity]: + if not np.any(i > sobol_limit): + continue + number_colors += 1 + else: + number_colors = len(self.data.uncertain_parameters) + + + color_idx = 0 for i in range(len(self.data[feature][sensitivity])): + if not np.any(self.data[feature][sensitivity][i] > sobol_limit): + continue + if title is None: title = title_tmp + ", " + feature.replace("_", " ") prettyPlot(time, @@ -1293,11 +1356,14 @@ def sensitivity_1d_combined(self, xlabel=xlabel, ylabel=title_tmp, new_figure=False, - color=i, + color=color_idx, palette="husl", - nr_colors=len(self.data.uncertain_parameters), + nr_colors=number_colors, label=self.uncertain_names[i], + labelfontsize=self.labelsize, + ffontsize=self.fontsize, **plot_kwargs) + color_idx += 1 plt.ylim([0, 1.05]) plt.xlim([min(time), max(time)]) @@ -1311,7 +1377,7 @@ def sensitivity_1d_combined(self, if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_" + sensitivity + self.figureformat)) + feature + "_" + sensitivity + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -1513,10 +1579,11 @@ def feature_0d(self, ax = prettyBar(values, index=xticks, + labelfontsize=self.labelsize, xlabels=xlabels, ylabel=ylabel, palette="Paired", - style="seaborn-white") + style="classic") if sensitivity in self.data[feature]: pos = 2 * distance + 2 * width @@ -1526,8 +1593,8 @@ def feature_0d(self, spines_color(ax2, edges={"top": "None", "bottom": "None", "right": axis_grey, "left": "None"}) ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, - color=axis_grey, labelcolor="black", labelsize=labelsize) - ax2.set_ylabel(label, fontsize=labelsize) + color=axis_grey, labelcolor="black", labelsize=self.labelsize) + ax2.set_ylabel(label, fontsize=self.labelsize) ax2.set_ylim([0, 1.05]) ax2.spines["right"].set_visible(True) @@ -1561,13 +1628,13 @@ def feature_0d(self, fig.subplots_adjust(top=(0.91 - legend_width * 0.053)) ax.set_xticks(xticks) - ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) + ax.set_xticklabels(xlabels, fontsize=self.labelsize, rotation=0) if len(self.data.uncertain_parameters) > 3: for tick in ax.get_xticklabels()[:2]: tick.set_rotation(-25) - plt.suptitle(feature.replace("_", " "), fontsize=titlesize) + plt.suptitle(feature.replace("_", " "), fontsize=self.titlesize) if sensitivity is None or sensitivity not in self.data[feature]: plt.subplots_adjust(top=0.93) @@ -1578,7 +1645,7 @@ def feature_0d(self, save_name = feature + "_" + sensitivity + self.figureformat if hardcopy: - plt.savefig(os.path.join(self.folder, save_name)) + plt.savefig(os.path.join(self.folder, save_name), dpi=self.dpi) if show: plt.show() @@ -1656,9 +1723,10 @@ def average_sensitivity(self, xlabels=self.uncertain_names, ylabel="Average of " + title_tmp, nr_colors=len(self.data.uncertain_parameters), + labelfontsize=self.labelsize, palette="husl", index=index, - style="seaborn-darkgrid") + style="classic") plt.ylim([0, 1]) @@ -1667,7 +1735,7 @@ def average_sensitivity(self, plt.tight_layout() if hardcopy: - plt.savefig(os.path.join(self.folder, save_name)) + plt.savefig(os.path.join(self.folder, save_name), dpi=self.dpi) if show: plt.show() @@ -1956,9 +2024,9 @@ def average_sensitivity_grid(self, # plt.close("all") - set_style("seaborn-dark") + set_style("classic") fig, axes = 
plt.subplots(nrows=grid_y_size, ncols=grid_x_size, squeeze=False, sharex="col", sharey="row") - set_style("seaborn-white") + set_style("classic") # Add a larger subplot to use to set a common xlabel and ylabel @@ -1991,6 +2059,7 @@ def average_sensitivity_grid(self, title=features[i].replace("_", " "), xlabels=self.uncertain_names, nr_colors=len(self.data.uncertain_parameters), + labelfontsize=self.labelsize, index=index, palette="husl", ax=ax, @@ -2001,19 +2070,19 @@ def average_sensitivity_grid(self, ax.set_ylim([0, 1.05]) # ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) - ax.tick_params(labelsize=fontsize) + ax.tick_params(labelsize=self.fontsize) else: ax.set_axis_off() if title is None: title = "Average of " + title_tmp - plt.suptitle(title, fontsize=titlesize) + plt.suptitle(title, fontsize=self.titlesize) plt.tight_layout() plt.subplots_adjust(top=0.88) if hardcopy: plt.savefig(os.path.join(self.folder, - sensitivity + "_average_grid" + self.figureformat)) + sensitivity + "_average_grid" + self.figureformat), dpi=self.dpi) if show: plt.show() @@ -2032,6 +2101,8 @@ def prediction_interval_sensitivity_1d(self, yscale='linear', use_markers=False, legend_locs=None, + sobol_limit=.0, + color_axes=True, **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. @@ -2107,6 +2178,8 @@ def prediction_interval_sensitivity_1d(self, color=0, nr_colors=3, palette="husl", + labelfontsize=self.labelsize, + ffontsize=self.fontsize, **plot_kwargs) colors = get_current_colormap() @@ -2121,33 +2194,53 @@ def prediction_interval_sensitivity_1d(self, ax.set_yscale(yscale) # plot sensitivity + number_colors = 0 + if not np.isclose(sobol_limit, 0.0): + for i in self.data[feature][sensitivity]: + if not np.any(i > sobol_limit): + continue + number_colors += 1 + else: + number_colors = len(self.data[feature][sensitivity]) + number_colors += 2 ax2 = ax.twinx() - colors = sns.color_palette("husl", n_colors=len(self.data[feature][sensitivity]) + 2) + colors = sns.color_palette("husl", n_colors=number_colors) color = 0 color_2 = 2 - spines_color(ax2, edges={"top": "None", "bottom": "None", + axescolor = "black" + if color_axes: + spines_color(ax2, edges={"top": "None", "bottom": "None", "right": colors[color_2], "left": "None"}) + axescolor = colors[color_2] + ax2.grid(False) ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, - color=colors[color_2], labelcolor=colors[color_2], labelsize=labelsize) - ax2.set_ylabel(title_tmp, color=colors[color_2], fontsize=labelsize) + color=axescolor, labelcolor=axescolor, labelsize=self.labelsize) + ax2.set_ylabel(title_tmp, color=axescolor, fontsize=self.labelsize) # use markers for secondary axis marker_list = itertools.cycle(('+', 'o', '*', 'v', '<', '>', 's', 'x')) - for i in range(len(self.data[feature][sensitivity])): + i = 0 + color_idx = 0 + for sens_data in self.data[feature][sensitivity]: + if not np.any(sens_data > sobol_limit): + i += 1 + continue if use_markers is False: marker = None else: marker = next(marker_list) - ax2.plot(time, self.data[feature][sensitivity][i], color=colors[i + 2], - linewidth=linewidth, antialiased=True, label=self.uncertain_names[i], + ax2.plot(time, sens_data, color=colors[color_idx + 2], + linewidth=self.linewidth, antialiased=True, label=self.uncertain_names[i], marker=marker, **plot_kwargs) + i += 1 + color_idx += 1 if legend_locs is not None: - assert (len(legend_locs) == 2, "you must provide two legend locations") + assert len(legend_locs) == 2, "you 
must provide two legend locations" loc1 = legend_locs[0] loc2 = legend_locs[1] else: @@ -2155,26 +2248,27 @@ def prediction_interval_sensitivity_1d(self, loc2 = "lower right" ax2.legend(loc=loc2) - ax2.yaxis.offsetText.set_fontsize(fontsize) - ax2.yaxis.offsetText.set_color(colors[color_2]) + ax2.yaxis.offsetText.set_fontsize(self.fontsize) + ax2.yaxis.offsetText.set_color(axescolor) ax2.spines["right"].set_visible(True) - ax2.spines["right"].set_edgecolor(colors[color_2]) - - # plt.ylim([0, 1.05]) - ax.tick_params(axis="y", color=colors[color], labelcolor=colors[color]) - ax.spines["left"].set_edgecolor(colors[color]) - ax.set_ylabel(ylabel + ", mean", color=colors[color], fontsize=labelsize) + ax2.spines["right"].set_edgecolor(axescolor) + if color_axes: + axescolor=colors[color] + ax.tick_params(axis="y", color=axescolor, labelcolor=axescolor) + ax.spines["left"].set_edgecolor(axescolor) + ax.set_ylabel(ylabel + ", mean", color=axescolor, fontsize=self.labelsize) ax2.set_xlim([min(time), max(time)]) ax.set_xlim([min(time), max(time)]) + ax2.set_ylim(0.0, 1.05) ax.legend(["Mean", "90% prediction interval"], loc=loc1) plt.tight_layout() if hardcopy: plt.savefig(os.path.join(self.folder, - feature + "_prediction-interval_sensitivity_" + sensitivity + self.figureformat)) + feature + "_prediction-interval_sensitivity_" + sensitivity + self.figureformat), dpi=self.dpi) if show: plt.show() diff --git a/src/uncertainpy/plotting/prettyplot/prettyplot.py b/src/uncertainpy/plotting/prettyplot/prettyplot.py index b173f6f2..fcd09e14 100644 --- a/src/uncertainpy/plotting/prettyplot/prettyplot.py +++ b/src/uncertainpy/plotting/prettyplot/prettyplot.py @@ -1,6 +1,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals import matplotlib.pyplot as plt +import matplotlib as mpl import numpy as np import seaborn as sns @@ -39,7 +40,7 @@ def set_legendstyle(): "legend.fontsize": fontsize, "legend.handlelength": 2.2, "legend.borderpad": 0.5, - "legend.framealpha": 2, + "legend.framealpha": 1, "legend.fancybox": True } plt.rcParams.update(params) @@ -286,7 +287,7 @@ def set_title(title, ax=None, **kwargs): ax.set_title(title, fontsize=titlesize, **kwargs) -def set_xlabel(xlabel, ax=None, color="black", **kwargs): +def set_xlabel(xlabel, ax=None, color="black", lblsize=labelsize, **kwargs): """ Set the x label @@ -308,12 +309,12 @@ def set_xlabel(xlabel, ax=None, color="black", **kwargs): Matplotlib xlabel() and set_xlabel() arguments """ if ax is None: - plt.xlabel(xlabel, fontsize=labelsize, color=color, **kwargs) + plt.xlabel(xlabel, fontsize=lblsize, color=color, **kwargs) else: - ax.set_xlabel(xlabel, fontsize=labelsize, color=color, **kwargs) + ax.set_xlabel(xlabel, fontsize=lblsize, color=color, **kwargs) -def set_ylabel(ylabel, ax=None, color="black", **kwargs): +def set_ylabel(ylabel, ax=None, color="black", lblsize=labelsize, **kwargs): """ Set the y label @@ -335,9 +336,9 @@ def set_ylabel(ylabel, ax=None, color="black", **kwargs): Matplotlib ylabel() and set_ylabel() arguments """ if ax is None: - plt.ylabel(ylabel, fontsize=labelsize, color=color, **kwargs) + plt.ylabel(ylabel, fontsize=lblsize, color=color, **kwargs) else: - ax.set_ylabel(ylabel, fontsize=labelsize, color=color, **kwargs) + ax.set_ylabel(ylabel, fontsize=lblsize, color=color, **kwargs) def set_style(style="seaborn-darkgrid", nr_colors=6, palette="hls", custom=True): @@ -454,7 +455,7 @@ def prettyPlot(x=[], y=None, xlabel="", ylabel="", color=None, - style="seaborn-darkgrid", + style="classic", 
custom_style=True, palette="hls", nr_colors=6, @@ -464,6 +465,8 @@ def prettyPlot(x=[], y=None, yerr=None, xerr=None, ecolor=None, + labelfontsize=labelsize, + ffontsize=fontsize, capsize=5, capthick=2, **kwargs): @@ -563,6 +566,13 @@ def prettyPlot(x=[], y=None, ---------- ax : matplotlib.axis object """ + global labelsize + global fontsize + if labelfontsize != labelsize: + labelsize = labelfontsize + if ffontsize != fontsize: + fontsize = ffontsize + set_style(style, nr_colors=nr_colors, palette=palette, custom=custom_style) if ax is None: @@ -583,8 +593,8 @@ def prettyPlot(x=[], y=None, remove_ticks(ax) set_title(title, ax) - set_xlabel(xlabel, ax) - set_ylabel(ylabel, ax) + set_xlabel(xlabel, ax, lblsize=labelfontsize) + set_ylabel(ylabel, ax, lblsize=labelfontsize) if y is None: y = x @@ -606,6 +616,7 @@ def prettyPlot(x=[], y=None, ecolor = ecolors[ecolor] else: ecolor = color + print("E-Color {} with type {}".format(ecolor, type(ecolor))) ax.errorbar(x, y, color=color, @@ -620,7 +631,7 @@ def prettyPlot(x=[], y=None, if custom_style: - ax.yaxis.offsetText.set_fontsize(labelsize) + ax.yaxis.offsetText.set_fontsize(labelfontsize) ax.yaxis.offsetText.set_color("black") # ax.ticklabel_format(useOffset=False) @@ -650,6 +661,8 @@ def prettyBar(x, error=None, palette="hls", nr_colors=6, align="center", + labelfontsize=labelsize, + ffontsize=fontsize, error_kw={"ecolor": axis_grey, "lw": 2, "capsize": 10, @@ -736,6 +749,12 @@ def prettyBar(x, error=None, ax : matplotlib ax Object """ + global labelsize + if labelfontsize != labelsize: + labelsize = labelfontsize + global fontsize + if ffontsize != fontsize: + fontsize = ffontsize set_style(style, nr_colors=nr_colors, palette=palette, custom=custom_style) if ax is None: @@ -778,7 +797,7 @@ def prettyBar(x, error=None, # ax.set_ylim([min(y), max(y)]) set_title(title, ax) - set_ylabel(ylabel, ax) + set_ylabel(ylabel, ax, lblsize=labelsize) return ax From 309c92c65e8d7bd309d8943780bde4bcf22517f8 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Mon, 13 Jul 2020 11:34:24 +0200 Subject: [PATCH 16/26] update --- src/uncertainpy/plotting/plot_uncertainty.py | 83 +++++++++---------- .../plotting/prettyplot/__init__.py | 3 +- .../plotting/prettyplot/prettyplot.py | 44 +++++----- 3 files changed, 61 insertions(+), 69 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index a9c4066c..a1c7724f 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -10,7 +10,7 @@ from .prettyplot import prettyPlot, prettyBar from .prettyplot import spines_color, get_current_colormap from .prettyplot import set_style, get_colormap, reset_style -from .prettyplot import axis_grey, labelsize, fontsize, titlesize, linewidth +from .prettyplot import labelsize, fontsize, titlesize, linewidth from ..data import Data from ..utils.logger import setup_module_logger, get_logger @@ -223,7 +223,7 @@ def evaluations(self, feature=None, foldername="", **plot_kwargs): else: raise AttributeError("Dimension of evaluations is not valid: dim {}".format(dimension)) - def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): + def evaluations_0d(self, feature=None, foldername="", palette="colorblind", **plot_kwargs): """ Plot all 0D evaluations for a specific model/feature. 
@@ -269,7 +269,7 @@ def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): ylabel=self.data.get_labels(feature)[0], title="{}, evaluations".format(feature.replace("_", " ")), new_figure=True, - palette="husl", + palette=palette, **plot_kwargs) plt.tight_layout() @@ -278,7 +278,7 @@ def evaluations_0d(self, feature=None, foldername="", **plot_kwargs): reset_style() - def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='linear', **plot_kwargs): + def evaluations_1d(self, feature=None, foldername="", palette="colorblind", xscale='linear', yscale='linear', **plot_kwargs): """ Plot all 1D evaluations for a specific model/feature. @@ -342,7 +342,7 @@ def evaluations_1d(self, feature=None, foldername="", xscale='linear', yscale='l ylabel=ylabel, title="{}, evaluation {:d}".format(feature.replace("_", " "), i), new_figure=True, - palette="husl", + palette=palette, **plot_kwargs) ax.set_xlim([min(time), max(time)]) ax.set_xscale(xscale) @@ -436,6 +436,7 @@ def attribute_feature_1d(self, attribute_name="mean", hardcopy=True, show=False, + palette="colorblind", xscale="linear", yscale="linear", title=None, @@ -508,7 +509,7 @@ def attribute_feature_1d(self, ax = prettyPlot(time, self.data[feature][attribute], title.replace("_", " "), xlabel, ylabel, nr_colors=3, - palette="husl", + palette=palette, **plot_kwargs) ax.set_xscale(xscale) ax.set_yscale(yscale) @@ -627,7 +628,7 @@ def attribute_feature_2d(self, reset_style() - def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', title=None, **plot_kwargs): + def mean_1d(self, feature, hardcopy=True, show=False, palette="colorblind", xscale='linear', yscale='linear', title=None, **plot_kwargs): """ Plot the mean for a specific 1 dimensional model/feature. @@ -666,7 +667,7 @@ def mean_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='l color=0, **plot_kwargs) - def variance_1d(self, feature, hardcopy=True, show=False, xscale='linear', yscale='linear', title=None, **plot_kwargs): + def variance_1d(self, feature, hardcopy=True, show=False, palette="colorblind", xscale='linear', yscale='linear', title=None, **plot_kwargs): """ Plot the variance for a specific 1 dimensional model/feature. @@ -770,6 +771,7 @@ def mean_variance_1d(self, show=False, xscale='linear', yscale='linear', + palette="colorblind", color_axes=True, title=None, **plot_kwargs): @@ -833,7 +835,7 @@ def mean_variance_1d(self, title.replace("_", " "), xlabel, ylabel + ", mean", style=style, nr_colors=3, - palette="husl", + palette=palette, labelfontsize=self.labelsize, ffontsize=self.fontsize, **plot_kwargs) @@ -900,10 +902,12 @@ def prediction_interval_1d(self, title=None, xscale='linear', yscale='linear', + palette="colorblind", xmin=None, xmax=None, ymin=None, ymax=None, + legend_loc="best", **plot_kwargs): """ Plot the prediction interval for a specific 1 dimensional model/feature. 
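The default palette changes from "husl" to seaborn's "colorblind", and most plots now expose it as a keyword together with a configurable legend location for the prediction interval. For example, reusing the PlotUncertainty instance from the sketches above (the palette and legend location are ordinary seaborn/matplotlib options, the feature name is a placeholder):

    # Restore the previous look for a single plot and move the legend.
    plot.prediction_interval_1d(feature="my_feature",
                                palette="husl",
                                legend_loc="upper right")
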
@@ -968,7 +972,7 @@ def prediction_interval_1d(self, ffontsize=self.fontsize, color=0, nr_colors=3, - palette="husl", + palette=palette, **plot_kwargs) colors = get_current_colormap() @@ -993,7 +997,7 @@ def prediction_interval_1d(self, ax.set_xscale(xscale) ax.set_yscale(yscale) - plt.legend(["Mean", "90% prediction interval"], loc="best") + plt.legend(["Mean", "90% prediction interval"], loc=legend_loc) plt.tight_layout() @@ -1015,6 +1019,7 @@ def sensitivity_1d(self, show=False, xscale='linear', yscale='linear', + palette="colorblind", title=None, **plot_kwargs): """ @@ -1090,7 +1095,7 @@ def sensitivity_1d(self, xlabel=xlabel, ylabel=title, color=i, - palette="husl", + palette=palette, labelfontsize=self.labelsize, ffontsize=self.fontsize, nr_colors=len(self.data.uncertain_parameters), **plot_kwargs) @@ -1120,6 +1125,7 @@ def sensitivity_1d_grid(self, show=False, xscale="linear", yscale="linear", + palette="colorblind", title=None, **plot_kwargs): """ @@ -1225,7 +1231,7 @@ def sensitivity_1d_grid(self, color=i, nr_colors=nr_plots, ax=ax, - palette="husl", + palette=palette, labelfontsize=self.labelsize, ffontsize=self.fontsize, @@ -1265,6 +1271,7 @@ def sensitivity_1d_combined(self, xscale="linear", yscale="linear", title=None, + palette="colorblind", sobol_limit=.0, **plot_kwargs): """ @@ -1333,19 +1340,13 @@ def sensitivity_1d_combined(self, xlabel, ylabel = labels # plot sensitivity - number_colors = 0 - if not np.isclose(sobol_limit, 0.0): - for i in self.data[feature][sensitivity]: - if not np.any(i > sobol_limit): - continue - number_colors += 1 - else: - number_colors = len(self.data.uncertain_parameters) + number_colors = len(self.data.uncertain_parameters) color_idx = 0 for i in range(len(self.data[feature][sensitivity])): if not np.any(self.data[feature][sensitivity][i] > sobol_limit): + color_idx += 1 continue if title is None: @@ -1357,7 +1358,7 @@ def sensitivity_1d_combined(self, ylabel=title_tmp, new_figure=False, color=color_idx, - palette="husl", + palette=palette, nr_colors=number_colors, label=self.uncertain_names[i], labelfontsize=self.labelsize, @@ -1386,7 +1387,7 @@ def sensitivity_1d_combined(self, reset_style() - def features_1d(self, sensitivity="first", xscale='linear', yscale='linear', title=None): + def features_1d(self, sensitivity="first", xscale='linear', yscale='linear', palette="colorblind", title=None): """ Plot all data for all 1 dimensional model/features. 
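sobol_limit drops parameters whose Sobol indices never rise above the given threshold, so the combined sensitivity plot stays readable for models with many nearly inert parameters. A sketch, again reusing the `plot` instance from above; the threshold and feature name are illustrative:

    # Only parameters whose first-order indices exceed 0.05 somewhere are
    # drawn; the colour assignment still follows the full parameter list.
    plot.sensitivity_1d_combined(feature="my_feature",
                                 sensitivity="first",
                                 sobol_limit=0.05)
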
@@ -1591,18 +1592,17 @@ def feature_0d(self, ax2 = ax.twinx() spines_color(ax2, edges={"top": "None", "bottom": "None", - "right": axis_grey, "left": "None"}) + "left": "None"}) ax2.tick_params(axis="y", which="both", right=True, left=False, labelright=True, - color=axis_grey, labelcolor="black", labelsize=self.labelsize) + labelcolor="black", labelsize=self.labelsize) ax2.set_ylabel(label, fontsize=self.labelsize) ax2.set_ylim([0, 1.05]) ax2.spines["right"].set_visible(True) - ax2.spines["right"].set_edgecolor(axis_grey) i = 0 legend_bars = [] - colors = get_colormap(palette="husl", nr_colors=len(self.data.uncertain_parameters)) + colors = get_colormap(palette=palette, nr_colors=len(self.data.uncertain_parameters)) for parameter in self.data.uncertain_parameters: ll = ax2.bar(pos, self.data[feature][sensitivity][i], width=width, @@ -1622,7 +1622,6 @@ def feature_0d(self, bbox_to_anchor=location, ncol=legend_size) - # lgd.get_frame().set_edgecolor(axis_grey) fig = plt.gcf() fig.subplots_adjust(top=(0.91 - legend_width * 0.053)) @@ -1661,6 +1660,7 @@ def average_sensitivity(self, sensitivity="first", hardcopy=True, title=None, + palette="colorblind", show=False): """ Plot the average of the sensitivity for a specific model/feature. @@ -1724,7 +1724,7 @@ def average_sensitivity(self, ylabel="Average of " + title_tmp, nr_colors=len(self.data.uncertain_parameters), labelfontsize=self.labelsize, - palette="husl", + palette=palette, index=index, style="classic") @@ -1748,6 +1748,7 @@ def average_sensitivity_all(self, sensitivity="first", hardcopy=True, title=None, + palette="colorblind", show=False): """ Plot the average of the sensitivity for all model/features. @@ -1970,6 +1971,7 @@ def average_sensitivity_grid(self, hardcopy=True, title=None, show=False, + palette="colorblind", **plot_kwargs): """ Plot the average of the sensitivity for all model/features in @@ -2061,7 +2063,7 @@ def average_sensitivity_grid(self, nr_colors=len(self.data.uncertain_parameters), labelfontsize=self.labelsize, index=index, - palette="husl", + palette=palette, ax=ax, **plot_kwargs) @@ -2101,7 +2103,9 @@ def prediction_interval_sensitivity_1d(self, yscale='linear', use_markers=False, legend_locs=None, + legend_cols=1, sobol_limit=.0, + palette="colorblind", color_axes=True, **plot_kwargs): """ @@ -2177,7 +2181,7 @@ def prediction_interval_sensitivity_1d(self, xlabel=xlabel, ylabel=ylabel, color=0, nr_colors=3, - palette="husl", + palette=palette, labelfontsize=self.labelsize, ffontsize=self.fontsize, **plot_kwargs) @@ -2194,17 +2198,10 @@ def prediction_interval_sensitivity_1d(self, ax.set_yscale(yscale) # plot sensitivity - number_colors = 0 - if not np.isclose(sobol_limit, 0.0): - for i in self.data[feature][sensitivity]: - if not np.any(i > sobol_limit): - continue - number_colors += 1 - else: - number_colors = len(self.data[feature][sensitivity]) + number_colors = len(self.data[feature][sensitivity]) number_colors += 2 ax2 = ax.twinx() - colors = sns.color_palette("husl", n_colors=number_colors) + colors = sns.color_palette(palette, n_colors=number_colors) color = 0 color_2 = 2 @@ -2226,7 +2223,9 @@ def prediction_interval_sensitivity_1d(self, color_idx = 0 for sens_data in self.data[feature][sensitivity]: if not np.any(sens_data > sobol_limit): + marker = next(marker_list) i += 1 + color_idx += 1 continue if use_markers is False: marker = None @@ -2235,7 +2234,7 @@ def prediction_interval_sensitivity_1d(self, ax2.plot(time, sens_data, color=colors[color_idx + 2], linewidth=self.linewidth, antialiased=True, 
label=self.uncertain_names[i], - marker=marker, **plot_kwargs) + marker=marker, markeredgecolor=colors[color_idx + 2], **plot_kwargs) i += 1 color_idx += 1 @@ -2247,7 +2246,7 @@ def prediction_interval_sensitivity_1d(self, loc1 = "best" loc2 = "lower right" - ax2.legend(loc=loc2) + ax2.legend(loc=loc2, ncol=legend_cols) ax2.yaxis.offsetText.set_fontsize(self.fontsize) ax2.yaxis.offsetText.set_color(axescolor) diff --git a/src/uncertainpy/plotting/prettyplot/__init__.py b/src/uncertainpy/plotting/prettyplot/__init__.py index 1f782abc..1691c0df 100644 --- a/src/uncertainpy/plotting/prettyplot/__init__.py +++ b/src/uncertainpy/plotting/prettyplot/__init__.py @@ -24,7 +24,6 @@ from .prettyplot import prettyPlot from .prettyplot import prettyBar -from .prettyplot import axis_grey from .prettyplot import titlesize from .prettyplot import fontsize from .prettyplot import labelsize @@ -33,4 +32,4 @@ from .prettyplot import ticklabelsize from .prettyplot import linewidth from .prettyplot import markersize -from .prettyplot import markeredgewidth \ No newline at end of file +from .prettyplot import markeredgewidth diff --git a/src/uncertainpy/plotting/prettyplot/prettyplot.py b/src/uncertainpy/plotting/prettyplot/prettyplot.py index fcd09e14..f5aa8992 100644 --- a/src/uncertainpy/plotting/prettyplot/prettyplot.py +++ b/src/uncertainpy/plotting/prettyplot/prettyplot.py @@ -6,17 +6,17 @@ import seaborn as sns -axis_grey = (0.6, 0.6, 0.6) ticksize = 5 -markersize = 8 -markeredgewidth = 1.4 +markersize = 6 +markeredgewidth = 1. figure_width = 7.08 labelsize = 10 titlesize = 12 -fontsize = 8 +fontsize = 10 ticklabelsize = 8 -linewidth = 1.4 -figsize = (figure_width, figure_width*0.75) +linewidth = 1.5 +#figsize = (figure_width, figure_width*0.75) +figsize = [6.85039, 5.1375] def set_figuresize(): """ @@ -34,14 +34,10 @@ def set_legendstyle(): Set legend options. 
""" params = { - "legend.frameon": True, "legend.numpoints": 1, - "legend.scatterpoints": 1, "legend.fontsize": fontsize, - "legend.handlelength": 2.2, - "legend.borderpad": 0.5, - "legend.framealpha": 1, - "legend.fancybox": True + "legend.edgecolor": '0.8', + "legend.fancybox": True, } plt.rcParams.update(params) @@ -92,8 +88,8 @@ def set_tickstyle(): Set tick style options """ - params = {"xtick.color": axis_grey, - "ytick.color": axis_grey, + params = {'xtick.direction': 'out', + 'ytick.direction': 'out', "xtick.major.size": ticksize, "ytick.major.size": ticksize, "xtick.labelsize": ticklabelsize, @@ -110,7 +106,6 @@ def set_axestyle(): params = {"axes.titlesize": titlesize, "axes.labelsize": labelsize, - "axes.edgecolor": axis_grey, "axes.labelcolor": "black", "axes.linewidth": 1, "axes.spines.right": False, @@ -153,8 +148,8 @@ def set_grid(ax, bgcolor="#EAEAF2", linecolor="w", linestyle="-", linewidth=1.3) zorder=-10) -def spines_color(ax, edges={"top": "None", "bottom": axis_grey, - "right": "None", "left": axis_grey}): +def spines_color(ax, edges={"top": "None", + "right": "None"}): """ Set spines color @@ -182,11 +177,11 @@ def remove_ticks(ax): axis object where the ticks are removed """ ax.tick_params(axis="x", which="both", bottom=True, top=False, - labelbottom=True, color=axis_grey, labelcolor="black", + labelbottom=True, labelcolor="black", labelsize=labelsize) ax.tick_params(axis="y", which="both", right=False, left=True, - labelleft=True, color=axis_grey, labelcolor="black", + labelleft=True, labelcolor="black", labelsize=labelsize) @@ -446,8 +441,8 @@ def set_legend(labels, ax=None): legend = plt.legend(labels) - frame = legend.get_frame() - frame.set_facecolor(color) + #frame = legend.get_frame() + #frame.set_facecolor(color) def prettyPlot(x=[], y=None, @@ -663,8 +658,7 @@ def prettyBar(x, error=None, align="center", labelfontsize=labelsize, ffontsize=fontsize, - error_kw={"ecolor": axis_grey, - "lw": 2, + error_kw={"lw": 2, "capsize": 10, "capthick": 2}, **kwargs): @@ -779,7 +773,7 @@ def prettyBar(x, error=None, if error_kw is None: - error_kw = dict(ecolor=axis_grey, lw=2, capsize=10, capthick=2) + error_kw = dict(lw=2, capsize=10, capthick=2) if color is None: colors = sns.color_palette() @@ -788,7 +782,7 @@ def prettyBar(x, error=None, ax.bar(index, x, yerr=error, color=colors, width=width, align=align, linewidth=linewidth, error_kw=error_kw, - edgecolor=axis_grey, **kwargs) + **kwargs) ax.set_xticks(xticks) ax.set_xticklabels(xlabels, fontsize=labelsize, rotation=0) From db0344f78a7cfe6029968701b50b5b19959ceb16 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Mon, 25 Jan 2021 22:24:20 +0100 Subject: [PATCH 17/26] keyword to save samples --- .../core/uncertainty_calculations.py | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/uncertainpy/core/uncertainty_calculations.py b/src/uncertainpy/core/uncertainty_calculations.py index 001bb557..6c678b01 100644 --- a/src/uncertainpy/core/uncertainty_calculations.py +++ b/src/uncertainpy/core/uncertainty_calculations.py @@ -968,7 +968,7 @@ def create_PCE_collocation_rosenblatt(self, return U_hat, dist_R, data - def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): + def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4, save_samples=False): """ Calculate the statistical metrics from the polynomial chaos approximation. 
@@ -987,6 +987,8 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): Number of samples for the Monte Carlo sampling of the polynomial chaos approximation. Default is 10**4. + save_samples: bool, optional + Save samples in feature data to, for example, plot PDFs later. Returns ------- @@ -1046,7 +1048,8 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4): else: U_mc[feature] = U_hat[feature](samples) - data[feature].samples = U_mc[feature] + if save_samples: + data[feature].samples = U_mc[feature] data[feature].percentile_5 = np.percentile(U_mc[feature], 5, -1) data[feature].percentile_95 = np.percentile(U_mc[feature], 95, -1) @@ -1197,6 +1200,7 @@ def polynomial_chaos(self, nr_pc_mc_samples=10**4, allow_incomplete=True, seed=None, + save_samples=False, **custom_kwargs): """ Perform an uncertainty quantification and sensitivity analysis @@ -1383,18 +1387,20 @@ def polynomial_chaos(self, else: raise ValueError("No polynomial chaos method with name {}".format(method)) - data = self.analyse_PCE(U_hat, distribution, data, nr_samples=nr_pc_mc_samples) + data = self.analyse_PCE(U_hat, distribution, data, + nr_samples=nr_pc_mc_samples, + save_samples=save_samples) data.seed = seed return data - def monte_carlo(self, uncertain_parameters=None, nr_samples=10**4, seed=None, - allow_incomplete=True): + allow_incomplete=True, + save_samples=False): """ Perform an uncertainty quantification using the quasi-Monte Carlo method. @@ -1414,6 +1420,8 @@ def monte_carlo(self, If the uncertainty quantification should be performed for features or models with incomplete evaluations. Default is True. + save_samples: bool, optional + Save samples in feature data to, for example, plot PDFs later. Returns ------- @@ -1525,7 +1533,8 @@ def monte_carlo(self, logger = get_logger(self) if (np.all(mask) or allow_incomplete) and sum(mask) > 0: - data[feature].samples = masked_evaluations + if save_samples: + data[feature].samples = masked_evaluations data[feature].mean = np.mean(masked_evaluations, 0) data[feature].variance = np.var(masked_evaluations, 0) From dc97bb0d24c99206751c19270a81823f24ba94c5 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 26 Jan 2021 00:08:51 +0100 Subject: [PATCH 18/26] added option to UQ --- src/uncertainpy/uncertainty.py | 17 ++++++++++++++--- tests/testing_classes/testing_uncertainty.py | 6 +++++- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/uncertainpy/uncertainty.py b/src/uncertainpy/uncertainty.py index 96c65936..23d7edc8 100644 --- a/src/uncertainpy/uncertainty.py +++ b/src/uncertainpy/uncertainty.py @@ -226,6 +226,7 @@ def quantify(self, figure_folder="figures", figureformat=".png", save=True, + save_samples=False, data_folder="data", filename=None, **custom_kwargs): @@ -407,6 +408,7 @@ def quantify(self, figure_folder=figure_folder, figureformat=figureformat, save=save, + save_samples=save_samples, data_folder=data_folder, filename=filename, **custom_kwargs) @@ -425,6 +427,7 @@ def quantify(self, figure_folder=figure_folder, figureformat=figureformat, save=save, + save_samples=save_samples, data_folder=data_folder, filename=filename, **custom_kwargs) @@ -438,6 +441,7 @@ def quantify(self, figureformat=figureformat, save=save, data_folder=data_folder, + save_samples=save_samples, filename=filename, seed=seed) @@ -451,6 +455,7 @@ def quantify(self, save=save, data_folder=data_folder, filename=filename, + save_samples=save_samples, seed=seed) @@ -565,6 +570,7 @@ def polynomial_chaos(self, figure_folder="figures", 
figureformat=".png", save=True, + save_samples=False, data_folder="data", filename=None, **custom_kwargs): @@ -708,6 +714,7 @@ def polynomial_chaos(self, quadrature_order=quadrature_order, nr_pc_mc_samples=nr_pc_mc_samples, allow_incomplete=allow_incomplete, + save_samples=save_samples, seed=seed, **custom_kwargs ) @@ -736,6 +743,7 @@ def monte_carlo(self, figureformat=".png", save=True, data_folder="data", + save_samples=False, filename=None): """ Perform an uncertainty quantification using the quasi-Monte Carlo method. @@ -832,6 +840,7 @@ def monte_carlo(self, self.data = self.uncertainty_calculations.monte_carlo(uncertain_parameters=uncertain_parameters, nr_samples=nr_samples, + save_samples=save_samples, seed=seed) self.data.backend = self.backend @@ -863,6 +872,7 @@ def polynomial_chaos_single(self, figure_folder="figures", figureformat=".png", save=True, + save_samples=False, data_folder="data", filename=None): """ @@ -1016,8 +1026,8 @@ def polynomial_chaos_single(self, nr_collocation_nodes=nr_collocation_nodes, quadrature_order=quadrature_order, nr_pc_mc_samples=nr_pc_mc_samples, - allow_incomplete=allow_incomplete, - ) + save_samples=save_samples, + allow_incomplete=allow_incomplete) data.backend = self.backend data.seed = seed @@ -1046,6 +1056,7 @@ def monte_carlo_single(self, data_folder="data", figure_folder="figures", figureformat=".png", + save_samples=False, filename=None): """ Perform an uncertainty quantification for a single parameter at the time @@ -1152,7 +1163,7 @@ def monte_carlo_single(self, logger.info("Running MC for " + uncertain_parameter) data = self.uncertainty_calculations.monte_carlo(uncertain_parameters=uncertain_parameter, - nr_samples=nr_samples) + nr_samples=nr_samples, save_samples=save_samples) data.backend = self.backend data.seed = seed diff --git a/tests/testing_classes/testing_uncertainty.py b/tests/testing_classes/testing_uncertainty.py index 589f6b1b..1f738cb2 100644 --- a/tests/testing_classes/testing_uncertainty.py +++ b/tests/testing_classes/testing_uncertainty.py @@ -12,6 +12,7 @@ def polynomial_chaos(self, quadrature_order=4, nr_pc_mc_samples=10**4, allow_incomplete=False, + save_samples=False, seed=None): arguments = {} @@ -25,6 +26,7 @@ def polynomial_chaos(self, arguments["quadrature_order"] = quadrature_order arguments["nr_pc_mc_samples"] = nr_pc_mc_samples arguments["seed"] = seed + arguments["save_samples"] = save_samples arguments["allow_incomplete"] = allow_incomplete @@ -37,6 +39,7 @@ def polynomial_chaos(self, def monte_carlo(self, uncertain_parameters=None, nr_samples=10**3, + save_samples=False, seed=None): arguments = {} @@ -44,6 +47,7 @@ def monte_carlo(self, arguments["uncertain_parameters"] = uncertain_parameters arguments["seed"] = seed arguments["nr_samples"] = nr_samples + arguments["save_samples"] = save_samples data = Data(logger_level=None) data.arguments = arguments @@ -61,4 +65,4 @@ def custom_uncertainty_quantification(self, custom_keyword="custom_value"): data = Data(logger_level=None) data.arguments = arguments - return data \ No newline at end of file + return data From d693663991ffe736f4dd6a66da93240b53057f5e Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 30 Mar 2021 12:19:57 +0200 Subject: [PATCH 19/26] small changes --- src/uncertainpy/plotting/plot_uncertainty.py | 27 +++++++++++-------- .../plotting/prettyplot/prettyplot.py | 15 ++++++----- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py 
index ee553710..3d02ee2f 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -84,7 +84,7 @@ def __init__(self, setup_module_logger(class_instance=self, level=logger_level) - font_settings = ["fontsize", "linewidth", "titlesize", "labelsize", "dpi"] + font_settings = ["fontsize", "linewidth", "titlesize", "labelsize", "dpi"] for k in font_settings: if k in kwargs: setattr(self, k, kwargs[k]) @@ -1478,9 +1478,9 @@ def convert_sensitivity(self, sensitivity): full_text = "" if sensitivity == "sobol_first": - full_text = "first order Sobol indices" + full_text = "First order Sobol indices" elif sensitivity == "sobol_total": - full_text = "total order Sobol indices" + full_text = "Total order Sobol indices" return sensitivity, full_text @@ -1510,7 +1510,8 @@ def feature_0d(self, hardcopy=True, palette="colorblind", show=False, - max_legend_size=5): + max_legend_size=5, + title=None): """ Plot all attributes (mean, variance, p_05, p_95 and sensitivity of it exists) for a 0 dimensional model/feature. @@ -1614,7 +1615,7 @@ def feature_0d(self, pos += width xticks.append(pos - (i / 2. + 0.5) * width) - xlabels.append(sensitivity.split("_")[0] + " " + sensitivity.split("_")[1]) + xlabels.append(sensitivity.split("_")[0].capitalize() + " " + sensitivity.split("_")[1]) location = (0.5, 1.01 + legend_width * 0.095) plt.legend(legend_bars, @@ -1623,7 +1624,6 @@ def feature_0d(self, bbox_to_anchor=location, ncol=legend_size) - fig = plt.gcf() fig.subplots_adjust(top=(0.91 - legend_width * 0.053)) @@ -1634,7 +1634,8 @@ def feature_0d(self, for tick in ax.get_xticklabels()[:2]: tick.set_rotation(-25) - plt.suptitle(feature.replace("_", " "), fontsize=self.titlesize) + if title is not None: + plt.suptitle(feature.replace("_", " "), fontsize=self.titlesize) if sensitivity is None or sensitivity not in self.data[feature]: plt.subplots_adjust(top=0.93) @@ -1644,6 +1645,8 @@ def feature_0d(self, else: save_name = feature + "_" + sensitivity + self.figureformat + plt.tight_layout() + if hardcopy: plt.savefig(os.path.join(self.folder, save_name), dpi=self.dpi) @@ -1661,7 +1664,7 @@ def average_sensitivity(self, sensitivity="first", hardcopy=True, title=None, - palette="colorblind", + palette="colorblind", show=False): """ Plot the average of the sensitivity for a specific model/feature. @@ -1718,11 +1721,13 @@ def average_sensitivity(self, index = np.arange(1, len(self.data.uncertain_parameters) + 1) * width if title is None: - title = "Average of " + title_tmp + ", " + feature.replace("_", " ") + title = "Average of " + title_tmp[0].lower() + title_tmp[1:] + ", " + feature.replace("_", " ") + elif title is False: + title = "" prettyBar(self.data[feature][sensitivity + "_average"], title=title, xlabels=self.uncertain_names, - ylabel="Average of " + title_tmp, + ylabel="Average of " + title_tmp[0].lower() + title_tmp[1:], nr_colors=len(self.data.uncertain_parameters), labelfontsize=self.labelsize, palette=palette, @@ -1749,7 +1754,7 @@ def average_sensitivity_all(self, sensitivity="first", hardcopy=True, title=None, - palette="colorblind", + palette="colorblind", show=False): """ Plot the average of the sensitivity for all model/features. 
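The save_samples switch added in this part of the series is threaded from quantify() through polynomial_chaos()/monte_carlo() down to analyse_PCE(), so the Monte Carlo evaluations of the surrogate end up in data[feature].samples instead of being discarded. A minimal usage sketch follows; the coffee-cup model mirrors the standard uncertainpy example and is only there to make the call runnable, while the save_samples keyword and the samples attribute are the pieces introduced by these commits.

    import numpy as np
    import chaospy as cp
    import uncertainpy as un

    def coffee_cup(kappa, T_env):
        # Closed-form solution of dT/dt = -kappa*(T - T_env) with T(0) = 95
        time = np.linspace(0, 200, 150)
        temperature = T_env + (95 - T_env) * np.exp(-kappa * time)
        return time, temperature

    model = un.Model(run=coffee_cup, labels=["Time (s)", "Temperature (C)"])
    parameters = {"kappa": cp.Uniform(0.025, 0.075),
                  "T_env": cp.Uniform(15, 25)}

    UQ = un.UncertaintyQuantification(model=model, parameters=parameters)

    # save_samples=True is forwarded to analyse_PCE(), which then keeps the
    # surrogate evaluations next to the mean, variance and percentiles.
    data = UQ.quantify(seed=10, save_samples=True)

    # Only populated when save_samples=True; the last axis indexes the Monte
    # Carlo samples, matching the np.percentile(..., -1) calls in analyse_PCE.
    samples = data["coffee_cup"].samples
    print(samples.shape)

These saved samples can then be histogrammed to plot the predicted PDF at any point of the output, which is the use case named in the new docstring entry.
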
diff --git a/src/uncertainpy/plotting/prettyplot/prettyplot.py b/src/uncertainpy/plotting/prettyplot/prettyplot.py index 6a8423dc..4ab8cb40 100644 --- a/src/uncertainpy/plotting/prettyplot/prettyplot.py +++ b/src/uncertainpy/plotting/prettyplot/prettyplot.py @@ -7,17 +7,18 @@ ticksize = 5 -markersize = 6 +markersize = 6 markeredgewidth = 1. figure_width = 7.08 labelsize = 10 titlesize = 12 -fontsize = 10 +fontsize = 10 ticklabelsize = 8 linewidth = 1.5 -#figsize = (figure_width, figure_width*0.75) +# figsize = (figure_width, figure_width*0.75) figsize = [6.85039, 5.1375] + def set_figuresize(): """ Set the size of a figure. @@ -44,11 +45,11 @@ def set_legendstyle(): plt.rcParams.update(params) - def set_font(): """Set font options.""" params = {"text.antialiased": True, - "font.family": "serif", + "font.family": "sans-serif", + "font.sans-serif": "Arial", "font.weight": "normal", } @@ -62,8 +63,8 @@ def set_latex_font(): params = {"text.usetex": True, "text.latex.preamble": "\\usepackage{amsmath}, \\usepackage{amssymb}", "text.antialiased": True, - # "font.family": "lmodern", - # "font.weight": "normal" + # "font.family": "lmodern", + # "font.weight": "normal" } plt.rcParams.update(params) From ac03d13fa90b01f6dbf8a2f6b085a415f2796ce0 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Sun, 2 May 2021 23:55:15 +0200 Subject: [PATCH 20/26] make samples accessible --- .../core/uncertainty_calculations.py | 105 +++++++++--------- 1 file changed, 55 insertions(+), 50 deletions(-) diff --git a/src/uncertainpy/core/uncertainty_calculations.py b/src/uncertainpy/core/uncertainty_calculations.py index ae41f18e..571e1620 100644 --- a/src/uncertainpy/core/uncertainty_calculations.py +++ b/src/uncertainpy/core/uncertainty_calculations.py @@ -14,6 +14,8 @@ from ..utils.logger import get_logger + + class UncertaintyCalculations(ParameterBase): """ Perform the calculations for the uncertainty quantification and @@ -392,7 +394,54 @@ def create_masked_nodes_weights(self, data, feature, nodes, weights): return masked_evaluations, mask, masked_nodes, masked_weights + def get_UQ_samples(self, method, uncertain_parameters, polynomial_order, quadrature_order=None, seed=None): + if method not in ["spectral", "collocation", "MC"]: + raise ValueError("The UQ method needs to be provided (spectral, collocation or MC)") + uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) + distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) + if method == "spectral": + if quadrature_order is None: + raise ValueError("The quadrature_order must be an integer") + P = cp.orth_ttr(polynomial_order, distribution) + nodes, weights = cp.generate_quadrature(quadrature_order, + distribution, + rule="J", + sparse=True) + return uncertain_parameters, distribution, P, nodes, weights + elif method == "collocation": + P = cp.orth_ttr(polynomial_order, distribution) + nr_collocation_nodes = quadrature_order + if nr_collocation_nodes is None: + nr_collocation_nodes = 2*len(P) + 2 + nodes = distribution.sample(nr_collocation_nodes, "M") + return uncertain_parameters, distribution, P, nodes + elif method == "MC": + nr_samples = polynomial_order + if nr_samples is None: + raise ValueError("The nr_samples must be an integer") + + if seed is not None: + np.random.seed(seed) + + problem = { + "num_vars": len(uncertain_parameters), + "names": uncertain_parameters, + "bounds": [[0,1]]*len(uncertain_parameters) + } + + # Create the Multivariate normal distribution + dist_R = [] + for parameter in 
uncertain_parameters: + dist_R.append(cp.Uniform()) + + dist_R = cp.J(*dist_R) + + nr_sobol_samples = int(np.round(nr_samples/2.)) + + nodes_R = saltelli.sample(problem, nr_sobol_samples, calc_second_order=False) + nodes = distribution.inv(dist_R.fwd(nodes_R.transpose())) + return uncertain_parameters, distribution, nodes, nr_sobol_samples def create_PCE_spectral(self, @@ -478,24 +527,14 @@ def create_PCE_spectral(self, uncertainpy.Data uncertainpy.Parameters """ - uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) - - distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) - - P = cp.orth_ttr(polynomial_order, distribution) - if quadrature_order is None: quadrature_order = polynomial_order + 2 - - - nodes, weights = cp.generate_quadrature(quadrature_order, - distribution, - rule="J", - sparse=True) + uncertain_parameters, distribution, P, nodes, weights = self.get_UQ_samples("spectral", uncertain_parameters, polynomial_order, quadrature_order=quadrature_order) # Running the model data = self.runmodel.run(nodes, uncertain_parameters) + # TODO: import data here instead of running data.method = "polynomial chaos expansion with the pseudo-spectral method. polynomial_order={}, quadrature_order={}".format(polynomial_order, quadrature_order) @@ -614,20 +653,13 @@ def create_PCE_collocation(self, uncertainpy.Parameters """ - uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) - - distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) - - P = cp.orth_ttr(polynomial_order, distribution) - if nr_collocation_nodes is None: - nr_collocation_nodes = 2*len(P) + 2 - - nodes = distribution.sample(nr_collocation_nodes, "M") - + uncertain_parameters, distribution, P, nodes = self.get_UQ_samples("collocation", uncertain_parameters, polynomial_order, quadrature_order=nr_collocation_nodes) # Running the model data = self.runmodel.run(nodes, uncertain_parameters) + # TODO: import data here instead of running + data.method = "polynomial chaos expansion with point collocation. 
polynomial_order={}, nr_collocation_nodes={}".format(polynomial_order, nr_collocation_nodes) logger = get_logger(self) @@ -1480,34 +1512,7 @@ def monte_carlo(self, uncertainpy.Parameters """ - if seed is not None: - np.random.seed(seed) - - uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters) - - distribution = self.create_distribution(uncertain_parameters=uncertain_parameters) - - # nodes = distribution.sample(nr_samples, "M") - - problem = { - "num_vars": len(uncertain_parameters), - "names": uncertain_parameters, - "bounds": [[0,1]]*len(uncertain_parameters) - } - - # Create the Multivariate normal distribution - dist_R = [] - for parameter in uncertain_parameters: - dist_R.append(cp.Uniform()) - - dist_R = cp.J(*dist_R) - - nr_sobol_samples = int(np.round(nr_samples/2.)) - - nodes_R = saltelli.sample(problem, nr_sobol_samples, calc_second_order=False) - - nodes = distribution.inv(dist_R.fwd(nodes_R.transpose())) - + uncertain_parameters, distribution, nodes, nr_sobol_samples = self.get_UQ_samples("MC", uncertain_parameters, nr_samples) data = self.runmodel.run(nodes, uncertain_parameters) From 23f4e4daf345106a8402c5aecc0ace033623db16 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Mon, 3 May 2021 17:18:27 +0200 Subject: [PATCH 21/26] small addition --- .../core/uncertainty_calculations.py | 10 +++---- src/uncertainpy/utils/__init__.py | 1 + src/uncertainpy/utils/utility.py | 29 +++++++++++++++++++ 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/src/uncertainpy/core/uncertainty_calculations.py b/src/uncertainpy/core/uncertainty_calculations.py index 571e1620..8c4ea298 100644 --- a/src/uncertainpy/core/uncertainty_calculations.py +++ b/src/uncertainpy/core/uncertainty_calculations.py @@ -412,9 +412,9 @@ def get_UQ_samples(self, method, uncertain_parameters, polynomial_order, quadrat P = cp.orth_ttr(polynomial_order, distribution) nr_collocation_nodes = quadrature_order if nr_collocation_nodes is None: - nr_collocation_nodes = 2*len(P) + 2 + nr_collocation_nodes = 2 * len(P) + 2 nodes = distribution.sample(nr_collocation_nodes, "M") - return uncertain_parameters, distribution, P, nodes + return uncertain_parameters, distribution, P, nodes, None elif method == "MC": nr_samples = polynomial_order if nr_samples is None: @@ -441,7 +441,7 @@ def get_UQ_samples(self, method, uncertain_parameters, polynomial_order, quadrat nodes_R = saltelli.sample(problem, nr_sobol_samples, calc_second_order=False) nodes = distribution.inv(dist_R.fwd(nodes_R.transpose())) - return uncertain_parameters, distribution, nodes, nr_sobol_samples + return uncertain_parameters, distribution, None, nodes, nr_sobol_samples def create_PCE_spectral(self, @@ -653,7 +653,7 @@ def create_PCE_collocation(self, uncertainpy.Parameters """ - uncertain_parameters, distribution, P, nodes = self.get_UQ_samples("collocation", uncertain_parameters, polynomial_order, quadrature_order=nr_collocation_nodes) + uncertain_parameters, distribution, P, nodes, _ = self.get_UQ_samples("collocation", uncertain_parameters, polynomial_order, quadrature_order=nr_collocation_nodes) # Running the model data = self.runmodel.run(nodes, uncertain_parameters) @@ -1512,7 +1512,7 @@ def monte_carlo(self, uncertainpy.Parameters """ - uncertain_parameters, distribution, nodes, nr_sobol_samples = self.get_UQ_samples("MC", uncertain_parameters, nr_samples) + uncertain_parameters, distribution, _, nodes, nr_sobol_samples = self.get_UQ_samples("MC", uncertain_parameters, nr_samples) data = self.runmodel.run(nodes, 
uncertain_parameters) diff --git a/src/uncertainpy/utils/__init__.py b/src/uncertainpy/utils/__init__.py index 18cfbe6d..a5767f3e 100644 --- a/src/uncertainpy/utils/__init__.py +++ b/src/uncertainpy/utils/__init__.py @@ -14,3 +14,4 @@ from .logger import MyFormatter, TqdmLoggingHandler, MultiprocessLoggingHandler from .utility import lengths, none_to_nan, contains_nan from .utility import is_regular, set_nan +from .utility import create_model_parameters diff --git a/src/uncertainpy/utils/utility.py b/src/uncertainpy/utils/utility.py index 01e665ba..22ce3df7 100644 --- a/src/uncertainpy/utils/utility.py +++ b/src/uncertainpy/utils/utility.py @@ -291,3 +291,32 @@ def is_regular(values): # return values + +def create_model_parameters(nodes, uncertain_parameters): + """ + Combine nodes (values) with the uncertain parameter names to create a + list of dictionaries corresponding to the model values for each + model evaluation. + Parameters + ---------- + nodes : array + A series of different set of parameters. The model and each feature is + evaluated for each set of parameters in the series. + uncertain_parameters : list + A list of names of the uncertain parameters. + Returns + ------- + model_parameters : list + A list where each element is a dictionary with the model parameters + for a single evaluation. + """ + model_parameters = [] + for node in nodes.T: + if node.ndim == 0: + node = [node] + # New set parameters + parameters = {} + for j, parameter in enumerate(uncertain_parameters): + parameters[parameter] = node[j] + model_parameters.append(parameters) + return model_parameters From 2c87210802ab2e377b0939482ee6bf65af630bc3 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Tue, 11 May 2021 21:57:29 +0200 Subject: [PATCH 22/26] plot stddev --- src/uncertainpy/plotting/plot_uncertainty.py | 27 +++++++++++++++----- 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 3d02ee2f..236a022e 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -1507,6 +1507,7 @@ def features_2d(self): def feature_0d(self, feature, sensitivity="first", + error="variance", hardcopy=True, palette="colorblind", show=False, @@ -1526,6 +1527,8 @@ def feature_0d(self, order Sobol indices, while "sobol_total" and "total" are the total order Sobol indices. If None, no sensitivity is plotted. Default is "first". + error: {"variance", "stddev"}, optional + Plot either variance or standard deviation (square root of variance). hardcopy : bool, optional If the plot should be saved to file. Default is True. 
show : bool, optional @@ -1572,10 +1575,16 @@ def feature_0d(self, width = 0.2 distance = 0.5 - xlabels = ["Mean", "Variance", "$P_5$", "$P_{95}$"] + error_label = "Variance" + if error == "stddev": + error_label = "SD" + xlabels = ["Mean", error_label, "$P_5$", "$P_{95}$"] xticks = [0, width, distance + width, distance + 2 * width] - values = [self.data[feature].mean, self.data[feature].variance, + error_value = self.data[feature].variance + if error == "stddev": + error_value = np.sqrt(error_value) + values = [self.data[feature].mean, error_value, self.data[feature].percentile_5, self.data[feature].percentile_95] ylabel = self.data.get_labels(feature)[0] @@ -1796,7 +1805,7 @@ def average_sensitivity_all(self, title=title, show=show) - def features_0d(self, sensitivity="first", hardcopy=True, show=False): + def features_0d(self, sensitivity="first", error="variance", hardcopy=True, show=False): """ Plot the results for all 0 dimensional model/features. @@ -1806,6 +1815,8 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): Which Sobol indices to plot. "sobol_first" and "first" is the first order Sobol indices, while "sobol_total" and "total" are the total order Sobol indices. Default is "first". + error: {"variance", "stddev"}, optional + Plot either variance or standard deviation (square root of variance). hardcopy : bool, optional If the plot should be saved to file. Default is True. show : bool, optional @@ -1824,7 +1835,7 @@ def features_0d(self, sensitivity="first", hardcopy=True, show=False): for feature in self.data: if self.data.ndim(feature) == 0: - self.feature_0d(feature, sensitivity=sensitivity, hardcopy=hardcopy, show=show) + self.feature_0d(feature, sensitivity=sensitivity, error=error, hardcopy=hardcopy, show=show) # # TODO Not Tested # def plot_folder(self, data_dir): @@ -1900,13 +1911,15 @@ def plot_all_sensitivities(self): self.average_sensitivity_all(sensitivity="total") self.average_sensitivity_grid(sensitivity="total") - def plot_condensed(self, sensitivity="first"): + def plot_condensed(self, error="variance", sensitivity="first"): """ Plot the subset of data that shows all information in the most concise way, with the chosen sensitivity. Parameters ---------- + error: {"variance", "stddev"}, optional + Plot either variance or standard deviation (square root of variance). sensitivity : {"sobol_first", "first", "sobol_total", "total"}, optional Which Sobol indices to plot. 
"sobol_first" and "first" is the first order Sobol indices, while "sobol_total" and "total" are the total @@ -1928,13 +1941,15 @@ def plot_condensed(self, sensitivity="first"): for feature in self.data: if self.data.ndim(feature) == 1: + if error != "variance": + raise NotImplementedError("For 1D features, the standard deviation plot has not yet been implemented.") self.mean_variance_1d(feature=feature) self.prediction_interval_1d(feature=feature) if sensitivity in self.data[feature]: self.sensitivity_1d_grid(feature=feature, sensitivity=sensitivity) - self.features_0d(sensitivity=sensitivity) + self.features_0d(sensitivity=sensitivity, error=error) self.features_2d() if sensitivity is not None: From 845f93a1f3f33f8918c32b66e48e6091940454eb Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Sat, 29 May 2021 15:51:38 +0200 Subject: [PATCH 23/26] make legend more flexible --- src/uncertainpy/plotting/plot_uncertainty.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index 236a022e..dd7d3640 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -84,7 +84,7 @@ def __init__(self, setup_module_logger(class_instance=self, level=logger_level) - font_settings = ["fontsize", "linewidth", "titlesize", "labelsize", "dpi"] + font_settings = ["fontsize", "linewidth", "titlesize", "labelsize", "dpi", "max_legend_size"] for k in font_settings: if k in kwargs: setattr(self, k, kwargs[k]) @@ -99,6 +99,8 @@ def __init__(self, self.labelsize = labelsize elif k == "dpi": self.dpi = None + elif k == "max_legend_size": + self.max_legend_size = None def load(self, filename): """ @@ -1565,6 +1567,10 @@ def feature_0d(self, logger.warning(msg.format(data_type=data_type, feature=feature)) return + if self.max_legend_size is not None and isinstance(self.max_legend_size, int): + max_legend_size = self.max_legend_size + + if len(self.data.uncertain_parameters) > max_legend_size: legend_size = max_legend_size else: @@ -1631,7 +1637,8 @@ def feature_0d(self, self.uncertain_names, loc="upper center", bbox_to_anchor=location, - ncol=legend_size) + ncol=legend_size, + fontsize=self.fontsize) fig = plt.gcf() fig.subplots_adjust(top=(0.91 - legend_width * 0.053)) From 11d72d15e74d71628feb77f4897659dd00d16f6c Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Wed, 28 Sep 2022 23:22:07 +0200 Subject: [PATCH 24/26] fix --- src/uncertainpy/plotting/plot_uncertainty.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/uncertainpy/plotting/plot_uncertainty.py b/src/uncertainpy/plotting/plot_uncertainty.py index dd7d3640..a2430455 100644 --- a/src/uncertainpy/plotting/plot_uncertainty.py +++ b/src/uncertainpy/plotting/plot_uncertainty.py @@ -2226,8 +2226,8 @@ def prediction_interval_sensitivity_1d(self, ax.set_yscale(yscale) # plot sensitivity - number_colors = len(self.data[feature][sensitivity]) - number_colors += 2 + number_colors = len(self.data[feature][sensitivity]) + number_colors += 2 ax2 = ax.twinx() colors = sns.color_palette(palette, n_colors=number_colors) color = 0 @@ -2236,16 +2236,16 @@ def prediction_interval_sensitivity_1d(self, axescolor = "black" if color_axes: spines_color(ax2, edges={"top": "None", "bottom": "None", - "right": colors[color_2], "left": "None"}) + "right": colors[color_2], "left": "None"}) axescolor = colors[color_2] - + ax2.grid(False) ax2.tick_params(axis="y", which="both", 
right=True, left=False, labelright=True, color=axescolor, labelcolor=axescolor, labelsize=self.labelsize) ax2.set_ylabel(title_tmp, color=axescolor, fontsize=self.labelsize) # use markers for secondary axis - marker_list = itertools.cycle(('+', 'o', '*', 'v', '<', '>', 's', 'x')) + marker_list = itertools.cycle(('+', 'o', '*', 'v', '<', '>', 's', 'x')) i = 0 color_idx = 0 @@ -2281,10 +2281,10 @@ def prediction_interval_sensitivity_1d(self, ax2.spines["right"].set_visible(True) ax2.spines["right"].set_edgecolor(axescolor) if color_axes: - axescolor=colors[color] + axescolor = colors[color] ax.tick_params(axis="y", color=axescolor, labelcolor=axescolor) ax.spines["left"].set_edgecolor(axescolor) - ax.set_ylabel(ylabel + ", mean", color=axescolor, fontsize=self.labelsize) + ax.set_ylabel(ylabel, color=axescolor, fontsize=self.labelsize) ax2.set_xlim([min(time), max(time)]) ax.set_xlim([min(time), max(time)]) From fe055490df391376befc4e79249e551f42e24492 Mon Sep 17 00:00:00 2001 From: Julius Zimmermann Date: Wed, 28 Sep 2022 23:23:31 +0200 Subject: [PATCH 25/26] modernise code, nicer plot style --- src/uncertainpy/data.py | 9 ++++++--- src/uncertainpy/parameters.py | 6 +++++- src/uncertainpy/plotting/prettyplot/prettyplot.py | 3 ++- test.py | 8 ++++++-- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/src/uncertainpy/data.py b/src/uncertainpy/data.py index 74479d8d..c6ccf1fb 100644 --- a/src/uncertainpy/data.py +++ b/src/uncertainpy/data.py @@ -2,7 +2,10 @@ import six import os -import collections +try: + from collections import MutableMapping +except ImportError: + from collections.abc import MutableMapping import numpy as np @@ -11,7 +14,7 @@ from ._version import __version__ -class DataFeature(collections.MutableMapping): +class DataFeature(MutableMapping): """ Store the results of each statistical metric calculated from the uncertainty quantification and sensitivity analysis for a single model/feature. @@ -291,7 +294,7 @@ def ndim(self): return None -class Data(collections.MutableMapping): +class Data(MutableMapping): """ Store the results of each statistical metric calculated from the uncertainty quantification and sensitivity analysis for each model/features. diff --git a/src/uncertainpy/parameters.py b/src/uncertainpy/parameters.py index a24c73af..a2df484f 100755 --- a/src/uncertainpy/parameters.py +++ b/src/uncertainpy/parameters.py @@ -5,6 +5,10 @@ import fileinput import sys import collections +try: + from collections import MutableMapping +except ImportError: + from collections.abc import MutableMapping import chaospy as cp @@ -145,7 +149,7 @@ def __str__(self): -class Parameters(collections.MutableMapping): +class Parameters(MutableMapping): """ A collection of parameters. 
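The try/except import above keeps Data and Parameters working on Python 3.10+, where the legacy ABC aliases were dropped from the collections module. Below is a small sketch of the same compatibility pattern with collections.abc tried first, which avoids the DeprecationWarning that the legacy alias emits on late 3.x releases that still provide it; the DataFeatureLike class is purely illustrative and only shows the MutableMapping contract these containers implement.

    try:
        from collections.abc import MutableMapping  # Python 3.3+
    except ImportError:
        from collections import MutableMapping      # Python 2 fallback

    class DataFeatureLike(MutableMapping):
        """Toy mapping implementing the five abstract MutableMapping methods."""
        def __init__(self):
            self._store = {}

        def __getitem__(self, key):
            return self._store[key]

        def __setitem__(self, key, value):
            self._store[key] = value

        def __delitem__(self, key):
            del self._store[key]

        def __iter__(self):
            return iter(self._store)

        def __len__(self):
            return len(self._store)

Either import order works; trying collections.abc first simply means no warning shows up on the versions that still ship the deprecated alias.
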
diff --git a/src/uncertainpy/plotting/prettyplot/prettyplot.py b/src/uncertainpy/plotting/prettyplot/prettyplot.py index 4ab8cb40..35ea8137 100644 --- a/src/uncertainpy/plotting/prettyplot/prettyplot.py +++ b/src/uncertainpy/plotting/prettyplot/prettyplot.py @@ -113,7 +113,8 @@ def set_axestyle(): "axes.linewidth": 1, "axes.spines.right": False, "axes.spines.top": False, - "axes.unicode_minus": True + "axes.unicode_minus": True, + "axes.formatter.useoffset": False } plt.rcParams.update(params) diff --git a/test.py b/test.py index 01758db1..78610a9e 100644 --- a/test.py +++ b/test.py @@ -1,7 +1,11 @@ import unittest import sys import click -import collections +try: + from collections import Iterable +except ImportError: + from collections.abc import Iterable + import matplotlib matplotlib.use('Agg') @@ -26,7 +30,7 @@ def create_test_suite_parameter(testcase, parameter=False): def to_iterable(iterable): - if not isinstance(iterable, collections.Iterable): + if not isinstance(iterable, Iterable): iterable = [iterable] return iterable From 64ed8a2160722410a51a685e9c8f8b83e69de7ee Mon Sep 17 00:00:00 2001 From: Lam Vien Che Date: Thu, 17 Apr 2025 16:35:07 +0200 Subject: [PATCH 26/26] Updated uncertainty_calculations and prettyplot with new changes --- .../core/uncertainty_calculations.py | 53 +++++++++++++++---- .../plotting/prettyplot/prettyplot.py | 4 +- 2 files changed, 44 insertions(+), 13 deletions(-) diff --git a/src/uncertainpy/core/uncertainty_calculations.py b/src/uncertainpy/core/uncertainty_calculations.py index 8c4ea298..9fe87de3 100644 --- a/src/uncertainpy/core/uncertainty_calculations.py +++ b/src/uncertainpy/core/uncertainty_calculations.py @@ -141,7 +141,33 @@ def parameters(self, new_parameters): self.runmodel.parameters = self.parameters - + def calc_sens(self, dim, poly, uhat): + variance = np.sum(uhat[1:]**2) + main = np.zeros(dim) + total = np.zeros(dim) + #Compute sens + for idx, pol in enumerate(poly[1:]): + add_total = [False,]*dim + add_main = [True,]*dim + for term in pol.exponents: + for var in range(dim): + if term[var] > 0: + add_total[var] = True + add_main[var] = add_main[var] and True + add_main[0:var] = [False,]*var + try: + add_main[var+1::] = [False,]*(dim-var) + except IndexError: + pass + for var in range(dim): + if add_main[var]: + main[var] += uhat[idx+1]**2 + if add_total[var]: + total[var] += uhat[idx+1]**2 + + main = main/variance + total = total/variance + return main, total def convert_uncertain_parameters(self, uncertain_parameters=None): @@ -409,7 +435,7 @@ def get_UQ_samples(self, method, uncertain_parameters, polynomial_order, quadrat sparse=True) return uncertain_parameters, distribution, P, nodes, weights elif method == "collocation": - P = cp.orth_ttr(polynomial_order, distribution) + P = cp.orth_ttr(polynomial_order, distribution, normed=True) nr_collocation_nodes = quadrature_order if nr_collocation_nodes is None: nr_collocation_nodes = 2 * len(P) + 2 @@ -675,8 +701,8 @@ def create_PCE_collocation(self, masked_evaluations, mask, masked_nodes = self.create_masked_nodes(data, feature, nodes) if (np.all(mask) or allow_incomplete) and sum(mask) > 0: - U_hat[feature] = cp.fit_regression(P, masked_nodes, - masked_evaluations) + U_hat[feature], approx[feature] = cp.fit_regression(P, masked_nodes, + masked_evaluations, retall=1) elif not allow_incomplete: logger.warning("{}: not all parameter combinations give results.".format(feature) + " No uncertainty quantification is performed since allow_incomplete=False") @@ -688,7 +714,7 @@ def 
create_PCE_collocation(self, if not np.all(mask): data.incomplete.append(feature) - return U_hat, distribution, data + return U_hat, approx, distribution, data, P def create_PCE_spectral_rosenblatt(self, @@ -998,7 +1024,7 @@ def create_PCE_collocation_rosenblatt(self, return U_hat, dist_R, data - def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4, save_samples=False): + def analyse_PCE(self, U_hat, approx, distribution, data, P, polynomial_order, nr_samples=10**4, save_samples=False): """ Calculate the statistical metrics from the polynomial chaos approximation. @@ -1058,6 +1084,7 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4, save_samples= logger.info("Only 1 uncertain parameter. Sensitivities are not calculated") U_mc = {} + samples = distribution.sample(nr_samples, "M") for feature in tqdm(data, desc="Calculating statistics from PCE", total=len(data)): @@ -1065,13 +1092,17 @@ def analyse_PCE(self, U_hat, distribution, data, nr_samples=10**4, save_samples= data[feature].mean = cp.E(U_hat[feature], distribution) data[feature].variance = cp.Var(U_hat[feature], distribution) - samples = distribution.sample(nr_samples, "M") + if len(data.uncertain_parameters) > 1: U_mc[feature] = U_hat[feature](*samples) - data[feature].sobol_first = cp.Sens_m(U_hat[feature], distribution) - data[feature].sobol_total = cp.Sens_t(U_hat[feature], distribution) + #data[feature].sobol_first = cp.Sens_m(U_hat[feature], distribution) + e = np.array(U_hat[feature].exponents, dtype=int) + dim = e.shape[1] + poly = P + data[feature].sobol_first, data[feature].sobol_total = self.calc_sens(dim, poly, approx[feature]) + #data[feature].sobol_total = cp.Sens_t(U_hat[feature], distribution) data = self.average_sensitivity(data, sensitivity="sobol_first") data = self.average_sensitivity(data, sensitivity="sobol_total") @@ -1384,7 +1415,7 @@ def polynomial_chaos(self, nr_collocation_nodes=nr_collocation_nodes, allow_incomplete=allow_incomplete) else: - U_hat, distribution, data = \ + U_hat, approx, distribution, data, P = \ self.create_PCE_collocation(uncertain_parameters=uncertain_parameters, polynomial_order=polynomial_order, nr_collocation_nodes=nr_collocation_nodes, @@ -1417,7 +1448,7 @@ def polynomial_chaos(self, else: raise ValueError("No polynomial chaos method with name {}".format(method)) - data = self.analyse_PCE(U_hat, distribution, data, + data = self.analyse_PCE(U_hat, approx, distribution, data, P, polynomial_order=polynomial_order, nr_samples=nr_pc_mc_samples, save_samples=save_samples) diff --git a/src/uncertainpy/plotting/prettyplot/prettyplot.py b/src/uncertainpy/plotting/prettyplot/prettyplot.py index 35ea8137..a8b459c4 100644 --- a/src/uncertainpy/plotting/prettyplot/prettyplot.py +++ b/src/uncertainpy/plotting/prettyplot/prettyplot.py @@ -76,7 +76,7 @@ def set_linestyle(): params = {"lines.linewidth": linewidth, "lines.linestyle": "solid", - "lines.marker": None, + "lines.marker": '.', "lines.antialiased": True, "lines.markersize": markersize, "lines.markeredgewidth": markeredgewidth, @@ -656,7 +656,7 @@ def prettyBar(x, error=None, linewidth=0, ax=None, new_figure=True, - style="seaborn-dark", + style="seaborn-v0_8-darkgrid", palette="hls", nr_colors=6, align="center",
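
For reference, the Sobol decomposition that calc_sens implements can be written compactly with NumPy: for an orthonormal basis (hence the normed=True now passed to cp.orth_ttr in the collocation path) the variance is the sum of the squared expansion coefficients, the first-order index of a parameter collects the terms that depend on that parameter alone, and the total index collects every term in which it appears. The sketch below illustrates that formula for a scalar quantity of interest and is not a drop-in replacement; exponents stands for the multi-index array of the basis (U_hat[feature].exponents in the patch) and coefficients for the Fourier coefficients returned as the second value of cp.fit_regression(..., retall=1), stored above as approx[feature].

    import numpy as np

    def sobol_from_orthonormal_pce(exponents, coefficients):
        """First- and total-order Sobol indices of an orthonormal PCE."""
        exponents = np.asarray(exponents, dtype=int)
        coefficients = np.asarray(coefficients, dtype=float)

        # The constant term carries the mean, not variance, so drop it.
        nonconstant = exponents.sum(axis=1) > 0
        exps = exponents[nonconstant]
        coefs = coefficients[nonconstant]

        variance = np.sum(coefs**2)
        nr_parameters = exps.shape[1]

        first = np.zeros(nr_parameters)
        total = np.zeros(nr_parameters)
        for var in range(nr_parameters):
            involves = exps[:, var] > 0
            others_absent = np.delete(exps, var, axis=1).sum(axis=1) == 0
            first[var] = np.sum(coefs[involves & others_absent]**2) / variance
            total[var] = np.sum(coefs[involves]**2) / variance

        return first, total

With normed=True every E[phi_k^2] equals one, which is why the squared coefficients can be summed directly; for a non-normalized orthogonal basis each term would have to be weighted by the norm of its polynomial.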