diff --git a/setup.cfg b/setup.cfg
index e35a98e..8338662 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,7 @@ long_description_content_type = text/markdown
 packages = find_namespace:
 package_dir =
     =src
 include_package_data = true
-python_requires = >= 3.9
+python_requires = >= 3.10
 # Dependencies are in setup.py for GitHub's dependency graph.
 [options.packages.find]
diff --git a/src/MaCh3PythonUtils/machine_learning/file_ml_interface.py b/src/MaCh3PythonUtils/machine_learning/file_ml_interface.py
index ffc5c1e..793ab3b 100644
--- a/src/MaCh3PythonUtils/machine_learning/file_ml_interface.py
+++ b/src/MaCh3PythonUtils/machine_learning/file_ml_interface.py
@@ -213,8 +213,6 @@ def test_model(self):

         print("Training Results!")
         train_prediction = self.model_predict(self._training_data)
-        print(self._training_data)
-        print(train_prediction)
         train_as_numpy = self.scale_labels(self._training_labels).T[0]
         self.evaluate_model(train_prediction, train_as_numpy, "train_qq_plot.pdf")

diff --git a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_interface.py b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_interface.py
index 449273b..06f4d04 100644
--- a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_interface.py
+++ b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_interface.py
@@ -5,7 +5,7 @@
 import numpy as np
 import tensorflow.keras as tfk
 import tensorflow_probability as tfp
-
+from typing import Iterable


 class TfInterface(FileMLInterface):
@@ -36,7 +36,7 @@ def add_layer(self, layer_id: str, layer_args: dict):

         self._layers.append(self.__TF_LAYER_IMPLEMENTATIONS[layer_id.lower()](**layer_args))

-    def build_model(self, _: dict):
+    def build_model(self, **kwargs):
         return None


@@ -93,11 +93,14 @@ def model_predict(self, test_data: pd.DataFrame):
         if self._model is None:
             return np.zeros(len(test_data))

-        pred = self._model.predict(scaled_data, verbose=False)
-        print(f"PREDICTION: {pred}")
-        # return self._model.predict(scaled_data, verbose=False).T[0]
-        return pred
+        return self._model.predict(scaled_data, verbose=False).T[0]

     def model_predict_no_scale(self, test_data):
         # Same as above but specifically for TF, optimised to avoid if statement...
         return self._model(test_data, training=False)
+
+    def evaluate_model(self, predicted_values: Iterable, true_values: Iterable, outfile: str = ""):
+
+        # CODE TO DO TF SPECIFIC PLOTS GOES HERE
+
+        return super().evaluate_model(predicted_values, true_values, outfile)
\ No newline at end of file
diff --git a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_normalizing_flow_model.py b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_normalizing_flow_model.py
index 930ea3f..dc09714 100644
--- a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_normalizing_flow_model.py
+++ b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_normalizing_flow_model.py
@@ -54,10 +54,10 @@ def __call__(self):


 class TfNormalizingFlowModel(TfManualInterface):
-    def build_model(self, model_args):
+    def build_model(self, **kwargs):
         input_dim = self.chain.ndim-1
-        self._model = NormalizingFlow(model_args.get("hidden_units", [100]), model_args.get("n_bijectors", 1), input_dim)()
-        self._optimizer = tfk.optimizers.Adam(model_args.get("learning_rate", 1e-3))
+        self._model = NormalizingFlow(kwargs.get("hidden_units", [100]), kwargs.get("n_bijectors", 1), input_dim)()
+        self._optimizer = tfk.optimizers.Adam(kwargs.get("learning_rate", 1e-3))

     def nll_loss(self, features):
         return -tf.reduce_mean(self._model.log_prob(features))
diff --git a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_residual_model.py b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_residual_model.py
index 2c235c9..3e537b6 100644
--- a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_residual_model.py
+++ b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_residual_model.py
@@ -3,7 +3,7 @@


 class TfResidualModel(TfManualLayeredInterface):
-    def build_model(self, model_args: dict):
+    def build_model(self, **kwargs):
         input_shape = self.training_data.shape[1:]  # Assuming shape is (batch_size, features)

         network_input = tfk.layers.Input(shape=input_shape)
@@ -23,9 +23,9 @@ def build_model(self, model_args: dict):

         # Define and compile the model
         self._model = tfk.Model(inputs=network_input, outputs=x)

-        optimizer = tfk.optimizers.AdamW(learning_rate=model_args.get("learning_rate", 1e-5),
+        optimizer = tfk.optimizers.AdamW(learning_rate=kwargs.get("learning_rate", 1e-5),
                                          weight_decay=1e-4, clipnorm=1.0)
-        _ = model_args.pop("learning_rate", None)
+        _ = kwargs.pop("learning_rate", None)

-        self._model.compile(optimizer=optimizer, **model_args)
\ No newline at end of file
+        self._model.compile(optimizer=optimizer, **kwargs)
\ No newline at end of file
diff --git a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_sequential_model.py b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_sequential_model.py
index 4a88408..cbab8e2 100644
--- a/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_sequential_model.py
+++ b/src/MaCh3PythonUtils/machine_learning/tensorflow/tf_sequential_model.py
@@ -4,11 +4,11 @@


 class TfSequentialModel(TfManualLayeredInterface):
-    def build_model(self, model_args: dict):
+    def build_model(self, **kwargs):
         """Build and compile TF model

-        :param model_args: Model arguments as dictionary
-        :type model_args: dict
+        :param kwargs: Model arguments as dictionary
+        :type kwargs: dict
         :raises ValueError: Model not set up yet
         """
         self._model = tfk.Sequential()
@@ -20,10 +20,10 @@ def build_model(self, model_args: dict):

             self._model.add(layer)

         self._model.build()

-        optimizer = tfk.optimizers.AdamW(learning_rate=model_args.get("learning_rate", 1e-5),
+        optimizer = tfk.optimizers.AdamW(learning_rate=kwargs.get("learning_rate", 1e-5),
                                          weight_decay=1e-4, clipnorm=1.0)
-        model_args.pop("learning_rate", None)
+        kwargs.pop("learning_rate", None)

-        self._model.compile(**model_args, optimizer=optimizer)
\ No newline at end of file
+        self._model.compile(**kwargs, optimizer=optimizer)
\ No newline at end of file
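Note for reviewers: this patch changes every `build_model` implementation to take its settings as keyword arguments (`**kwargs`) instead of a single `model_args` dict. The minimal sketch below illustrates the new calling convention; it is a self-contained stand-in, not code from this repository, and the settings keys shown are only examples.

```python
# Illustrative stand-in only -- not MaCh3PythonUtils code. It mirrors how the new
# **kwargs signatures consume the same settings that used to arrive as `model_args`.
def build_model(**kwargs):
    learning_rate = kwargs.get("learning_rate", 1e-5)  # read for the optimizer
    kwargs.pop("learning_rate", None)                  # remaining keys go to compile()
    print(f"optimizer lr={learning_rate}, compile args={kwargs}")

model_settings = {"learning_rate": 1e-4, "loss": "mse", "metrics": ["mae"]}

# Old call site: build_model(model_settings)
# New call site: the dict is unpacked into keyword arguments.
build_model(**model_settings)
```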