diff --git a/Get_predictions_2.py b/Get_predictions_2.py
new file mode 100644
index 0000000..b28b587
--- /dev/null
+++ b/Get_predictions_2.py
@@ -0,0 +1,135 @@
+
+#%% import relevant modules
+import sys
+import numpy as np
+import torch
+import glob  # finds all pathnames matching a specified pattern according to the rules used by the Unix shell,
+# although the results are returned in arbitrary order
+import monai  # built on top of PyTorch
+from monai.transforms import Compose, LoadImaged
+from monai.data import list_data_collate
+# I added these
+import pytorch_lightning
+
+#%% import files
+# insert path of the modules folder
+# sys.path.insert(0, 'D:/eigenvector-grouping')
+
+# import the modules directly
+from lightning_module import SegmentatorModule
+from src.transforms.transforms import (
+    PointcloudRandomSubsampled,
+    ExtractSegmentationLabeld,
+    ToFloatTensord
+)
+
+
+#%% import data
+# change the directory for (vein_only) and (artery_vein)
+files_root = glob.glob(r'D:\Data\LungSegmentations\PointClouds(artery_only)\PointClouds\Test_set\*')
+test_dict = [{"input": file} for file in files_root[:2]]  # only two files
+# test_dict = [{"input": file} for file in files_root]
+
+#%% try lightning example
+# https://lightning.ai/forums/t/how-to-load-and-use-model-checkpoint-ckpt/677
+model = SegmentatorModule()  # note: SegmentatorModule expects a config argument (see lightning_module.py)
+trainer = pytorch_lightning.Trainer()
+chk_path = "D:/eigenvector-grouping/.data/output/01-20-2023-13-49-42/checkpoint/epoch=191-step=3264.ckpt"
+model2 = SegmentatorModule.load_from_checkpoint(chk_path)
+results = trainer.test(model=model2, datamodule=my_datamodule, verbose=True)  # my_datamodule is a placeholder from the forum example
+trainer = pytorch_lightning.Trainer()
+trainer.fit(model)
+
+# automatically loads the best weights for you
+trainer.test(model)
+
+
+#%% try the train loader etc. from lightning_module.py, as it should have the same structure
+# https://lightning.ai/docs/pytorch/stable/data/datamodule.html
+test_transforms = Compose([
+    LoadImaged(keys=["input"], reader="NumpyReader"),
+    PointcloudRandomSubsampled(keys=["input"], sub_size=20_000),
+    ExtractSegmentationLabeld(pcd_key="input"),
+    ToFloatTensord(keys=["input", "label"]),
+])
+
+test_dataset = monai.data.CacheDataset(
+    data=test_dict,
+    transform=test_transforms,
+    cache_rate=1.0,
+    num_workers=10,
+)
+
+test_loader = monai.data.DataLoader(
+    test_dataset,
+    batch_size=2,
+    shuffle=True,
+    num_workers=10,
+    collate_fn=list_data_collate,
+)
+
+#%% another lightning example
+chk_path = "D:/eigenvector-grouping/.data/output/01-20-2023-13-49-42/checkpoint/epoch=191-step=3264.ckpt"
+model = SegmentatorModule.load_from_checkpoint(chk_path)
+trainer = pytorch_lightning.Trainer()
+trainer.test(model, dataloaders=test_loader)
+
+
+#%% test transforms
+# Compose chains a series of callables together sequentially. Each transform in the sequence must take a
+# single argument and return a single value.
+test_transforms = Compose([
+    LoadImaged(keys=["input"], reader="NumpyReader"),
+    # PointcloudRandomSubsampled(keys=["input"], sub_size=20_000),
+    ExtractSegmentationLabeld(pcd_key="input"),
+    ToFloatTensord(keys=["input", "label"])
+])
+
+
+#%% test dataset
+# dataset with a cache mechanism that can load data and cache the results of deterministic transforms during training
+test_dataset = monai.data.CacheDataset(
+    data=test_dict,
+    transform=test_transforms,
+    cache_rate=1.0,
+    num_workers=1,  # 10; the number of worker threads used to compute the cache during initialization
+)
+
+
+#%% test loader
+# provides an iterable over the given dataset
+multiprocessing_context_name = 'fork'
+test_loader = monai.data.DataLoader(
+    test_dataset,
+    batch_size=1,
+    # shuffle=True,
+    num_workers=10,
+    collate_fn=list_data_collate,
+    # multiprocessing_context=multiprocessing_context_name,
+)
+
+#%% import model from checkpoint
+# use a different checkpoint_path if needed
+checkpoint_path = "D:/eigenvector-grouping/.data/output/01-20-2023-13-49-42/checkpoint/epoch=191-step=3264.ckpt"
+model = SegmentatorModule.load_from_checkpoint(checkpoint_path)
+# don't use .to(device) --> https://lightning.ai/docs/pytorch/stable/common/lightning_module.html
+# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+# model.to(device)
+
+
+#%% evaluate model
+model.eval()
+i = 0
+# model.to(device)
+for batch_data in test_loader:
+    input_tensor = batch_data["input"]
+    # input_tensor.to(device)
+    label_pred = model(input_tensor)
+    i += 1
+    # tensor = label_pred.detach().numpy()
+    # tensor = tensor[0, :, :, :].squeeze()
+
+
+# %%
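A minimal sketch of how the evaluation loop above could turn the raw network output into per-point labels. It assumes the model returns class logits with the class dimension second, i.e. shape (batch, classes, npoints); that layout is an assumption, not something this diff confirms. `model` and `test_loader` are the objects built in Get_predictions_2.py above.

import torch

model.eval()
predictions = []
with torch.no_grad():                          # no gradients needed for inference
    for batch_data in test_loader:
        logits = model(batch_data["input"])    # assumed shape: (batch, classes, npoints)
        labels = torch.argmax(logits, dim=1)   # predicted class index per point
        predictions.append(labels.cpu().numpy())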
diff --git a/config.py b/config.py
index 66b2733..0554d4b 100644
--- a/config.py
+++ b/config.py
@@ -27,26 +27,32 @@ class TrainSegmentatorConfig:
     seed: int = 0
 
     # Paths
-    input_dir: str = os.path.join(".data", "input")
+    # input_dir: str = os.path.join(".data", "input")  # artery_vein, Laurens, normtot is used
+    input_dir: str = os.path.join(".data", "input(artery_only)")  # artery_only, normtot
+    # input_dir: str = "D:\eigenvector-grouping\.data\input(artery_only)"
+    # input_dir: str = os.path.join(".data", "input(vein_only)")  # veins_only, normtot
+    # input_dir: str = os.path.join(".data", "input(artery_vein)")  # artery_vein, normtot
     output_dir: str = os.path.join(".data", "output")
 
+
     # Data
-    size: int = 20_000
+    size: int = 20_000  # number of points selected per point cloud, e.g. 95% of the points
     split: str = None
 
     # Dataloader
     batch_size: int = 2
-    num_workers: int = 10
+    num_workers: int = 1  # 10  # number of subprocesses used for data loading; 0 means the data is
+    # loaded in the main process; choose this based on the number of CPUs available
 
     # Model
     model: object = PointNet2EVG
     model_type: str = "radius"
-    ncomponents: int = 0
-    features: int = 0
-    classes: int = 3
+    ncomponents: int = 0
+    features: int = 0  # I don't know what this variable does
+    classes: int = 2  # 3  # do I need to change this when changing the input data? I did
 
     # Trainer
-    epochs: int = 200
+    epochs: int = 20  # 200
 
     # Optimizer
     optimizer: object = OptimizerConfig(
diff --git a/lightning_module.py b/lightning_module.py
index 23f8b18..3dfff1d 100644
--- a/lightning_module.py
+++ b/lightning_module.py
@@ -30,7 +30,7 @@ def __init__(self, config):
         super().__init__()
 
         set_determinism(seed=config.seed)
-        self.save_hyperparameters()
+        # self.save_hyperparameters()
 
         self.size = config.size
         self.input_dir = config.input_dir
@@ -41,7 +41,10 @@ def __init__(self, config):
         self.model = config.model(config)
 
         self.loss_fn = nn.CrossEntropyLoss()
-        self.confmat = ConfusionMatrix(num_classes=2)
+        self.confmat = ConfusionMatrix(task="multiclass", num_classes=2)
+
+        # save hyperparameters to self.hparams, auto-logged by wandb
+        self.save_hyperparameters()
 
     def prepare_data(self):
diff --git a/pointnet2_ops_lib/setup.py b/pointnet2_ops_lib/setup.py
index 0cea284..689b6ed 100644
--- a/pointnet2_ops_lib/setup.py
+++ b/pointnet2_ops_lib/setup.py
@@ -16,7 +16,7 @@
 exec(open(osp.join("pointnet2_ops", "_version.py")).read())
 
-os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5;8.0;8.6"
+os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0;11.7"
 setup(
     name="pointnet2_ops",
     version=__version__,
diff --git a/random_tries.py b/random_tries.py
new file mode 100644
index 0000000..3c73e18
--- /dev/null
+++ b/random_tries.py
@@ -0,0 +1,27 @@
+from dataclasses import dataclass
+from config import TrainSegmentatorConfig
+
+
+@dataclass
+class OptimizerHyperparamsConfig:
+    lr: float = 0.0005
+    weight_decay: float = 0
+    betas: tuple = (0.9, 0.999)
+    amsgrad: bool = False
+
+
+print(OptimizerHyperparamsConfig.lr)
+
+OptimizerHyperparamsConfig.lr = 0.0025
+
+print(OptimizerHyperparamsConfig.lr)
+
+# config
+config = TrainSegmentatorConfig()
+
+max_epoch = config.epochs
+print(max_epoch)
+
+config.epochs = 100
+print(max_epoch)
+print(config.epochs)
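A side note on the config experiments above: assuming TrainSegmentatorConfig is a dataclass (random_tries.py instantiates it with no arguments, and the hyperparameter scripts later in this diff pass keyword arguments to it), individual runs can probably override fields at construction time instead of editing the defaults in config.py in place. A small sketch under that assumption; the field names are the ones shown in the config.py hunk above.

from config import TrainSegmentatorConfig

# per-run overrides instead of editing the defaults in config.py
config = TrainSegmentatorConfig(epochs=20, classes=2, num_workers=1)
print(config)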
diff --git a/requirements.txt b/requirements.txt
index bbb80fb..72fd467 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117
 pytorch-lightning
 monai
 tensorboard
diff --git a/src/models/pointnet2_evg.py b/src/models/pointnet2_evg.py
index aae7d44..dc60419 100644
--- a/src/models/pointnet2_evg.py
+++ b/src/models/pointnet2_evg.py
@@ -28,41 +28,41 @@ def build_radius(self, classes: int, features: int):
             [
                 PointnetSAModuleEVG(
                     npoint=2048,
-                    knn_nsamples=[64, 64],
-                    vec_radii=[0.05, 0.1],
-                    vec_lengths=[0.01, 0.02],
+                    knn_nsamples=[64, 64],
+                    vec_radii=[0.80, 1.6],
+                    vec_lengths=[0.16, 0.32],
                     vec_nsamples=[64, 128],
                     mlps=[[features, 16, 16, 32], [features, 32, 32, 64]],
                 ),
                 PointnetSAModuleEVG(
                     npoint=1024,
                     knn_nsamples=[16, 16],
-                    vec_radii=[0.1, 0.2],
-                    vec_lengths=[0.01, 0.02],
+                    vec_radii=[1.6, 3.2],
+                    vec_lengths=[0.16, 0.32],
                     vec_nsamples=[32, 64],
                     mlps=[[32 + 64, 64, 64, 96], [32 + 64, 64, 96, 96]],
                 ),
                 PointnetSAModuleEVG(
                     npoint=256,
                     knn_nsamples=[8, 8],
-                    vec_radii=[0.2, 0.4],
-                    vec_lengths=[0.01, 0.02],
+                    vec_radii=[3.2, 6.4],
+                    vec_lengths=[0.16, 0.32],
                     vec_nsamples=[32, 64],
                     mlps=[[96 + 96, 64, 64, 128], [96 + 96, 64, 96, 128]],
                 ),
                 PointnetSAModuleEVG(
                     npoint=64,
                     knn_nsamples=[8, 8],
-                    vec_radii=[0.4, 0.6],
-                    vec_lengths=[0.01, 0.02],
+                    vec_radii=[6.4, 9.6],
+                    vec_lengths=[0.16, 0.32],
                     vec_nsamples=[32, 64],
                     mlps=[[128 + 128, 128, 196, 256], [128 + 128, 128, 196, 256]],
                 ),
                 PointnetSAModuleEVG(
                     npoint=16,
                     knn_nsamples=[4, 4],
-                    vec_radii=[0.4, 0.8],
-                    vec_lengths=[0.01, 0.02],
+                    vec_radii=[6.4, 12.8],
+                    vec_lengths=[0.16, 0.32],
                     vec_nsamples=[32, 64],
                     mlps=[[256 + 256, 256, 256, 512], [256 + 256, 256, 384, 512]],
                 ),
diff --git a/train.py b/train.py
index a2ec437..e5d7cd3 100644
--- a/train.py
+++ b/train.py
@@ -9,11 +9,12 @@
 from lightning_module import SegmentatorModule
 from config import TrainSegmentatorConfig
 
+
 def train():
     """Train surface model"""
 
     # Load config
-    config = TrainSegmentatorConfig()
+    config = TrainSegmentatorConfig()  # change hyperparameters here
     np.random.seed(config.seed)
 
     # Init out directories
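Before the Ray Tune script that follows: Tune calls the trainable itself once per trial with the sampled hyperparameter dict, so the function (not the value returned by calling it) is what should be handed to tune.with_parameters / tune.run. A minimal sketch of that wiring; the mapping of sampled values onto config.optimizer.hyperparams is hypothetical and only mirrors the field names used elsewhere in this diff.

from ray import tune

from config import TrainSegmentatorConfig
from lightning_module import SegmentatorModule


def train_hp_loop(search_space, num_epochs=200):
    # Tune passes one sampled hyperparameter dict per trial as the first argument
    config = TrainSegmentatorConfig()
    config.optimizer.hyperparams.lr = search_space["lr"]                      # hypothetical mapping
    config.optimizer.hyperparams.weight_decay = search_space["weight_decay"]  # hypothetical mapping
    net = SegmentatorModule(config)
    # ... fit `net` with a pl.Trainer carrying a TuneReportCheckpointCallback that reports "loss" to Tune


search_space = {"lr": tune.loguniform(1e-7, 1e-1), "weight_decay": tune.choice([0, 0.001, 0.005, 0.01])}
trainable = tune.with_parameters(train_hp_loop, num_epochs=20)  # pass the callable; extra constants as kwargs
analysis = tune.run(trainable, config=search_space, metric="loss", mode="min", num_samples=10)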
diff --git a/train_hp_optimization.py b/train_hp_optimization.py
new file mode 100644
index 0000000..fa4e71c
--- /dev/null
+++ b/train_hp_optimization.py
@@ -0,0 +1,244 @@
+# remove self.save_hyperparameters() from lightning_module.py
+# add an absolute path in config.py
+
+import os
+import datetime
+
+import numpy as np
+import pytorch_lightning
+from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
+from pytorch_lightning.loggers import TensorBoardLogger
+
+from lightning_module import SegmentatorModule
+from config import TrainSegmentatorConfig
+
+from ray import air, tune
+from ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback
+# https://towardsdatascience.com/how-to-tune-pytorch-lightning-hyperparameters-80089a281646
+from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
+from ray.tune import CLIReporter
+
+import torch
+
+
+# functions for the first try
+def train_hp_loop(search_space):
+
+    # Load config
+    # change hyperparameters here
+    # config = TrainSegmentatorConfig(optimizer=OptimizerConfig(hyperparams=OptimizerHyperparamsConfig(lr=0.0025)))
+    config = TrainSegmentatorConfig()
+    np.random.seed(config.seed)
+
+    # Init out directories
+    ct = datetime.datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
+    logs_path = os.path.join(config.output_dir, ct, "logs")
+    checkpoint_path = os.path.join(config.output_dir, ct, "checkpoint")
+
+    os.makedirs(logs_path, exist_ok=True)
+    os.makedirs(os.path.join(logs_path, "lightning_logs"), exist_ok=True)
+    os.makedirs(checkpoint_path, exist_ok=True)
+    print("\nLogging to:", logs_path)
+
+    # Init model
+    net = SegmentatorModule(config)
+
+    # Set up loggers and checkpoints
+    tb_logger = TensorBoardLogger(save_dir=str(logs_path))
+    checkpoint_callback = ModelCheckpoint(
+        dirpath=checkpoint_path,
+        monitor="val_loss",
+        mode="min",
+        save_last=True,
+    )
+
+    # Initialise Lightning's trainer.
+    metrics = {"loss": "ptl/val_loss", "acc": "ptl/val_accuracy"}  # added
+    trainer = pytorch_lightning.Trainer(
+        accelerator="auto",  # "gpu"  # I changed it to auto
+        max_epochs=config.epochs,
+        logger=tb_logger,
+        # callbacks=[checkpoint_callback],  # commented out
+        num_sanity_val_steps=1,
+        # callbacks=[TuneReportCallback(metrics, on="validation_end")],  # added
+        callbacks=[checkpoint_callback, TuneReportCheckpointCallback(metrics, on="validation_end")],  # added
+    )
+
+    # Fit model
+    trainer.fit(net)
+
+    # best_model_path = checkpoint_callback.best_model_path
+    # print(f"Best model path: {best_model_path}")
+    # return best_model_path
+
+
+def train(num_samples=10, num_epochs=200, gpus_per_trial=0):
+    """Train surface model"""
+
+    # data_dir = os.path.join(tempfile.gettempdir(), "mnist_data_")
+    # # Download data
+    # MNISTDataModule(data_dir=data_dir).prepare_data()
+
+    # define the search space for the hyperparameters
+    search_space = {
+        "lr": tune.loguniform(1e-7, 1e-1),
+        "weight_decay": tune.choice([0, 0.001, 0.005, 0.01]),
+        # "betas": tune.choice([(0.9, 0.999)]),  # wrap the tuple in a list to pass it to tune.choice
+        "amsgrad": tune.choice([True, False]),
+        "batch_size": tune.choice([2]),
+        "epochs": tune.uniform(5, 50)  # 10-200
+    }
+
+    trainable = tune.with_parameters(
+        train_hp_loop,  # pass the callable itself; Tune calls it with each sampled search_space
+        # data_dir=data_dir,
+        # num_epochs=config.epochs,
+        # num_gpus=gpus_per_trial
+    )
+
+    analysis = tune.run(
+        trainable,
+        resources_per_trial={
+            "cpu": 1,
+            "gpu": gpus_per_trial
+        },
+        metric="loss",
+        mode="min",
+        config=search_space,
+        num_samples=num_samples,
+        name="tune_mnist")
+
+    print(analysis.best_config)
+
+    best_model_path = analysis.best_checkpoint
+    print(f"Best checkpoint: {best_model_path}")
+    return best_model_path
+
+
+# functions for the second try
+def train_tune(search_space, num_epochs=200, num_gpus=0):
+    # https://docs.ray.io/en/latest/tune/examples/tune-pytorch-lightning.html#putting-it-together
+
+    config = TrainSegmentatorConfig()
+    np.random.seed(config.seed)
+
+    # adapt config to the corresponding parameters of search_space
+
+    model = SegmentatorModule(config)
+    ct = datetime.datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
+    # logs_path = os.path.join(config.output_dir, ct, "logs")
+    logs_path = os.path.join(ct, "lightning_logs")
+    # checkpoint_path = os.path.join(config.output_dir, ct, "checkpoint")
+    checkpoint_path = os.path.join(ct, "checkpoint")
+    os.makedirs(logs_path, exist_ok=True)
+    os.makedirs(os.path.join(logs_path, "lightning_logs"), exist_ok=True)
+    os.makedirs(checkpoint_path, exist_ok=True)
+    print("\nLogging to:", logs_path)
+
+    checkpoint_callback = ModelCheckpoint(
+        dirpath=checkpoint_path,
+        monitor="val_loss",
+        mode="min",
+        save_last=True,
+    )
+
+    metrics = {"loss": "ptl/val_loss", "mean_accuracy": "ptl/val_accuracy"}
+    # default_root_dir=root_dir,
+    # callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), tune_report_callback]
+    tune_report_callback = TuneReportCheckpointCallback(
+        metrics,
+        filename="ray_ckpt",
+        on="validation_end",
+    )
+
+    trainer = pytorch_lightning.Trainer(
+        max_epochs=config.epochs,
+        # If fractional GPUs are passed in, convert to int.
+        # gpus=math.ceil(num_gpus),  # commented out
+        accelerator="auto",  # "gpu"  # I changed it to auto
+        logger=TensorBoardLogger(save_dir=str(logs_path)),
+        enable_progress_bar=False,
+        # callbacks=[
+        #     TuneReportCallback(metrics, on="validation_end")
+        #     # TuneReportCheckpointCallback(
+        #     #     metrics,
+        #     #     filename="ray_ckpt",
+        #     #     on="validation_end")
+        # ]
+        # callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"), tune_report_callback]
+        callbacks=[checkpoint_callback, tune_report_callback]
+    )
+    trainer.fit(model)
+
+    # session.report({"acc": accuracy, "metric_foo": random_metric_1, "bar": metric_2})
+    print("------------------ Model trained ------------------")
+
+
+def train_ASHA(num_samples=10, num_epochs=200, gpus_per_trial=0):
+    # define the search space for the hyperparameters
+    search_space = {
+        "lr": tune.loguniform(1e-7, 1e-1),
+        "weight_decay": tune.choice([0, 0.001, 0.005, 0.01]),
+        # "betas": tune.choice([(0.9, 0.999)]),  # wrap the tuple in a list to pass it to tune.choice
+        "amsgrad": tune.choice([True, False]),
+        "batch_size": tune.choice([2]),
+        "epochs": tune.randint(5, 50)  # 10-200
+    }
+
+    # maybe also try Population Based Training
+    # look into this with other values of the parameters
+    scheduler = ASHAScheduler(
+        max_t=num_epochs,       # max time units per trial; trials are stopped after max_t time units
+                                # (determined by time_attr) have passed
+        grace_period=1,         # only stop trials that are at least this old
+        reduction_factor=2      # sets the halving rate and amount
+    )
+
+    train_with_parameters = tune.with_parameters(train_tune, num_epochs=num_epochs, num_gpus=gpus_per_trial)
+
+    resources_per_trial = {"cpu": 1, "gpu": gpus_per_trial}
+    reporter = CLIReporter(
+        parameter_columns=["lr", "weight_decay", "amsgrad", "batch_size", "epochs"],
+        metric_columns=["loss", "mean_accuracy", "training_iteration"])
+
+    tuner = tune.Tuner(
+        tune.with_resources(
+            train_with_parameters,
+            resources=resources_per_trial
+        ),
+        tune_config=tune.TuneConfig(
+            metric="val_loss",  # change back to "loss"
+            mode="min",
+            scheduler=scheduler,
+            num_samples=num_samples,
+        ),
+        run_config=air.RunConfig(
+            name="tune_asha",
+            local_dir=TrainSegmentatorConfig().output_dir,
+            progress_reporter=reporter,  # import CLIReporter for this
+        ),
+        param_space=search_space,
+    )
+    # tuner.report()
+    results = tuner.fit()
+
+    print('insert results.get_best_result().config here')
+    # if results.get_best_result().config is None:
+    #     print('No best trial found for the given metric: loss. This means that no trial has reported this metric or all values are NaN')
+    # else:
+    #     print("Best hyperparameters found were: ", results.get_best_result().config)
+
+
+# run the train functions (only the first OR the second try)
+if __name__ == "__main__":
+
+    num_samples = 2  # 10  # how many trials are performed
+    gpus_per_trial = 1 if torch.cuda.is_available() else 0
+    num_epochs = 200  # max, otherwise it takes too long
+
+    # first try
+    # train(num_samples, num_epochs, gpus_per_trial)
+
+    # second try
+    train_ASHA(num_samples, num_epochs, gpus_per_trial)
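A note on the "No best trial found for the given metric" situation handled above: the keys that TuneReportCheckpointCallback forwards to Tune must map to metric names the LightningModule actually logs via self.log. The "ptl/val_loss" / "ptl/val_accuracy" names come from Ray's MNIST example; since the ModelCheckpoint in this changeset monitors "val_loss", the mapping below is a plausible fix, assuming SegmentatorModule logs its validation loss under that name (this diff does not confirm it).

from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback

# keys: names reported to Tune; values: names logged by the LightningModule (assumed to be "val_loss")
metrics = {"loss": "val_loss"}
tune_report_callback = TuneReportCheckpointCallback(metrics, filename="ray_ckpt", on="validation_end")
# tune.TuneConfig(metric="loss", mode="min") then refers to the reported key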
diff --git a/train_hp_optimization_wandb.py b/train_hp_optimization_wandb.py
new file mode 100644
index 0000000..99f0438
--- /dev/null
+++ b/train_hp_optimization_wandb.py
@@ -0,0 +1,134 @@
+# add self.save_hyperparameters() in lightning_module.py
+# remove the absolute path and add a relative path in config.py
+# set num_workers in config.py
+
+import os
+import datetime
+import wandb
+
+import numpy as np
+import pytorch_lightning
+from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
+from pytorch_lightning.loggers import TensorBoardLogger
+from pytorch_lightning.loggers import WandbLogger
+
+from lightning_module import SegmentatorModule
+from config import TrainSegmentatorConfig
+
+
+def train():
+    """Train surface model"""
+
+    # Load the default config (from config.py)
+    config_default = TrainSegmentatorConfig()  # change the default hyperparameters in config.py
+    np.random.seed(config_default.seed)
+
+    # Init out directories
+    ct = datetime.datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
+    logs_path = os.path.join(config_default.output_dir, ct, "logs")
+    checkpoint_path = os.path.join(config_default.output_dir, ct, "checkpoint")
+
+    os.makedirs(logs_path, exist_ok=True)
+    os.makedirs(os.path.join(logs_path, "lightning_logs"), exist_ok=True)
+    os.makedirs(checkpoint_path, exist_ok=True)
+    print("\nLogging to:", logs_path)
+
+    # # Init model
+    # net = SegmentatorModule(config)
+
+    # Set up loggers and checkpoints
+    # tb_logger = TensorBoardLogger(save_dir=str(logs_path))  # commented out
+    # wandb.init(project="hp-optimization", config=config_default)  # commented out
+    # config = config_default
+    # config = wandb.config  # commented out
+    wandb_logger = WandbLogger(project='hp-optimization')
+    print(wandb.config['batch_size'])
+    print(wandb.config['epochs'])
+    hp_sweep = wandb.config
+    print(hp_sweep)
+    dict_key_hp_sweep = list(hp_sweep.keys())
+    # print(dict_key_hp_sweep)
+
+    # update the hyperparameters in config_default, which is then used to construct the lightning module
+    # caveat: lr, weight_decay etc. were not put in config.py itself, so those cannot be updated there at the moment
+    if 'model_type' in dict_key_hp_sweep:
+        config_default.model_type = hp_sweep['model_type']
+    if 'size' in dict_key_hp_sweep:
+        config_default.size = hp_sweep['size']
+    if 'split' in dict_key_hp_sweep:
+        config_default.split = hp_sweep['split']
+    if 'batch_size' in dict_key_hp_sweep:
+        config_default.batch_size = hp_sweep['batch_size']
+    if 'num_workers' in dict_key_hp_sweep:
+        config_default.num_workers = hp_sweep['num_workers']
+    if 'ncomponents' in dict_key_hp_sweep:
+        config_default.ncomponents = hp_sweep['ncomponents']
+    if 'features' in dict_key_hp_sweep:
+        config_default.features = hp_sweep['features']
+    if 'epochs' in dict_key_hp_sweep:
+        config_default.epochs = hp_sweep['epochs']
+    if 'optimizer' in dict_key_hp_sweep:
+        # to use a different optimizer, import it first in config.py
+        config_default.optimizer.optim = hp_sweep['optimizer']
+    if 'lr' in dict_key_hp_sweep:
+        config_default.optimizer.hyperparams.lr = hp_sweep['lr']
+    if 'weight_decay' in dict_key_hp_sweep:
+        config_default.optimizer.hyperparams.weight_decay = hp_sweep['weight_decay']
+    if 'beta1' in dict_key_hp_sweep and 'beta2' in dict_key_hp_sweep:
+        config_default.optimizer.hyperparams.betas = (hp_sweep['beta1'], hp_sweep['beta2'])
+    if 'amsgrad' in dict_key_hp_sweep:
+        config_default.optimizer.hyperparams.amsgrad = bool(hp_sweep['amsgrad'])
+
+    print(config_default)
+
+    # wandb_logger = WandbLogger()  # commented out
+    # wandb_logger.experiment.config["batch_size"] = config.batch_size  # commented out
+    checkpoint_callback = ModelCheckpoint(
+        dirpath=checkpoint_path,
+        monitor="val_loss",
+        mode="min",
+        save_last=True,
+    )
+
+    # Init model
+    net = SegmentatorModule(config_default)
+
+    # Initialise Lightning's trainer.
+    trainer = pytorch_lightning.Trainer(
+        accelerator="gpu",
+        max_epochs=config_default.epochs,  # 200  # reduced because of training time
+        logger=wandb_logger,
+        callbacks=[checkpoint_callback],
+        num_sanity_val_steps=1,
+    )
+
+    # Fit model
+    trainer.fit(net)
+
+    best_model_path = checkpoint_callback.best_model_path
+    print(f"Best model path: {best_model_path}")
+    return best_model_path
+
+
+if __name__ == "__main__":
+    sweep_configuration = {
+        'method': 'random',
+        'metric': {'goal': 'minimize', 'name': 'val_loss'},
+        'parameters':
+        {
+            "size": {'min': 20_000, 'max': 30_000},  # 'max': 80_000  # use the larger values on a faster computer
+            "batch_size": {'value': 2},  # {'values': [1, 2, 4]}  # use the larger values on a faster computer
+            # "model_type": {'values': ["radius", "knn"]},  # knn does not seem to work yet
+            "epochs": {'min': 3, 'max': 10},  # 10-200
+            "lr": {'min': 1e-7, 'max': 1e-1},
+            "weight_decay": {"min": 0.0, 'max': 0.5},  # {'values': [0, 0.001, 0.005, 0.01]},
+            "beta1": {'min': 0.5, 'max': 1.0},
+            "beta2": {'min': 0.5, 'max': 1.0},
+            "amsgrad": {"values": [True, False]},  # booleans, because bool('False') would evaluate to True
+        }
+    }
+
+    sweep_id = wandb.sweep(sweep_configuration, project='hp-optimization')
+    wandb.agent(sweep_id=sweep_id, function=train, count=3)
+
+    # train()
\ No newline at end of file
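For reference, a minimal sketch of the wandb sweep pattern the script above follows. It relies on the run being initialised inside the function that the agent executes so that wandb.config is filled with the trial's sampled values; in the script above that initialisation is left to WandbLogger, which is expected to call wandb.init() under the hood, so if wandb.config ever comes back empty, an explicit wandb.init() at the top of train() is the documented fallback. The parameter names here are illustrative, not taken from the project.

import wandb


def train():
    wandb.init()                       # inside wandb.agent, attaches to the sweep trial and fills wandb.config
    lr = wandb.config["lr"]            # sampled value for this trial
    epochs = wandb.config["epochs"]
    print(f"training with lr={lr}, epochs={epochs}")  # build the module / trainer with these values


sweep_configuration = {
    "method": "random",
    "metric": {"goal": "minimize", "name": "val_loss"},
    "parameters": {
        "lr": {"min": 1e-7, "max": 1e-1},
        "epochs": {"min": 3, "max": 10},
    },
}

sweep_id = wandb.sweep(sweep_configuration, project="hp-optimization")
wandb.agent(sweep_id=sweep_id, function=train, count=3)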