diff --git a/art/estimators/classification/pytorch.py b/art/estimators/classification/pytorch.py
index c1fc0117f4..9c43f36cfe 100644
--- a/art/estimators/classification/pytorch.py
+++ b/art/estimators/classification/pytorch.py
@@ -375,6 +375,7 @@ def fit(  # pylint: disable=W0221
         training_mode: bool = True,
         drop_last: bool = False,
         scheduler: Optional["torch.optim.lr_scheduler._LRScheduler"] = None,
+        verbose: bool = False,
         **kwargs,
     ) -> None:
         """
@@ -390,14 +391,13 @@ def fit(  # pylint: disable=W0221
                           the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
                           the last batch will be smaller. (default: ``False``)
         :param scheduler: Learning rate scheduler to run at the start of every epoch.
-        :param kwargs: Dictionary of framework-specific arguments. Currently supports "display_progress_bar" to
-                       display training progress.
+        :param verbose: Whether to display progress bar information.
+        :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
+                       and providing it takes no effect.
         """
         import torch
         from torch.utils.data import TensorDataset, DataLoader
 
-        display_progress_bar = kwargs.get("display_progress_bar", False)
-
         # Set model mode
         self._model.train(mode=training_mode)
 
@@ -419,8 +419,8 @@ def fit(  # pylint: disable=W0221
         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last)
 
         # Start training
-        for _ in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
-            for x_batch, y_batch in tqdm(dataloader, disable=not display_progress_bar, desc="Batches"):
+        for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
+            for x_batch, y_batch in tqdm(dataloader, disable=not verbose, desc="Batches"):
                 # Move inputs to device
                 x_batch = x_batch.to(self._device)
                 y_batch = y_batch.to(self._device)
@@ -456,20 +456,19 @@ def fit(  # pylint: disable=W0221
         if scheduler is not None:
             scheduler.step()
 
-    def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
+    def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None:
         """
         Fit the classifier using the generator that yields batches as specified.
 
         :param generator: Batch generator providing `(x, y)` for each epoch.
         :param nb_epochs: Number of epochs to use for training.
-        :param kwargs: Dictionary of framework-specific arguments. Currently supports "display_progress_bar" to
-                       display training progress.
+        :param verbose: Whether to display progress bar information.
+        :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for PyTorch
+                       and providing it takes no effect.
         """
         import torch
         from art.data_generators import PyTorchDataGenerator
 
-        display_progress_bar = kwargs.get("display_progress_bar", False)
-
         # Put the model in the training mode
         self._model.train()
 
@@ -490,8 +489,8 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                 == (0, 1)
             )
         ):
-            for _ in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
-                for i_batch, o_batch in tqdm(generator.iterator, disable=not display_progress_bar, desc="Batches"):
+            for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
+                for i_batch, o_batch in tqdm(generator.iterator, disable=not verbose, desc="Batches"):
                     if isinstance(i_batch, np.ndarray):
                         i_batch = torch.from_numpy(i_batch).to(self._device)
                     else:
@@ -500,7 +499,10 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                     if isinstance(o_batch, np.ndarray):
                         o_batch = torch.argmax(torch.from_numpy(o_batch).to(self._device), dim=1)
                     else:
-                        o_batch = torch.argmax(o_batch.to(self._device), dim=1)
+                        if o_batch.dim() > 1:
+                            o_batch = torch.argmax(o_batch.to(self._device), dim=1)
+                        else:
+                            o_batch = o_batch.to(self._device)
 
                     # Zero the parameter gradients
                     self._optimizer.zero_grad()
diff --git a/art/estimators/classification/tensorflow.py b/art/estimators/classification/tensorflow.py
index f4e2aece0f..16fc4d69d5 100644
--- a/art/estimators/classification/tensorflow.py
+++ b/art/estimators/classification/tensorflow.py
@@ -266,7 +266,9 @@ def predict(  # pylint: disable=W0221
 
         return predictions
 
-    def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
+    def fit(
+        self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
+    ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
 
@@ -275,6 +277,7 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
+        :param verbose: Whether to display progress bar information.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
                        TensorFlow and providing it takes no effect.
         """
@@ -298,12 +301,12 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
         ind = np.arange(len(x_preprocessed)).tolist()
 
         # Start training
-        for _ in range(nb_epochs):
+        for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
             # Shuffle the examples
             random.shuffle(ind)
 
             # Train for one epoch
-            for m in range(num_batch):
+            for m in tqdm(range(num_batch), disable=not verbose, desc="Batches"):
                 i_batch = x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
                 o_batch = y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]
 
@@ -314,13 +317,14 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                 # Run train step
                 self._sess.run(self.train, feed_dict=feed_dict)
 
-    def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
+    def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None:
         """
         Fit the classifier using the generator that yields batches as specified.
 
         :param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
                           training in TensorFlow, it will.
         :param nb_epochs: Number of epochs to use for training.
+        :param verbose: Whether to display progress bar information.
         :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
                        TensorFlow and providing it takes no effect.
         """
@@ -343,8 +347,8 @@ def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwarg
                 == (0, 1)
             )
         ):
-            for _ in range(nb_epochs):
-                for _ in range(int(generator.size / generator.batch_size)):  # type: ignore
+            for _ in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
+                for _ in tqdm(range(int(generator.size / generator.batch_size)), disable=not verbose, desc="Batches"):  # type: ignore
                     i_batch, o_batch = generator.get_batch()
 
                     if self._reduce_labels:
@@ -953,7 +957,9 @@ def _predict_framework(self, x: "tf.Tensor", training_mode: bool = False) -> "tf
 
         return self._model(x_preprocessed, training=training_mode)
 
-    def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
+    def fit(
+        self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, verbose: bool = False, **kwargs
+    ) -> None:
         """
         Fit the classifier on the training set `(x, y)`.
 
@@ -962,14 +968,13 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
                   shape (nb_samples,).
         :param batch_size: Size of batches.
         :param nb_epochs: Number of epochs to use for training.
+        :param verbose: Whether to display progress bar information.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently supports
                        "scheduler" which is an optional function that will be called at the end of every
-                       epoch to adjust the learning rate, and "display_progress_bar" to display training progress.
+                       epoch to adjust the learning rate.
         """
         import tensorflow as tf
 
-        display_progress_bar = kwargs.get("display_progress_bar", False)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -1006,29 +1011,28 @@ def train_step(model, images, labels):
 
         train_ds = tf.data.Dataset.from_tensor_slices((x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
 
-        for epoch in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
-            for images, labels in tqdm(train_ds, disable=not display_progress_bar, desc="Batches"):
+        for epoch in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
+            for images, labels in tqdm(train_ds, disable=not verbose, desc="Batches"):
                 train_step(self.model, images, labels)
 
             if scheduler is not None:
                 scheduler(epoch)
 
-    def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
+    def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, verbose: bool = False, **kwargs) -> None:
         """
         Fit the classifier using the generator that yields batches as specified.
 
         :param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
                           training in TensorFlow, it will.
         :param nb_epochs: Number of epochs to use for training.
+        :param verbose: Whether to display progress bar information.
         :param kwargs: Dictionary of framework-specific arguments. This parameter currently supports
                        "scheduler" which is an optional function that will be called at the end of every
-                       epoch to adjust the learning rate, and "display_progress_bar" to display training progress.
+                       epoch to adjust the learning rate.
         """
         import tensorflow as tf
         from art.data_generators import TensorFlowV2DataGenerator
 
-        display_progress_bar = kwargs.get("display_progress_bar", False)
-
         if self._train_step is None:  # pragma: no cover
             if self._loss_object is None:  # pragma: no cover
                 raise TypeError(
@@ -1068,8 +1072,8 @@ def train_step(model, images, labels):
                 == (0, 1)
             )
         ):
-            for epoch in tqdm(range(nb_epochs), disable=not display_progress_bar, desc="Epochs"):
-                for i_batch, o_batch in tqdm(generator.iterator, disable=not display_progress_bar, desc="Batches"):
+            for epoch in tqdm(range(nb_epochs), disable=not verbose, desc="Epochs"):
+                for i_batch, o_batch in tqdm(generator.iterator, disable=not verbose, desc="Batches"):
                     if self._reduce_labels:
                         o_batch = tf.math.argmax(o_batch, axis=1)
                     train_step(self._model, i_batch, o_batch)
diff --git a/tests/estimators/classification/test_deeplearning_common.py b/tests/estimators/classification/test_deeplearning_common.py
index 2f051ecf77..3b40966d3f 100644
--- a/tests/estimators/classification/test_deeplearning_common.py
+++ b/tests/estimators/classification/test_deeplearning_common.py
@@ -202,11 +202,7 @@ def get_lr(_):
 
         # Test a valid callback
         classifier, _ = image_dl_estimator(from_logits=True)
-        # Keras fit has its own kwarg arguments
-        if framework in ["kerastf", "keras"]:
-            kwargs = {"callbacks": [LearningRateScheduler(get_lr)]}
-        else:
-            kwargs = {"callbacks": [LearningRateScheduler(get_lr)], "display_progress_bar": True}
+        kwargs = {"callbacks": [LearningRateScheduler(get_lr)], "verbose": True}
         classifier.fit(x_train_mnist, y_train_mnist, batch_size=default_batch_size, nb_epochs=1, **kwargs)
 
         # Test failure for invalid parameters: does not apply to many frameworks which allow arbitrary kwargs