Skip to content

Commit c89de4d

Browse files
committed
unnecessary logging is blocked
1 parent fb9ef0a commit c89de4d

File tree

4 files changed

+19
-268
lines changed

4 files changed

+19
-268
lines changed

fault-localization/artifact.ipynb

Lines changed: 11 additions & 262 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
},
1919
{
2020
"cell_type": "code",
21-
"execution_count": 1,
21+
"execution_count": null,
2222
"metadata": {},
2323
"outputs": [],
2424
"source": [
@@ -34,30 +34,9 @@
3434
},
3535
{
3636
"cell_type": "code",
37-
"execution_count": 2,
37+
"execution_count": null,
3838
"metadata": {},
39-
"outputs": [
40-
{
41-
"name": "stderr",
42-
"output_type": "stream",
43-
"text": [
44-
"/home/gulzar/anaconda3/envs/torch/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
45-
" from .autonotebook import tqdm as notebook_tqdm\n",
46-
"Global seed set to 786\n",
47-
"Global seed set to 786\n"
48-
]
49-
},
50-
{
51-
"data": {
52-
"text/plain": [
53-
"786"
54-
]
55-
},
56-
"execution_count": 2,
57-
"metadata": {},
58-
"output_type": "execute_result"
59-
}
60-
],
39+
"outputs": [],
6140
"source": [
6241
"import logging\n",
6342
"import time\n",
@@ -92,7 +71,7 @@
9271
},
9372
{
9473
"cell_type": "code",
95-
"execution_count": 3,
74+
"execution_count": null,
9675
"metadata": {},
9776
"outputs": [],
9877
"source": [
@@ -130,7 +109,7 @@
130109
},
131110
{
132111
"cell_type": "code",
133-
"execution_count": 4,
112+
"execution_count": null,
134113
"metadata": {},
135114
"outputs": [],
136115
"source": [
@@ -151,212 +130,9 @@
151130
},
152131
{
153132
"cell_type": "code",
154-
"execution_count": 5,
133+
"execution_count": null,
155134
"metadata": {},
156-
"outputs": [
157-
{
158-
"name": "stdout",
159-
"output_type": "stream",
160-
"text": [
161-
"\n",
162-
"\n",
163-
" ***Simulating FL setup iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001 ***\n",
164-
"Files already downloaded and verified\n",
165-
"Files already downloaded and verified\n",
166-
"Spliting Datasets 50000 into parts:[10000, 10000, 10000, 10000, 10000]\n",
167-
"input shape, torch.Size([1, 3, 32, 32])\n",
168-
"Training : .storage/checkpoints/iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001/faulty_client_0_noise_rate_1_classes.ckpt\n"
169-
]
170-
},
171-
{
172-
"name": "stderr",
173-
"output_type": "stream",
174-
"text": [
175-
"Using 16bit native Automatic Mixed Precision (AMP)\n",
176-
"GPU available: True (cuda), used: True\n",
177-
"TPU available: False, using: 0 TPU cores\n",
178-
"IPU available: False, using: 0 IPUs\n",
179-
"HPU available: False, using: 0 HPUs\n"
180-
]
181-
},
182-
{
183-
"name": "stdout",
184-
"output_type": "stream",
185-
"text": [
186-
"Train mod batch = 272, and drop_last = False\n"
187-
]
188-
},
189-
{
190-
"name": "stderr",
191-
"output_type": "stream",
192-
"text": [
193-
"LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
194-
]
195-
},
196-
{
197-
"name": "stdout",
198-
"output_type": "stream",
199-
"text": [
200-
"Epoch 4: 100%|██████████| 20/20 [00:05<00:00, 3.47it/s, loss=2.35, train_acc=0.130, train_loss=2.360, val_acc=0.0961, val_loss=2.460] "
201-
]
202-
},
203-
{
204-
"name": "stderr",
205-
"output_type": "stream",
206-
"text": [
207-
"`Trainer.fit` stopped: `max_epochs=5` reached.\n"
208-
]
209-
},
210-
{
211-
"name": "stdout",
212-
"output_type": "stream",
213-
"text": [
214-
"Epoch 4: 100%|██████████| 20/20 [00:05<00:00, 3.47it/s, loss=2.35, train_acc=0.130, train_loss=2.360, val_acc=0.0961, val_loss=2.460]\n",
215-
"Training : .storage/checkpoints/iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001/client_1.ckpt\n"
216-
]
217-
},
218-
{
219-
"name": "stderr",
220-
"output_type": "stream",
221-
"text": [
222-
"Using 16bit native Automatic Mixed Precision (AMP)\n",
223-
"GPU available: True (cuda), used: True\n",
224-
"TPU available: False, using: 0 TPU cores\n",
225-
"IPU available: False, using: 0 IPUs\n",
226-
"HPU available: False, using: 0 HPUs\n",
227-
"LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
228-
]
229-
},
230-
{
231-
"name": "stdout",
232-
"output_type": "stream",
233-
"text": [
234-
"Train mod batch = 272, and drop_last = False\n",
235-
"Epoch 4: 100%|██████████| 20/20 [00:06<00:00, 3.17it/s, loss=0.585, train_acc=0.812, train_loss=0.597, val_acc=0.729, val_loss=0.810]"
236-
]
237-
},
238-
{
239-
"name": "stderr",
240-
"output_type": "stream",
241-
"text": [
242-
"`Trainer.fit` stopped: `max_epochs=5` reached.\n"
243-
]
244-
},
245-
{
246-
"name": "stdout",
247-
"output_type": "stream",
248-
"text": [
249-
"Epoch 4: 100%|██████████| 20/20 [00:06<00:00, 3.17it/s, loss=0.585, train_acc=0.812, train_loss=0.597, val_acc=0.729, val_loss=0.810]\n",
250-
"Training : .storage/checkpoints/iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001/client_2.ckpt\n"
251-
]
252-
},
253-
{
254-
"name": "stderr",
255-
"output_type": "stream",
256-
"text": [
257-
"Using 16bit native Automatic Mixed Precision (AMP)\n",
258-
"GPU available: True (cuda), used: True\n",
259-
"TPU available: False, using: 0 TPU cores\n",
260-
"IPU available: False, using: 0 IPUs\n",
261-
"HPU available: False, using: 0 HPUs\n",
262-
"LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
263-
]
264-
},
265-
{
266-
"name": "stdout",
267-
"output_type": "stream",
268-
"text": [
269-
"Train mod batch = 272, and drop_last = False\n",
270-
"Epoch 4: 100%|██████████| 20/20 [00:06<00:00, 3.21it/s, loss=0.544, train_acc=0.817, train_loss=0.516, val_acc=0.745, val_loss=0.840]"
271-
]
272-
},
273-
{
274-
"name": "stderr",
275-
"output_type": "stream",
276-
"text": [
277-
"`Trainer.fit` stopped: `max_epochs=5` reached.\n"
278-
]
279-
},
280-
{
281-
"name": "stdout",
282-
"output_type": "stream",
283-
"text": [
284-
"Epoch 4: 100%|██████████| 20/20 [00:06<00:00, 3.21it/s, loss=0.544, train_acc=0.817, train_loss=0.516, val_acc=0.745, val_loss=0.840]\n",
285-
"Training : .storage/checkpoints/iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001/client_3.ckpt\n"
286-
]
287-
},
288-
{
289-
"name": "stderr",
290-
"output_type": "stream",
291-
"text": [
292-
"Using 16bit native Automatic Mixed Precision (AMP)\n",
293-
"GPU available: True (cuda), used: True\n",
294-
"TPU available: False, using: 0 TPU cores\n",
295-
"IPU available: False, using: 0 IPUs\n",
296-
"HPU available: False, using: 0 HPUs\n",
297-
"LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
298-
]
299-
},
300-
{
301-
"name": "stdout",
302-
"output_type": "stream",
303-
"text": [
304-
"Train mod batch = 272, and drop_last = False\n",
305-
"Epoch 4: 100%|██████████| 20/20 [00:07<00:00, 2.85it/s, loss=0.54, train_acc=0.817, train_loss=0.539, val_acc=0.743, val_loss=0.842] "
306-
]
307-
},
308-
{
309-
"name": "stderr",
310-
"output_type": "stream",
311-
"text": [
312-
"`Trainer.fit` stopped: `max_epochs=5` reached.\n"
313-
]
314-
},
315-
{
316-
"name": "stdout",
317-
"output_type": "stream",
318-
"text": [
319-
"Epoch 4: 100%|██████████| 20/20 [00:07<00:00, 2.84it/s, loss=0.54, train_acc=0.817, train_loss=0.539, val_acc=0.743, val_loss=0.842]\n",
320-
"Training : .storage/checkpoints/iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001/client_4.ckpt\n"
321-
]
322-
},
323-
{
324-
"name": "stderr",
325-
"output_type": "stream",
326-
"text": [
327-
"Using 16bit native Automatic Mixed Precision (AMP)\n",
328-
"GPU available: True (cuda), used: True\n",
329-
"TPU available: False, using: 0 TPU cores\n",
330-
"IPU available: False, using: 0 IPUs\n",
331-
"HPU available: False, using: 0 HPUs\n",
332-
"LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
333-
]
334-
},
335-
{
336-
"name": "stdout",
337-
"output_type": "stream",
338-
"text": [
339-
"Train mod batch = 272, and drop_last = False\n",
340-
"Epoch 4: 100%|██████████| 20/20 [00:05<00:00, 3.51it/s, loss=0.558, train_acc=0.803, train_loss=0.622, val_acc=0.744, val_loss=0.849]"
341-
]
342-
},
343-
{
344-
"name": "stderr",
345-
"output_type": "stream",
346-
"text": [
347-
"`Trainer.fit` stopped: `max_epochs=5` reached.\n"
348-
]
349-
},
350-
{
351-
"name": "stdout",
352-
"output_type": "stream",
353-
"text": [
354-
"Epoch 4: 100%|██████████| 20/20 [00:05<00:00, 3.50it/s, loss=0.558, train_acc=0.803, train_loss=0.622, val_acc=0.744, val_loss=0.849]\n",
355-
"Total clients: 5\n",
356-
"++Training is done: iid_resnet50_cifar10_clients_5_faulty_[0]_bsize_512_epochs_5_lr_0.001\n"
357-
]
358-
}
359-
],
135+
"outputs": [],
360136
"source": [
361137
"# FL training\n",
362138
"c2ms, exp2info = trainFLMain(args)\n",
@@ -366,18 +142,9 @@
366142
},
367143
{
368144
"cell_type": "code",
369-
"execution_count": 6,
145+
"execution_count": null,
370146
"metadata": {},
371-
"outputs": [
372-
{
373-
"name": "stdout",
374-
"output_type": "stream",
375-
"text": [
376-
"> Running FaultyClientLocalization ..\n",
377-
"Same prediction threshold 5\n"
378-
]
379-
}
380-
],
147+
"outputs": [],
381148
"source": [
382149
"# Fault localization to find potential faulty clients\n",
383150
"potential_faulty_clients, _, _ = runFaultyClientLocalization(\n",
@@ -386,27 +153,9 @@
386153
},
387154
{
388155
"cell_type": "code",
389-
"execution_count": 7,
156+
"execution_count": null,
390157
"metadata": {},
391-
"outputs": [
392-
{
393-
"name": "stdout",
394-
"output_type": "stream",
395-
"text": [
396-
"+++ Faulty Clients {0}\n",
397-
"+++ Faulty Clients {0}\n",
398-
"+++ Faulty Clients {0}\n",
399-
"+++ Faulty Clients {0}\n",
400-
"+++ Faulty Clients {0}\n",
401-
"+++ Faulty Clients {0}\n",
402-
"+++ Faulty Clients {0}\n",
403-
"+++ Faulty Clients {0}\n",
404-
"+++ Faulty Clients {0}\n",
405-
"+++ Faulty Clients {0}\n",
406-
"Fault Localization Accuracy: 100.0\n"
407-
]
408-
}
409-
],
158+
"outputs": [],
410159
"source": [
411160
"# Fault localization accuracy \n",
412161
"acc = evaluateFaultLocalization(\n",

fault-localization/utils/FLSimulation.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,10 @@
1010
from .dl_models import ImageClassifer, initialize_model
1111
from .fl_datasets import *
1212

13-
logging.basicConfig(filename='example.log', level=logging.ERROR)
14-
logger = logging.getLogger("pytorch_lightning")
13+
# logging.basicConfig(filename='example.log', level=logging.ERROR)
14+
# logger = logging.getLogger("pytorch_lightning")
15+
logging.getLogger("pytorch_lightning").setLevel(logging.WARNING)
16+
1517
seed_everything(786)
1618

1719

@@ -126,7 +128,7 @@ def getFLClientsDatasets():
126128
stringID2intID[k] = normal_id
127129

128130
data_config["single_input_shape"] = valid[0][0].unsqueeze(0).shape
129-
print(f'input shape, {data_config["single_input_shape"]}')
131+
# print(f'input shape, {data_config["single_input_shape"]}')
130132
# return
131133

132134
model_config["classes"] = num_classes

fault-localization/utils/faulty_client_localization/InferenceGuidedInputs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ class InferenceGuidedInputs:
1111
def __init__(self, clients2models, shape, randomGenerator, apply_transform, dname=None, k_gen_inputs=10, min_nclients_same_pred=5, time_delta=60):
1212
self.clients2models = clients2models
1313
self.min_nclients_same_pred = 3 #min_nclients_same_pred
14-
print(f"Same prediction threshold {self.min_nclients_same_pred}")
14+
# print(f"Same prediction threshold {self.min_nclients_same_pred}")
1515
self.same_seqs_set = set()
1616
self.k_gen_inputs = k_gen_inputs
1717
self.size = 1024

fault-localization/utils/fl_datasets.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,8 +273,8 @@ def __init__(self, train_dataset, val_dataset, batch_size, num_workers=4) -> Non
273273
print(
274274
f"Dropping last batch because of uneven data size: {len(self.train_dataset)} % {self.batch_size} == 1")
275275

276-
print(
277-
f"Train mod batch = {len(train_dataset) % batch_size}, and drop_last = {self.drop_last}")
276+
# print(
277+
# f"Train mod batch = {len(train_dataset) % batch_size}, and drop_last = {self.drop_last}")
278278

279279
def train_dataloader(self):
280280
return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True, shuffle=True, drop_last=self.drop_last)

0 commit comments

Comments
 (0)