🐛 Fix a bug about batch_size.
futabato committed Nov 1, 2024
1 parent 6c9e8e8 commit 771de0f
Showing 2 changed files with 3 additions and 3 deletions.
src/federatedlearning/client/training.py (2 additions, 2 deletions)

@@ -58,12 +58,12 @@ def train_val_test(
        )
        validloader: DataLoader = DataLoader(
            DatasetSplit(dataset, idxs_val),
-           batch_size=int(len(idxs_val) / 10),
+           batch_size=self.cfg.train.local_batch_size,
            shuffle=False,
        )
        testloader: DataLoader = DataLoader(
            DatasetSplit(dataset, idxs_test),
-           batch_size=int(len(idxs_test) / 10),
+           batch_size=self.cfg.train.local_batch_size,
            shuffle=False,
        )
        return trainloader, validloader, testloader
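Why this change matters: the old validation and test loaders computed their batch size as int(len(idxs) / 10), which evaluates to 0 whenever a client's split holds fewer than 10 samples, and PyTorch's DataLoader rejects a batch size of 0 at construction time. Below is a minimal sketch of the failure and of the config-driven replacement; DatasetSplit and self.cfg.train.local_batch_size are the repository's own names, while the tiny dataset and the hard-coded value standing in for the config are illustrative assumptions.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-in for a small client validation split (7 samples, fewer than 10).
tiny_val = TensorDataset(torch.randn(7, 3), torch.zeros(7, dtype=torch.long))

# Old behaviour: int(7 / 10) == 0, and DataLoader rejects batch_size=0.
try:
    DataLoader(tiny_val, batch_size=int(len(tiny_val) / 10), shuffle=False)
except ValueError as err:
    print(err)  # "batch_size should be a positive integer value ..."

# New behaviour: reuse the configured batch size (hard-coded here as a
# stand-in for self.cfg.train.local_batch_size).
local_batch_size = 10
validloader = DataLoader(tiny_val, batch_size=local_batch_size, shuffle=False)
print(len(validloader))  # 1 batch containing all 7 samples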
src/federatedlearning/server/inferencing.py (1 addition, 1 deletion)

@@ -39,7 +39,7 @@ def inference(
    criterion = nn.NLLLoss().to(device)

    # Create DataLoader for the testing set
-   testloader = DataLoader(test_dataset, batch_size=128, shuffle=False)
+   testloader = DataLoader(test_dataset, batch_size=cfg.train.local_batch_size, shuffle=False)

    # Loop through the dataset using DataLoader
    images: torch.Tensor
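The server-side change follows the same idea: instead of a hard-coded batch_size=128, inference now reads the same configured value the clients use, so training and evaluation batch sizes stay in sync from one place. A minimal sketch, assuming cfg is an OmegaConf/Hydra-style object (an assumption; only the key train.local_batch_size appears in the diff), with an illustrative dataset in place of the real test set:

import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, TensorDataset

# Assumed config layout; the value 10 and the OmegaConf container are
# illustrative, only train.local_batch_size is taken from the diff.
cfg = OmegaConf.create({"train": {"local_batch_size": 10}})

# Stand-in for the test dataset passed to inference().
test_dataset = TensorDataset(torch.randn(32, 3), torch.zeros(32, dtype=torch.long))
testloader = DataLoader(test_dataset, batch_size=cfg.train.local_batch_size, shuffle=False)
print(len(testloader))  # ceil(32 / 10) == 4 batches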
