from tensorflow.keras.layers import Dropout

# Add a dropout layer to an existing Keras Sequential model. The argument is
# the dropout rate: here 50% of the incoming activations are randomly zeroed
# during training.
model.add(Dropout(0.5))
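# The snippet above is Keras; the rest of this section uses PyTorch, where the
# equivalent layer is nn.Dropout. A minimal sketch (the layer sizes are
# placeholders chosen for illustration):
import torch.nn as nn

model = nn.Sequential(
    nn.Linear(128, 64),   # placeholder dimensions
    nn.ReLU(),
    nn.Dropout(p=0.5),    # zeroes 50% of activations during training; no-op in eval() mode
    nn.Linear(64, 10),
)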
import optuna
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Define the objective function for Optuna
def objective(trial):
    # Suggest hyperparameters
    learning_rate = trial.suggest_float("learning_rate", 1e-4, 1e-1, log=True)
    dropout_rate = trial.suggest_float("dropout_rate", 0.2, 0.7)
    batch_size = trial.suggest_int("batch_size", 16, 128, step=16)
    optimizer_name = trial.suggest_categorical("optimizer", ["SGD", "Adam", "RMSprop"])

    # Load dataset (CIFAR-10 images have 3 channels, so normalize each one)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    train_dataset = datasets.CIFAR10(root="./data", train=True, transform=transform, download=True)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

    # Initialize the model (the AlexNet class is defined below)
    model = AlexNet(num_classes=10, dropout_rate=dropout_rate)
    criterion = nn.CrossEntropyLoss()
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=learning_rate)

    # Train the model (1 epoch for simplicity), tracking training accuracy;
    # the objective must return a score for Optuna to maximize
    model.train()
    correct, total = 0, 0
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        correct += (outputs.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    return correct / total
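# The objective assumes an AlexNet class that accepts a dropout_rate argument,
# which is not defined in this section. Below is a minimal AlexNet-style
# stand-in sized for 32x32 CIFAR-10 inputs (an illustrative sketch, not the
# original architecture):
class AlexNet(nn.Module):
    def __init__(self, num_classes=10, dropout_rate=0.5):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),   # 32x32 -> 16x16
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),   # 16x16 -> 8x8
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout_rate),    # rate suggested by Optuna
            nn.Linear(128 * 8 * 8, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout_rate),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)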
# Run the search; Optuna's default sampler (TPE) performs Bayesian-style
# optimization of the hyperparameters
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)
# Print the best hyperparameters
print("Best hyperparameters:", study.best_params)
# Rebuild a model with the best dropout rate and move it to the GPU if one is
# available (the "model" created inside the objective is out of scope here)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
best_model = AlexNet(num_classes=10, dropout_rate=study.best_params["dropout_rate"]).to(device)
# TensorFlow's way of listing the devices it can see (CPU/GPU), useful for
# confirming a GPU is visible to the Keras code at the top of this section
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
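# PyTorch equivalent of the device listing above
print("CUDA available:", torch.cuda.is_available())
for i in range(torch.cuda.device_count()):
    print(f"GPU {i}: {torch.cuda.get_device_name(i)}")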