can't get skorch to run on GPU

My skorch NeuralNet + GridSearchCV code runs, but it does not appear to be using the GPU, so it is slow.

Any help appreciated.

It takes 13 seconds per epoch, and I know that PyTorch runs much faster than this on the GPU.
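
For what it's worth, here is a quick sanity check (a minimal sketch, assuming a standard CUDA install) that I would use to confirm PyTorch itself can see the GPU:

    import torch

    print(torch.version.cuda)             # CUDA version this PyTorch build was compiled against
    print(torch.cuda.is_available())      # should print True if the GPU is visible
    print(torch.cuda.get_device_name(0))  # name of the first GPU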

Code:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from skorch import NeuralNetBinaryClassifier
    from sklearn import model_selection

    cuda = torch.device('cuda')

    # hyperparameter grid searched by GridSearchCV
    parameters = {'lr': [1e-4, 1e-5]}
    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.lin1 = nn.Linear(50, 30)
            self.lin2 = nn.Linear(30, 1)

        def forward(self, x):
            x = torch.tanh(self.lin1(x))
            return self.lin2(x).squeeze()

    class NLL(torch.nn.modules.loss._Loss):
        # custom criterion: negative log-likelihood on the squeezed network output
        def forward(self, input, target):
            loss = F.nll_loss(input.unsqueeze(1), target)
            return loss


    model = Model()
    model.to(cuda)

    cl1 = NeuralNetBinaryClassifier(
        module=model,
        criterion=NLL,
        optimizer=torch.optim.Adam,
        max_epochs=100,
        # lr=0.0004,
        train_split=None,
        device='cuda',
    )


    # ps is the cross-validation splitter, and X and Y are the training data;
    # all three are defined earlier in the script
    clf = model_selection.GridSearchCV(
        cl1, parameters,
        iid=False,
        scoring='neg_log_loss',
        cv=ps,
        verbose=1,
        n_jobs=1,
        return_train_score=True,
    )
    print("# Training...")

    clf.fit(X, Y)
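
In case it helps with diagnosis, here is a rough sketch (untested, and assuming GridSearchCV's default refit=True so that best_estimator_ exists) of how I could check after fitting whether the refitted module's parameters actually ended up on the GPU:

    # inspect where the refitted skorch net keeps its module's parameters
    best_net = clf.best_estimator_
    param_device = next(best_net.module_.parameters()).device
    print(param_device)  # I would expect something like 'cuda:0' here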