ValueError: scoring value looks like it is a metric function rather than a scorer. A scorer should require an estimator as its first parameter

I have a neural network and I'm using permutation importance to find the most important features. When I run it, I get the error ValueError: scoring value looks like it is a metric function rather than a scorer. A scorer should require an estimator as its first parameter.

Here's a minimal reproducible example.

import numpy as np
import pandas as pd
import torch
from torch import nn, optim
from skorch import NeuralNet
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.inspection import permutation_importance


def run():
    torch.multiprocessing.freeze_support()
    print('loop')
 
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)  # cpu or cuda (GPU)
 
 
    metabolites = pd.read_excel("TPGx1_3_metabolitedata.xlsx")
    subject_metadata = pd.read_excel("TPGx1_3_subjectdata.xlsx")
    metabolitesdf = pd.DataFrame(data=metabolites)
    dataset = metabolitesdf.iloc[:, 1:9153]  # the 9152 metabolite feature columns (matches input_shape below)
    subjectsdf = pd.DataFrame(data=subject_metadata)
 
    n_samples, n_metabolites = metabolitesdf.shape
    print(n_samples)
 
    target = subjectsdf['target']
 
   
    print("Moving to splitting")
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.values.astype(np.float32),
        target.values.reshape(-1, 1).astype(np.float32),
        test_size=.2,
        random_state=42)
    print("Moving to scaling")
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)
 
 
 
    class MultiLayerPredictor(torch.nn.Module):
        def __init__(self, input_shape=9152, output_shape=1, hidden_dim=1024, **kwargs):
            super().__init__()
            self.fc1 = torch.nn.Linear(in_features=input_shape, out_features=hidden_dim)
            self.fc2 = torch.nn.Linear(in_features=hidden_dim, out_features=hidden_dim)
            self.fc3 = torch.nn.Linear(in_features=hidden_dim, out_features=output_shape)
 
        def forward(self, x):
            l1 = torch.relu(self.fc1(x))
            l2 = torch.relu(self.fc2(l1))
            return torch.sigmoid(self.fc3(l2)).reshape(-1)
 
        def score(self, X_test, y_test):
            # my attempt at giving the module an accuracy-based score method
            y_pred_acc = accuracy_score(y_test, self.predict(X_test))
            return y_pred_acc
        

 
    print("Moving to wrapping the neural net")
    net = NeuralNet(
        MultiLayerPredictor,
        criterion=nn.MSELoss,
        max_epochs=10,
        optimizer=optim.Adam,
        lr=0.1,
        iterator_train__shuffle=True
    )
 
    # leftover from a hyperparameter search; I removed the search itself
    # since it's unrelated to this issue
    lr = (10**np.random.uniform(-5, -2.5, 1000)).tolist()
    params = {
        'optimizer__lr': lr,
        'max_epochs':[300,400,500],
        'module__num_units': [14,20,28,36,42],
        'module__drop' : [0,.1,.2,.3,.4]
    }
 
        
    # this is the call that raises the ValueError
    r = permutation_importance(net, X_test, y_test, n_repeats=30,
                               random_state=0, scoring=accuracy_score)
 
    for i in r.importances_mean.argsort()[::-1]:
        if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
            print(f"{metabolites.feature_names[i]:<8}"
                  f"{r.importances_mean[i]:.3f}"
                  f" +/- {r.importances_std[i]:.3f}")
 
 
 
 
if __name__ == '__main__':
    run()

And here's the full stack trace.

Traceback (most recent call last):
  File "///home/h//Desktop/neuralnet/neuralnet_wrapped.py", line 151, in <module>
    run()
  File "///home/h//Desktop/neuralnet/neuralnet_wrapped.py", line 131, in run
    r = permutation_importance(net, X_test, y_test, n_repeats=30,random_state=0, scoring=accuracy_score)
  File "C:\Users\hamalkre\AppData\Roaming\Python\Python38\site-packages\sklearn\utils\validation.py", line 73, in inner_f
    return f(**kwargs)
  File "C:\Users\\AppData\Roaming\Python\Python38\site-packages\sklearn\inspection\_permutation_importance.py", line 132, in permutation_importance
    scorer = check_scoring(estimator, scoring=scoring)
  File "C:\Users\\AppData\Roaming\Python\Python38\site-packages\sklearn\utils\validation.py", line 73, in inner_f
    return f(**kwargs)
  File "C:\Users\\AppData\Roaming\Python\Python38\site-packages\sklearn\metrics\_scorer.py", line 413, in check_scoring
    raise ValueError('scoring value %r looks like it is a metric '
ValueError: scoring value <function accuracy_score at 0x0000024CE496C8B0> looks like it is a metric function rather than a scorer. A scorer should require an estimator as its first parameter. Please use `make_scorer` to convert a metric to a scorer.

I took a few lines out, as they have nothing to do with this issue. As you can see, the problem is the permutation scoring, but I can't figure out what I need to do.
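
From the last line of the traceback I gather that I'm supposed to wrap the metric with make_scorer. My best guess is the sketch below, but I don't know whether it's correct here, since net.predict returns raw sigmoid outputs rather than the 0/1 labels that accuracy_score expects:

from sklearn.metrics import make_scorer, accuracy_score
from sklearn.inspection import permutation_importance

# my guess at what the error message is asking for: make_scorer turns the
# metric into a scorer that takes (estimator, X, y) -- untested
acc_scorer = make_scorer(accuracy_score)

r = permutation_importance(net, X_test, y_test, n_repeats=30,
                           random_state=0, scoring=acc_scorer)

Is that the right approach, or does the scoring need to be handled differently for a skorch NeuralNet?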