Permutation_importance runs but doesn't print anything, no error messages

I have a neural network that I've been working on for a while. I wanted to add permutation importance to find the most important components, and it runs for hours with no errors or anything. When it stops running, it doesn't print anything.

Input data is structured like this:

SampleID| ID1 | ID2 | ID3 | 

sample1 | 123 | 123 | 123 | 

The first column has sample codes, and each column that has different measurements. The first row has an ID for each column. As a target for my neural network, I have a genotype for each sample marked with either 0, 1, or 2.

My code (It's a bit long, sorry about that)


def run():
    torch.multiprocessing.freeze_support()
    print('loop')

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)  # cpu or cuda (GPU)


    dataset = pd.read_excel("metabolitedata_withgender.xlsx")
    dataset = dataset.iloc[:, 1:9154]
    print(dataset.columns)
    subjectsdf = pd.read_excel("subjectdata.xlsx")
    n_samples, n_metabolites = dataset.shape
    print(n_samples)

    #genotypes of the target gene
    print(subjectsdf['genotype'])
    target = subjectsdf['genotype']
    print(target)
    print(target.shape)

    #genotypes of the target gene
    print(subjectsdf['genotype'])
    target = subjectsdf['genotype']
    print(target)
    print(target.shape)
   
    print("Moving to splitting")
    X_train,X_test,y_train,y_test = train_test_split(dataset.values.astype(np.float32),
                                                 target.values.reshape(-1,1).astype(np.float32),
                                                 test_size=.2,
                                                random_state=42)
    print("Moving to scaling")
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)



    class MultiLayerPredictor(torch.nn.Module):
        def __init__(self, input_shape=9153, output_shape=1, hidden_dim=1024, **kwargs):
            super().__init__()
            self.fc1 = torch.nn.Linear(in_features=input_shape, out_features=hidden_dim)
            self.fc2 = torch.nn.Linear(in_features=hidden_dim, out_features=hidden_dim)
            self.fc3 = torch.nn.Linear(in_features=hidden_dim, out_features=output_shape)

        def forward(self, x):
            l1 = torch.relu(self.fc1(x))
            l2 = torch.relu(self.fc2(l1))
            return torch.sigmoid(self.fc3(l2)).reshape(-1)

    print("Moving to wrapping the neural net")
    net = NeuralNet(
        MultiLayerPredictor,
        criterion=nn.MSELoss,
        max_epochs=10,
        optimizer=optim.Adam,
        lr=0.1,
        iterator_train__shuffle=True
    )

    print("Moving to finding optimal hyperparameters")

    lr = (10**np.random.uniform(-5,-2.5,1000)).tolist()
    params = {
        'optimizer__lr': lr,
        'max_epochs':[300,400,500],
        'module__num_units': [14,20,28,36,42],
        'module__drop' : [0,.1,.2,.3,.4]
    }

    gs = RandomizedSearchCV(net,params,refit=True,cv=3,scoring='neg_mean_squared_error',n_iter=100)
    gs.fit(X_train_scaled,y_train);

    def report(results, n_top=3):
        for i in range(1, n_top + 1):
            candidates = np.flatnonzero(results['rank_test_score'] == i)
        for candidate in candidates:
            print("Model with rank: {0}".format(i))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                  results['mean_test_score'][candidate],
                  results['std_test_score'][candidate]))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")
    
    print(report(gs.cv_results_,10))

    epochs = [i for i in range(len(gs.best_estimator_.history))]
    train_loss = gs.best_estimator_.history[:,'train_loss']
    valid_loss = gs.best_estimator_.history[:,'valid_loss']

    plt.plot(epochs,train_loss,'g-');
    plt.plot(epochs,valid_loss,'r-');
    plt.title('Training Loss Curves');
    plt.xlabel('Epochs');
    plt.ylabel('Mean Squared Error');
    plt.legend(['Train','Validation']);
    plt.show()
    
    print("Moving to scorer creation")
    mkaccuracy_score = make_scorer(r2_score)
    print("Moving to permutation importance")
    r = permutation_importance(gs, X_train_scaled, y_train, n_repeats=30,random_state=0, scoring=mkaccuracy_score)

    for i in r.importances_mean.argsort()[::-1]:
        if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
            print(f"{dataset.column[i]:<8}"
                  f"{r.importances_mean[i]:.3f}"
                  f" +/- {r.importances_std[i]:.3f}")





if __name__ == '__main__':
    run()

What could be causing this? It's hard to figure this thing out without error messages or anything. The runtime for just the permutation_importance is pretty long. Should I be seeing something printed during the runtime too? Currently, there's nothing being printed during and after the run.