How do I fix this "'float' object cannot be interpreted as an integer" error related to np.ndarray in my code?

I am writing code to visualize data in pygame. Passing the data as an ndarray throws the error shown below the code.

                if noCollision(random_point.getcoord(), near_point.getcoord(), rectObs):
                    new_point.setcoord(distance(new_point.getcoord(), near_point.getcoord()) + near_point.getcost())

                    for k in range(heuristic_random_point):
                        random_point_heuristic = Point([random.random()*XDIM, random.random()*YDIM],0,0)
                        ndist_heuristic = []
                        
                        for j in range(len(nodes)):
                            
                            temp_heuristic = self.distance(nodes[j].getcoord(), random_point_heuristic.getcoord())

                            ndist_heuristic.append(temp_heuristic)

                            min_index_heuristic = ndist_heuristic.index(min(ndist_heuristic))
                            min_val_heuristic = min(ndist_heuristic)
                            near_point_heuristic = nodes[min_index_heuristic]
                            new_point_heuristic = steer(random_point_heuristic.getcoord(), near_point_heuristic.getcoord(), min_val_heuristic, EPS)


                        if noCollision(random_point_heuristic.getcoord(), near_point_heuristic.getcoord(), rectObs):

                            new_point_heuristic.setcoord(distance(new_point_heuristic.getcoord(), near_point_heuristic.getcoord()) + near_point_heuristic.getcost())

                        if new_point.getcost() + distance(new_point.getcoord(), goal_point.getcoord()) > new_point_heuristic.getcost() + distance(new_point_heuristic.getcoord(), goal_point_heuristic.getcoord()):

                            new_point = new_point_heuristic
                            near_point = near_point_heuristic 

                            data = np.ndarray([new_point.getcoord()[0], near_point.getcoord()[0]], [new_point.getcoord()[1], near_point.getcoord()[1]])
                            pygame_plot.pcolor(data)

                            min_point = near_point

                        for k in range(len(nodes)):
                            if nodes[k].getcoord() == min_point.getcoord():
                                
                                new_point.setparent(k)
                                nodes.append(goal_point)

        D= []
        for i in range(len(nodes)):
            tmpdist = distance(nodes[i].getcoord(), goal_point.getcoord())
            D.append(tmpdist)
        min_goal_node_index = D.index(min(D))
        final_point = nodes[min_goal_node_index]
        end_point = final_point
        goal_point.setparent(min_goal_node_index)
        nodes.append(goal_point)

        total_cost = nodes[end_point.getparent()].getcost()

        while end_point.getparent() != 0:
            start = end_point.getparent()
                                    
            data = np.ndarray([end_point.getcoord()[0], nodes[start].getcoord()[0]],
                              [end_point.getcoord()[1], nodes[start].getcoord()[1]])
            
            pygame_plot.pcolor(data)

            end_point = nodes[start]

        start = end_point.getparent()
                                    
        data = np.ndarray([end_point.getcoord()[0], nodes[start].getcoord()[0]],
                          [end_point.getcoord()[1], nodes[start].getcoord()[1]])
        
        pygame_plot.pcolor(data)
        end_time = time.time()
        print(end_time - start_time,'seconds')

The error is:

    TypeError                                 Traceback (most recent call last)
    in
        251 if __name__ == '__main__':
        252
    --> 253     main()

    in main()
            data = np.ndarray([end_point.getcoord()[0], nodes[start].getcoord()[0]],
                              [end_point.getcoord()[1], nodes[start].getcoord()[1]])
    -->     pygame_plot.pcolor(data)

    TypeError: 'float' object cannot be interpreted as an integer
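
If I strip everything else away, the np.ndarray call alone already seems to show the same problem. Below is a minimal sketch with made-up coordinate values standing in for the getcoord() results (the Point class and the pygame plotting are left out). As far as I can tell it raises the same TypeError, which makes me think the issue is in how I build the array rather than in pcolor:

    import numpy as np

    # Made-up coordinates standing in for the getcoord() values in my code
    x0, x1 = 12.7, 30.1
    y0, y1 = 48.3, 55.9

    try:
        # Same call pattern as the `data = np.ndarray(...)` lines above;
        # np.ndarray() treats its first argument as the array *shape*
        data = np.ndarray([x0, x1], [y0, y1])
    except TypeError as exc:
        print(exc)  # 'float' object cannot be interpreted as an integer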