I want to apply the partial MPI to the code, and is my code correct?

subroutine mdmove( x, y, z, wkf )
    implicit none
    integer, parameter:: natm = 1000, nprocs = 4

    double precision, dimension(1:natm), intent(inout):: x, y, z
    double precision, dimension(1:3, 1:natm), intent(in):: wkf
    double precision:: inv_natm = 1.0 / dfloat( natm )

    x = x – exp( -wkf(1, : ) * inv_natms)
    y = y – exp( -wkf(2, : ) * inv_natms)
    z = z – exp( -wkf(3, : ) * inv_natms)

end subroutine mdmove

I want to apply the partial MPI parallelism to the above subroutine. And use MPI_Allgather for the message passing so that all process shares the array of coordinate(x, y, z) And below is my code, is it correct? Should I build a matrix to put the x, y, z together rather than use 3 times mpi_allgather?

subroutine mdmove( x, y, z, wkf )
    implicit none
    integer, parameter:: natm = 1000, nprocs = 4
    integer:: myrank, ierr

    double precision, dimension(1:natm), intent(inout):: x, y, z, xall, yall, zall
    double precision, dimension(1:3, 1:natm), intent(in):: wkf
    double precision:: inv_natm = 1.0 / dfloat( natm )

    call MPI_Init( ierr )
    call MPI_Comm_size(mpi_comm_world, nprocs, ierr)
    call MPI_Comm_rank(mpi_comm_world, myrank, ierr)

    do i = myrank + 1, ntam – 1, nprocs
        x(i) = x(i) – exp( -wkf(1, i) * inv_natms)
        y(i) = y(i) – exp( -wkf(2, i) * inv_natms)
        z(i) = z(i) – exp( -wkf(3, i) * inv_natms)
    end do

    call MPI_Allgather(x, ntam, MPI_INT, xall, ntam, MPI_INT, MPI_COMM_WORLD)
    call MPI_Allgather(y, ntam, MPI_INT, yall, ntam, MPI_INT, MPI_COMM_WORLD)
    call MPI_Allgather(z, ntam, MPI_INT, zall, ntam, MPI_INT, MPI_COMM_WORLD)

end subroutine mdmove