How can I access individual elements of RDD in a function called in rdd.map

from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.functions import expr
import logging

log = logging.getLogger('mytest')
_h = logging.StreamHandler()
_h.setFormatter(logging.Formatter("%(levelname)s  %(msg)s"))
log.addHandler(_h)
log.setLevel(logging.DEBUG)
log.info("module imported and logger initialized")

def dowork(x):
    log.info("Working... ")
    infile = "yyy_in" + x(1) + ".txt"
    outfile = "yyy_out" + x(2) + ".txt"
    log.info("Infile is: ", infile)
    log.info("Outfile is: ", outfile)

    rDF = spark.read.format("csv").option("header", "true").load(infile)
    #do more work here
    rDF.rdd.coalesce(1).saveAsTextFile(outfile)
    return "Success"

def Mytest(spark):
    log.info("Started... ")
    file1 = "file1.txt"
    fDF = spark.read.format("csv").option("header", "true").option("delimiter","\t").load(file1)
    fDF.registerTempTable("f")
    mDF = spark.sql("SELECT s, c, m  from f")
    mDF.show()
    r=mDF.rdd
    r.map(dowork(r))
    spark.stop()

if __name__ == '__main__':
    spark = SparkSession \
        .builder \
        .appName("my app") \
        .getOrCreate()
    Mytest(spark)

Sample file used in this example is file1.txt s c m 219 00 836 229 40 4933

You can run the below code Mytest.py by- spark-submit Mytest.py

I get an error- infile = "yyy_in" + x(1) + ".txt" TypeError: 'RDD' object is not callable