java -jar PROGRESS_DATADIRECT_JDBC_SF_ALL.jar
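Running this command launches the driver installer. Once the driver is installed, the Glue job needs the driver JAR on its classpath, which means uploading the JAR from the install directory (typically sforce.jar under the install's lib folder) to S3 and passing its S3 path to the job through the --extra-jars argument, or equivalently the "Dependent jars path" field in the Glue console. Below is a minimal sketch of registering such a job with boto3; the job name, role ARN, script location, and JAR path are placeholders to substitute with your own values.

import boto3

# Sketch: create a Glue job whose classpath includes the DataDirect driver JAR.
# All names, ARNs, and S3 paths below are placeholders.
glue = boto3.client("glue")

glue.create_job(
    Name="salesforce-to-s3",  # hypothetical job name
    Role="arn:aws:iam::123456789012:role/GlueJobRole",  # placeholder IAM role
    Command={
        "Name": "glueetl",
        "ScriptLocation": "s3://glueuserdata/scripts/salesforce_etl.py",
    },
    DefaultArguments={
        # Point Glue at the driver JAR uploaded to S3 so that
        # com.ddtek.jdbc.sforce.SForceDriver can be loaded at run time.
        "--extra-jars": "s3://glueuserdata/jars/sforce.jar",
    },
)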

import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from awsglue.job import Job

args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

## Read data from Salesforce into a DataFrame using the DataDirect JDBC driver
source_df = spark.read.format("jdbc") \
    .option("url", "jdbc:datadirect:sforce://login.salesforce.com;SecurityToken=<token>") \
    .option("dbtable", "SFORCE.OPPORTUNITY") \
    .option("driver", "com.ddtek.jdbc.sforce.SForceDriver") \
    .option("user", "user@mail.com") \
    .option("password", "pass123") \
    .load()

## Convert the DataFrame to an AWS Glue DynamicFrame
dynamic_dframe = DynamicFrame.fromDF(source_df, glueContext, "dynamic_df")

## Write the DynamicFrame to S3 in CSV format. You can also write it to RDS or
## Redshift by using a connection you have previously defined in Glue.
datasink4 = glueContext.write_dynamic_frame.from_options(
    frame=dynamic_dframe,
    connection_type="s3",
    connection_options={"path": "s3://glueuserdata"},
    format="csv",
    transformation_ctx="datasink4"
)

job.commit()

You can use similar steps with any of the DataDirect JDBC suite of drivers, available for relational, big data, SaaS, and NoSQL data sources. Feel free to try any of our drivers with AWS Glue for your ETL jobs during the 15-day trial period.
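The comment in the script above notes that the DynamicFrame can also land in RDS or Redshift through a connection defined in Glue. Here is a minimal sketch of that variant, assuming a Glue connection named my-redshift-connection and placeholder database, table, and staging-directory values:

## Sketch: write the same DynamicFrame to Redshift via a predefined Glue connection.
## Connection name, database, table, and temp directory are placeholders.
datasink_redshift = glueContext.write_dynamic_frame.from_jdbc_conf(
    frame=dynamic_dframe,
    catalog_connection="my-redshift-connection",  # hypothetical Glue connection name
    connection_options={
        "dbtable": "public.opportunity",  # placeholder target table
        "database": "dev",                # placeholder target database
    },
    redshift_tmp_dir="s3://glueuserdata/temp/",  # S3 staging dir used for the Redshift load
    transformation_ctx="datasink_redshift"
)

Because from_jdbc_conf reuses the JDBC endpoint and credentials stored in the named Glue connection, no passwords need to appear in the script for the write side.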