%scala

/*****
Copyright © 2024 Stelodata, a StarQuest company; All rights reserved.

DISCLAIMER
The information in technical documents comes without any warranty or
applicability for a specific purpose. The author(s) or distributor(s) will
not accept responsibility for any damage incurred directly or indirectly
through use of the information contained in these documents. The
instructions may need to be modified to be appropriate for the hardware and
software that has been installed and configured within a particular
organization. The information in technical documents should be considered
only as an example and may include information from various sources,
including IBM, Microsoft, and other organizations.

https://www.stelodata.com/privacy-policy
https://www.stelodata.com/legal
*****/

import org.apache.spark.sql.SparkSession
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future}
import com.starquest.sqdr.spark.scala.ApplyJdbc

val jdbcHostname = "DNS-NAME"   // (or IP-ADDRESS)
val jdbcPort     = 50000        // Optional for ApplyJdbc.apply
val jdbcUsername = "SQDR"
val jdbcPassword = "mypassword"

val unityValue  = true          // Optional - Default is false
val continValue = false         // Optional - Default is true

// ApplyJdbc.run method - connect directly to the staging database
// val jdbcDatabase    = "SQDRP0"
// val controlDbSchema = "SQDR"
// val destinationId   = null   // Optional - Default is null
// ApplyJdbc.run(spark, jdbcHostname, jdbcPort, jdbcDatabase, jdbcUsername,
//   jdbcPassword, controlDbSchema, unity = unityValue,
//   continuousOperation = continValue, incrementalThreads = 5, snapshotThreads = 5)

// ApplyJdbc.apply method - connect to the T3 control database SQDRC
val jdbcDatabase    = "SQDRC"          // Optional - Default is "SQDRC"
val controlDbSchema = "SQDR"           // Optional - Default is "SQDR"
val destinationName = "MyDestination"  // Required - Retrieve the Databricks destination name from the Data Replicator Manager

ApplyJdbc.apply(spark, destinationName, jdbcUsername, jdbcPassword,
  jdbcHostname, jdbcPort, jdbcDatabase,
  unity = unityValue, continuousOperation = continValue,
  incrementalThreads = 10, snapshotThreads = 10)

/*****
This is the simplest invocation (using defaults):

ApplyJdbc.apply(spark, destinationName, jdbcUsername, jdbcPassword, jdbcHostname)
*****/
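/*****
Optional pattern (a sketch, not from the vendor documentation): the
Future/Await imports above are not used by the calls in this cell. If you
want the apply call to run on a background thread so the notebook cell can
enforce an overall wait, one way to do it, assuming ApplyJdbc.apply simply
blocks until replication stops, is shown below. Uncomment to use; replace
Duration.Inf with a finite timeout if the cell should fail fast instead.

import scala.concurrent.duration._

// Launch the replication on the global ExecutionContext imported above.
val replication = Future {
  ApplyJdbc.apply(spark, destinationName, jdbcUsername, jdbcPassword, jdbcHostname)
}

// Block the cell until replication finishes (or the timeout elapses,
// in which case Await.result throws a TimeoutException).
Await.result(replication, Duration.Inf)
*****/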