Hi Team,
I have been using the following snippet to create an OAuth session for connecting to the Google AdWords API (ads-lib 4.4.0, adwords-axis 4.4.0).
val oAuth2Credential: Credential = new OfflineCredentials.Builder()
  .forApi(OfflineCredentials.Api.ADWORDS)
  .withClientSecrets(adId, adSecret)
  .withRefreshToken(adToken)
  .build()
  .generateCredential()

val session = new AdWordsSession.Builder()
  .withDeveloperToken(developerToken)
  .withOAuth2Credential(oAuth2Credential)
  .build()

session.setClientCustomerId(adCustomerId)
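
For context, the session is then consumed through the library's service factory in the usual way (a short sketch; the v201809 package is my assumption about the API version bundled with these jars, so substitute whichever version your build targets):

import com.google.api.ads.adwords.axis.factory.AdWordsServices
import com.google.api.ads.adwords.axis.v201809.cm.CampaignServiceInterface

// Illustrative only: obtain a service client from the session built above.
// v201809 is an assumed API version; adjust it to match your dependency.
val adWordsServices = AdWordsServices.getInstance()
val campaignService = adWordsServices.get(session, classOf[CampaignServiceInterface])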
Because the Maven dependencies of the above group require guava-20.0.jar and commons-configuration-1.10.jar, I added them as extra classpath entries in the Spark job, and everything worked fine until about a week ago. Now I am seeing the error below. I checked the classpath, and both jars appear at the beginning (which should give them precedence if a conflicting second version is present). Is there any reason other than a dependency conflict why this error would occur?
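
For what it's worth, this is the kind of check I can run inside the job to see which jar a class is actually loaded from at runtime (a minimal diagnostic sketch; the object name is just for illustration):

// Diagnostic sketch: print the jar a class was loaded from. Running this on
// the driver (and inside an executor task, if needed) shows whether
// guava-20.0.jar really wins over any Guava bundled with Spark/Hadoop.
object ClasspathCheck {
  def sourceOf(className: String): String = {
    val src = Class.forName(className).getProtectionDomain.getCodeSource
    if (src != null) src.getLocation.toString else "<bootstrap or unknown>"
  }

  def main(args: Array[String]): Unit = {
    // The method missing in the stack trace below lives in this class:
    println(sourceOf("com.google.common.base.Preconditions"))
  }
}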
Exception in thread "streaming-job-executor-0" java.lang.NoSuchMethodError: com.google.common.base.Preconditions.checkNotNull(Ljava/lang/Object;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/Object;
at com.google.api.ads.common.lib.auth.OfflineCredentials$Api.<init>(OfflineCredentials.java:89)
at com.google.api.ads.common.lib.auth.OfflineCredentials$Api.<clinit>(OfflineCredentials.java:81)
at arrow.google.GoogleUtils$.generateOAuthCredential(GoogleUtils.scala:251)
at arrow.google.GoogleUtils$.<init>(GoogleUtils.scala:62)
at arrow.google.GoogleUtils$.<clinit>(GoogleUtils.scala)
at arrow.google.SyndicateToGoogle$.getCustomAudienceId(SyndicateToGoogle.scala:201)
at arrow.google.SyndicateToGoogle$$anonfun$run$1.apply(SyndicateToGoogle.scala:86)
at arrow.google.SyndicateToGoogle$$anonfun$run$1.apply(SyndicateToGoogle.scala:82)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at arrow.google.SyndicateToGoogle$.run(SyndicateToGoogle.scala:82)
at arrow.GoogleSyndication$$anonfun$main$2$$anonfun$apply$1.apply(GoogleSyndication.scala:139)
at arrow.GoogleSyndication$$anonfun$main$2$$anonfun$apply$1.apply(GoogleSyndication.scala:134)
at scala.collection.Iterator$class.foreach(Iterator.scala:891)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
at arrow.GoogleSyndication$$anonfun$main$2.apply(GoogleSyndication.scala:134)
at arrow.GoogleSyndication$$anonfun$main$2.apply(GoogleSyndication.scala:121)
at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:51)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:416)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:50)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
at scala.util.Try$.apply(Try.scala:192)
at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:257)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:256)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Classpath: CLASSPATH -> /home/hadoop/guava-20.0.jar:/home/hadoop/commons-configuration-1.10.jar<CPS>{{PWD}}<CPS>{{PWD}}/__spark_conf__<CPS>{{PWD}}/__spark_libs__/*<CPS>$HADOOP_CONF_DIR<CPS>$HADOOP_COMMON_HOME/*<CPS>$HADOOP_COMMON_HOME/lib/*<CPS>$HADOOP_HDFS_HOME/*<CPS>$HADOOP_HDFS_HOME/lib/*<CPS>$HADOOP_MAPRED_HOME/*<CPS>$HADOOP_MAPRED_HOME/lib/*<CPS>$HADOOP_YARN_HOME/*<CPS>$HADOOP_YARN_HOME/lib/*<CPS>/usr/lib/hadoop-lzo/lib/*<CPS>/usr/share/aws/emr/emrfs/conf<CPS>/usr/share/aws/emr/emrfs/lib/*<CPS>/usr/share/aws/emr/emrfs/auxlib/*<CPS>/usr/share/aws/emr/lib/*<CPS>/usr/share/aws/emr/ddb/lib/emr-ddb-hadoop.jar<CPS>/usr/share/aws/emr/goodies/lib/emr-hadoop-goodies.jar<CPS>/usr/share/aws/emr/kinesis/lib/emr-kinesis-hadoop.jar<CPS>/usr/share/aws/emr/cloudwatch-sink/lib/*<CPS>/usr/share/aws/aws-java-sdk/*<CPS>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*<CPS>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*<CPS>/usr/lib/hadoop-lzo/lib/*<CPS>/usr/share/aws/emr/emrfs/conf<CPS>/usr/share/aws/emr/emrfs/lib/*<CPS>/usr/share/aws/emr/emrfs/auxlib/*<CPS>/usr/share/aws/emr/lib/*<CPS>/usr/share/aws/emr/ddb/lib/emr-ddb-hadoop.jar<CPS>/usr/share/aws/emr/goodies/lib/emr-hadoop-goodies.jar<CPS>/usr/share/aws/emr/kinesis/lib/emr-kinesis-hadoop.jar<CPS>/usr/share/aws/emr/cloudwatch-sink/lib/*<CPS>/usr/share/aws/aws-java-sdk/*<CPS>{{PWD}}/__spark_conf__/__hadoop_conf__
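
For completeness, this is roughly how the extra jars are wired into the job. The paths match the CLASSPATH dump above; spark.driver.userClassPathFirst and spark.executor.userClassPathFirst are experimental Spark properties that make user-supplied jars take precedence over the cluster's bundled versions, which may be relevant here (note that driver-side classpath settings must be supplied at submit time, e.g. via spark-submit --conf, not after the driver JVM has started):

import org.apache.spark.SparkConf

// Sketch of the relevant configuration. The app name is taken from the stack
// trace and the jar paths from the CLASSPATH dump above; the
// userClassPathFirst flags are experimental and are an assumption on my
// part, not something the job currently sets.
val conf = new SparkConf()
  .setAppName("GoogleSyndication")
  .set("spark.executor.extraClassPath",
    "/home/hadoop/guava-20.0.jar:/home/hadoop/commons-configuration-1.10.jar")
  .set("spark.driver.userClassPathFirst", "true")    // experimental
  .set("spark.executor.userClassPathFirst", "true")  // experimental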
Thanks