I'm using Spark 1.4.1 with Spark Cassandra Connector 1.4.0-M3.
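For reference, the versions are pinned in my build roughly like this (an sbt sketch; the Scala version is an assumption based on Spark 1.4.1's default 2.10 build):

// build.sbt (sketch) -- Spark, the connector, and Scala versions must all agree
scalaVersion := "2.10.4" // assumed: Spark 1.4.1 prebuilt distributions target Scala 2.10

libraryDependencies ++= Seq(
  "org.apache.spark"   %% "spark-core"                % "1.4.1" % "provided",
  "org.apache.spark"   %% "spark-sql"                 % "1.4.1" % "provided",
  "com.datastax.spark" %% "spark-cassandra-connector" % "1.4.0-M3"
)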
When I try to execute a query from my Spark application, I get the errors below. Please help.
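For context, here is a minimal sketch of the kind of job that fails (keyspace, table, and column names are placeholders; the SUM over an addition matches the SumFunction/Add/Cast frames in the traces below):

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.cassandra.CassandraSQLContext

object SumJob {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("cassandra-sum")
      .set("spark.cassandra.connection.host", "10.15.0.177") // placeholder contact point
    val sc = new SparkContext(conf)
    val csc = new CassandraSQLContext(sc)
    // An aggregate with an addition inside it; the executors die while evaluating it
    csc.sql("SELECT SUM(col_a + col_b) FROM my_keyspace.my_table").show()
  }
}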
15/08/26 11:56:01 WARN TaskSetManager: Lost task 18.0 in stage 0.0 (TID 8, 10.15.0.177): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.types.DecimalType$
at org.apache.spark.sql.catalyst.expressions.BinaryArithmetic.resolved$lzycompute(arithmetic.scala:80)
at org.apache.spark.sql.catalyst.expressions.BinaryArithmetic.resolved(arithmetic.scala:77)
at org.apache.spark.sql.catalyst.expressions.BinaryArithmetic.dataType(arithmetic.scala:83)
at org.apache.spark.sql.catalyst.expressions.Add.numeric$lzycompute(arithmetic.scala:111)
at org.apache.spark.sql.catalyst.expressions.Add.numeric(arithmetic.scala:111)
at org.apache.spark.sql.catalyst.expressions.Add.eval(arithmetic.scala:125)
at org.apache.spark.sql.catalyst.expressions.Coalesce.eval(nullFunctions.scala:51)
at org.apache.spark.sql.catalyst.expressions.MutableLiteral.update(literals.scala:91)
at org.apache.spark.sql.catalyst.expressions.SumFunction.update(aggregates.scala:625)
at org.apache.spark.sql.execution.Aggregate$$anonfun$doExecute$1$$anonfun$7.apply(Aggregate.scala:165)
at org.apache.spark.sql.execution.Aggregate$$anonfun$doExecute$1$$anonfun$7.apply(Aggregate.scala:149)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:686)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:686)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:70)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
at org.apache.spark.scheduler.Task.run(Task.scala:70)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
15/08/26 11:55:57 WARN TaskSetManager: Lost task 2.0 in stage 0.0 (TID 48, 10.15.0.152): java.lang.NoClassDefFoundError: Could not initialize class org.apache.spark.sql.types.FloatType$
at org.apache.spark.sql.catalyst.expressions.Cast.org$apache$spark$sql$catalyst$expressions$Cast$$cast(Cast.scala:424)
at org.apache.spark.sql.catalyst.expressions.Cast.cast$lzycompute(Cast.scala:432)
at org.apache.spark.sql.catalyst.expressions.Cast.cast(Cast.scala:432)
at org.apache.spark.sql.catalyst.expressions.Cast.eval(Cast.scala:436)
at org.apache.spark.sql.catalyst.expressions.Coalesce.eval(nullFunctions.scala:51)
at org.apache.spark.sql.catalyst.expressions.Add.eval(arithmetic.scala:117)
at org.apache.spark.sql.catalyst.expressions.Coalesce.eval(nullFunctions.scala:51)
at org.apache.spark.sql.catalyst.expressions.MutableLiteral.update(literals.scala:91)
at org.apache.spark.sql.catalyst.expressions.SumFunction.update(aggregates.scala:625)
at org.apache.spark.sql.execution.Aggregate$$anonfun$doExecute$1$$anonfun$7.apply(Aggregate.scala:165)
at org.apache.spark.sql.execution.Aggregate$$anonfun$doExecute$1$$anonfun$7.apply(Aggregate.scala:149)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:686)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:686)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:70)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
at org.apache.spark.scheduler.Task.run(Task.scala:70)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
15/08/26 11:55:57 WARN TaskSetManager: Lost task 53.0 in stage 0.0 (TID 53, 10.15.0.152): java.lang.ExceptionInInitializerError
at org.apache.spark.sql.catalyst.expressions.Cast.org$apache$spark$sql$catalyst$expressions$Cast$$cast(Cast.scala:424)
at org.apache.spark.sql.catalyst.expressions.Cast.cast$lzycompute(Cast.scala:432)
at org.apache.spark.sql.catalyst.expressions.Cast.cast(Cast.scala:432)
at org.apache.spark.sql.catalyst.expressions.Cast.eval(Cast.scala:436)
at org.apache.spark.sql.catalyst.expressions.Coalesce.eval(nullFunctions.scala:51)
at org.apache.spark.sql.catalyst.expressions.Add.eval(arithmetic.scala:117)
at org.apache.spark.sql.catalyst.expressions.Coalesce.eval(nullFunctions.scala:51)
at org.apache.spark.sql.catalyst.expressions.MutableLiteral.update(literals.scala:91)
at org.apache.spark.sql.catalyst.expressions.SumFunction.update(aggregates.scala:625)
at org.apache.spark.sql.execution.Aggregate$$anonfun$doExecute$1$$anonfun$7.apply(Aggregate.scala:165)
at org.apache.spark.sql.execution.Aggregate$$anonfun$doExecute$1$$anonfun$7.apply(Aggregate.scala:149)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:686)
at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:686)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:70)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
at org.apache.spark.scheduler.Task.run(Task.scala:70)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.RuntimeException: error reading Scala signature of org.apache.spark.sql.types.FloatType: tail of empty list
at scala.reflect.internal.pickling.UnPickler.unpickle(UnPickler.scala:45)
at scala.reflect.runtime.JavaMirrors$JavaMirror.unpickleClass(JavaMirrors.scala:565)
at scala.reflect.runtime.SymbolLoaders$TopClassCompleter.complete(SymbolLoaders.scala:32)
at scala.reflect.internal.Symbols$Symbol.info(Symbols.scala:1231)
at scala.reflect.internal.BuildUtils$BuildImpl.select(BuildUtils.scala:20)
at scala.reflect.internal.BuildUtils$BuildImpl.selectType(BuildUtils.scala:11)
at scala.reflect.internal.BuildUtils$BuildImpl.selectType(BuildUtils.scala:8)
at org.apache.spark.sql.types.FloatType$$typecreator1$1.apply(FloatType.scala:39)
at scala.reflect.api.TypeTags$WeakTypeTagImpl.tpe$lzycompute(TypeTags.scala:231)
at scala.reflect.api.TypeTags$WeakTypeTagImpl.tpe(TypeTags.scala:231)
at org.apache.spark.sql.types.AtomicType.<init>(DataType.scala:96)
at org.apache.spark.sql.types.NumericType.<init>(DataType.scala:107)
at org.apache.spark.sql.types.FractionalType.<init>(DataType.scala:158)
at org.apache.spark.sql.types.FloatType.<init>(FloatType.scala:34)
at org.apache.spark.sql.types.FloatType$.<init>(FloatType.scala:53)
at org.apache.spark.sql.types.FloatType$.<clinit>(FloatType.scala)
... 26 more
Alex