Hi,
I have been getting the following errors while installing Spark 0.7.2:
[root@10 spark-0.7.2]# sbt/sbt package
[info] Loading project definition from /root/spark-0.7.2/project/project
[info] Loading project definition from /root/spark-0.7.2/project
[info] Set current project to root (in build file:/root/spark-0.7.2/)
[info] Compiling 202 Scala sources and 9 Java sources to /root/spark-0.7.2/core/target/scala-2.9.3/classes...
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:4: not found: type JobContext
[error] def newJobContext(conf: JobConf, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:4: not found: type JobConf
[error] def newJobContext(conf: JobConf, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:4: not found: type JobID
[error] def newJobContext(conf: JobConf, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:4: not found: type JobContext
[error] def newJobContext(conf: JobConf, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:6: not found: type TaskAttemptContext
[error] def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:6: not found: type JobConf
[error] def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:6: not found: type TaskAttemptID
[error] def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapred/HadoopMapRedUtil.scala:6: not found: type TaskAttemptContext
[error] def newTaskAttemptContext(conf: JobConf, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:3: object conf is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.conf.Configuration
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:6: not found: type JobContext
[error] def newJobContext(conf: Configuration, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:6: not found: type JobID
[error] def newJobContext(conf: Configuration, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:6: not found: type JobContext
[error] def newJobContext(conf: Configuration, jobId: JobID): JobContext = new JobContext(conf, jobId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:8: not found: type TaskAttemptContext
[error] def newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:8: not found: type TaskAttemptID
[error] def newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/hadoop1/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala:8: not found: type TaskAttemptContext
[error] def newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContext(conf, attemptId)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:3: object fs is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.fs.FileSystem
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:4: object fs is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.fs.Path
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:5: object util is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.util.ReflectionUtils
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:6: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.NullWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:7: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.Text
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:26: not found: type JobConf
[error] class HadoopWriter(@transient jobConf: JobConf) extends Logging with HadoopMapRedUtil with Serializable {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SerializableWritable.scala:6: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.Writable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:34: not found: type JobID
[error] private var jID: SerializableWritable[JobID] = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:35: not found: type TaskAttemptID
[error] private var taID: SerializableWritable[TaskAttemptID] = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:37: not found: type RecordWriter
[error] @transient private var writer: RecordWriter[AnyRef,AnyRef] = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:38: not found: type OutputFormat
[error] @transient private var format: OutputFormat[AnyRef,AnyRef] = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:39: not found: type OutputCommitter
[error] @transient private var committer: OutputCommitter = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:40: not found: type JobContext
[error] @transient private var jobContext: JobContext = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:41: not found: type TaskAttemptContext
[error] @transient private var taskContext: TaskAttemptContext = null
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:136: not found: type JobContext
[error] private def getJobContext(): JobContext = {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:129: not found: type OutputCommitter
[error] private def getOutputCommitter(): OutputCommitter = {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:63: not found: value FileOutputFormat
[error] val path = FileOutputFormat.getOutputPath(conf.value)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:121: not found: type OutputFormat
[error] private def getOutputFormat(): OutputFormat[AnyRef,AnyRef] = {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:143: not found: type TaskAttemptContext
[error] private def getTaskContext(): TaskAttemptContext = {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:124: not found: type OutputFormat
[error] .asInstanceOf[OutputFormat[AnyRef,AnyRef]]
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:155: not found: type JobID
[error] jID = new SerializableWritable[JobID](HadoopWriter.createJobID(now, jobid))
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:156: not found: type TaskAttemptID
[error] taID = new SerializableWritable[TaskAttemptID](
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:170: not found: type JobID
[error] def createJobID(time: Date, id: Int): JobID = {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:173: not found: type JobID
[error] return new JobID(jobtrackerID, id)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/HadoopWriter.scala:176: not found: type JobConf
[error] def createPathFromString(path: String, conf: JobConf): Path = {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:11: object conf is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.conf.Configuration
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:12: object fs is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.fs.Path
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:13: FileOutputCommitter is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.FileOutputCommitter
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:14: FileOutputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.FileOutputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:16: JobConf is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.JobConf
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:17: OutputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.OutputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:19: object lib is not a member of package org.apache.hadoop.mapreduce
[error] import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat => NewFileOutputFormat}
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:20: OutputFormat is not a member of org.apache.hadoop.mapreduce
[error] import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat, RecordWriter => NewRecordWriter, Job => NewAPIHadoopJob, HadoopMapReduceUtil, TaskAttemptID, TaskAttemptContext}
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:16: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.Writable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:31: Job is not a member of org.apache.hadoop.mapreduce
[error] import org.apache.hadoop.mapreduce.{Job => NewHadoopJob}
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:30: object lib is not a member of package org.apache.hadoop.mapreduce
[error] import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:29: InputFormat is not a member of org.apache.hadoop.mapreduce
[error] import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:28: TextInputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.TextInputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:27: JobConf is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.JobConf
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:26: FileInputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.FileInputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:25: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.Text
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:24: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.NullWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:23: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.ArrayWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:22: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.BytesWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:21: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.BooleanWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:20: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.DoubleWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:19: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.FloatWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:18: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.LongWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:17: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.IntWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:15: SequenceFileInputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.SequenceFileInputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:14: InputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.InputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:13: object conf is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.conf.Configuration
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:12: object fs is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.fs.Path
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SequenceFileRDDFunctions.scala:21: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.Writable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:793: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def intWritableConverter() = simpleWritableConverter[Int, IntWritable](_.get)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:795: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def longWritableConverter() = simpleWritableConverter[Long, LongWritable](_.get)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:797: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def doubleWritableConverter() = simpleWritableConverter[Double, DoubleWritable](_.get)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:799: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def floatWritableConverter() = simpleWritableConverter[Float, FloatWritable](_.get)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:801: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def booleanWritableConverter() = simpleWritableConverter[Boolean, BooleanWritable](_.get)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:803: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def bytesWritableConverter() = simpleWritableConverter[Array[Byte], BytesWritable](_.getBytes)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SparkContext.scala:805: could not find implicit value for evidence parameter of type ClassManifest[<error>]
[error] implicit def stringWritableConverter() = simpleWritableConverter[String, Text](_.toString)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:513: not found: type OutputFormat
[error] def saveAsHadoopFile[F <: OutputFormat[K, V]](path: String)(implicit fm: ClassManifest[F]) {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:585: not found: type OutputFormat
[error] outputFormatClass: Class[_ <: OutputFormat[_, _]],
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:586: not found: type JobConf
[error] conf: JobConf = new JobConf(self.context.hadoopConfiguration)) {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:521: not found: type NewOutputFormat
[error] def saveAsNewAPIHadoopFile[F <: NewOutputFormat[K, V]](path: String)(implicit fm: ClassManifest[F]) {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:533: not found: type NewOutputFormat
[error] outputFormatClass: Class[_ <: NewOutputFormat[_, _]],
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:535: not found: type NewAPIHadoopJob
[error] val job = new NewAPIHadoopJob(conf)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:548: not found: type TaskAttemptID
[error] val attemptId = new TaskAttemptID(jobtrackerID,
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:552: value getOutputCommitter is not a member of Any
[error] val committer = format.getOutputCommitter(hadoopContext)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:554: value getRecordWriter is not a member of Any
[error] val writer = format.getRecordWriter(hadoopContext).asInstanceOf[NewRecordWriter[K,V]]
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:554: not found: type NewRecordWriter
[error] val writer = format.getRecordWriter(hadoopContext).asInstanceOf[NewRecordWriter[K,V]]
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:568: not found: type TaskAttemptID
[error] val jobAttemptId = new TaskAttemptID(jobtrackerID, stageId, true, 0, 0)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:570: value getOutputCommitter is not a member of Any
[error] val jobCommitter = jobFormat.getOutputCommitter(jobTaskContext)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:586: not found: type JobConf
[error] conf: JobConf = new JobConf(self.context.hadoopConfiguration)) {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:592: not found: value FileOutputFormat
[error] FileOutputFormat.setOutputPath(conf, HadoopWriter.createPathFromString(path, conf))
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/PairRDDFunctions.scala:602: not found: type JobConf
[error] def saveAsHadoopDataset(conf: JobConf) {
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDD.scala:12: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.BytesWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDD.scala:13: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.NullWritable
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDD.scala:14: object io is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.io.Text
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDD.scala:15: TextOutputFormat is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.TextOutputFormat
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDD.scala:691: value saveAsHadoopFile is not a member of spark.RDD[T]
[error] possible cause: maybe a semicolon is missing before `value saveAsHadoopFile'?
[error] .saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDD.scala:700: value saveAsSequenceFile is not a member of spark.RDD[T]
[error] possible cause: maybe a semicolon is missing before `value saveAsSequenceFile'?
[error] .saveAsSequenceFile(path)
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDDCheckpointData.scala:3: object fs is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.fs.Path
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/RDDCheckpointData.scala:4: object conf is not a member of package org.apache.hadoop
[error] import org.apache.hadoop.conf.Configuration
[error] ^
[error] /root/spark-0.7.2/core/src/main/scala/spark/SequenceFileRDDFunctions.scala:15: JobConf is not a member of org.apache.hadoop.mapred
[error] import org.apache.hadoop.mapred.JobConf
[error] ^
[warn] /root/spark-0.7.2/core/src/main/scala/spark/rdd/NewHadoopRDD.scala:45: type <error> in type <error> is unchecked since it is eliminated by erasure
[warn] if (inputFormat.isInstanceOf[Configurable]) {
[warn] ^
[warn] /root/spark-0.7.2/core/src/main/scala/spark/rdd/NewHadoopRDD.scala:63: type <error> in type <error> is unchecked since it is eliminated by erasure
[warn] if (format.isInstanceOf[Configurable]) {
[warn] ^
[warn] two warnings found
[error] 190 errors found
[error] (core/compile:compile) Compilation failed
[error] Total time: 15 s, completed Jun 10, 2013 2:15:58 PM
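
For reference, my understanding (possibly wrong) is that in Spark 0.7.x the Hadoop version to compile against is pinned near the top of project/SparkBuild.scala, roughly as in the excerpt below. I am paraphrasing from memory, so the exact names and default values may differ from what actually ships in 0.7.2:

import sbt._
import Keys._

object SparkBuild extends Build {
  // Hadoop version to build against (approximate defaults, quoted from memory).
  // For example "1.0.4" for an Apache release, or "0.20.2-cdh3u5" for Cloudera.
  val HADOOP_VERSION = "1.0.4"
  val HADOOP_MAJOR_VERSION = "1"   // selects the core/src/hadoop1 shim sources

  // For a Hadoop 2 / CDH4 build these would be changed to something like:
  //   val HADOOP_VERSION = "2.0.0-mr1-cdh4.1.1"
  //   val HADOOP_MAJOR_VERSION = "2"
  ...
}

Since the log shows the core/src/hadoop1 sources being compiled, it looks like I am building against the hadoop1 defaults. Could these "not found" errors simply mean that the hadoop-core jar was never resolved (e.g. a failed download or a corrupted Ivy cache), rather than a problem in the Spark sources themselves?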