val maxSequenceLength = 10
val embeddingDim = 100
val batching = Batching(param.batchSize, Array(maxSequenceLength, embeddingDim))
val trainingDataSet = DataSet.rdd(trainingRDD) -> batching
val valDataSet = DataSet.rdd(valRDD) -> batching
val hiddenSize = 40
val bpttTruncate = 4
val outputSize = 1000
val inputSize = 1000
val model_N = Sequential[Float]()
  .add(Recurrent[Float](hiddenSize, bpttTruncate)
    .add(RnnCell[Float](inputSize, hiddenSize))
    .add(Tanh[Float]()))
  .add(Linear[Float](hiddenSize, outputSize))
  .add(Linear(2, classNum))
  .add(LogSoftMax())
val optimizer = Optimizer(
  model = model_N,
  dataset = trainingDataSet,
  criterion = new CrossEntropyCriterion[Float]()
).asInstanceOf[DistriOptimizer[Float]].disableCheckSingleton()
val numEpochs = 5
optimizer
  .setState(state)
  .setValidation(Trigger.everyEpoch, valDataSet, Array(new Top1Accuracy[Float]))
  .setOptimMethod(new SGD[Float]())
  .setEndWhen(Trigger.maxEpoch(numEpochs))
  .optimize()
This is the error that I'm getting: java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException: requirement failed: input should be a two dimension Tensor
I think I'm lost as to how to reshape my data correctly. I would expect Batching to emit mini-batches of [maxSequenceLength x embeddingDim], but I'm probably wrong. Any support would be much appreciated.
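Given the "input should be a two dimension Tensor" requirement surfacing in the traces below, the Recurrent layer in this BigDL snapshot appears to expect each forward input as a single [seqLen x inputSize] matrix rather than a batched 3D tensor. A minimal shape probe under that assumption (layer sizes mirror the code above; note inputSize has to match embeddingDim = 100, not the 1000 used in the snippet):

import com.intel.analytics.bigdl.nn.{Recurrent, RnnCell, Tanh}
import com.intel.analytics.bigdl.tensor.Tensor

// Hypothetical probe: push one 2D sample through the recurrent block alone.
val rnn = Recurrent[Float](40, 4)     // hiddenSize, bpttTruncate as above
  .add(RnnCell[Float](100, 40))       // inputSize matching embeddingDim
  .add(Tanh[Float]())
val oneSample = Tensor[Float](10, 100).rand() // [maxSequenceLength x embeddingDim]
println(rnn.forward(oneSample).size().mkString("x"))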
java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException: requirement failed
at java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.util.concurrent.FutureTask.get(FutureTask.java:192)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:245)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:125)
at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:85)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: requirement failed
at scala.Predef$.require(Predef.scala:207)
at com.intel.analytics.bigdl.tensor.DenseTensorMath$.addmv(DenseTensorMath.scala:603)
at com.intel.analytics.bigdl.tensor.DenseTensor.addmv(DenseTensor.scala:1204)
at com.intel.analytics.bigdl.nn.Linear.updateOutput(Linear.scala:70)
at com.intel.analytics.bigdl.nn.Linear.updateOutput(Linear.scala:29)
at com.intel.analytics.bigdl.nn.ParallelTable.updateOutput(ParallelTable.scala:36)
at com.intel.analytics.bigdl.nn.RnnCell.updateOutput(RNN.scala:66)
at com.intel.analytics.bigdl.nn.RnnCell.updateOutput(RNN.scala:28)
at com.intel.analytics.bigdl.nn.Recurrent.updateOutput(Recurrent.scala:47)
at com.intel.analytics.bigdl.nn.Recurrent.updateOutput(Recurrent.scala:26)
at com.intel.analytics.bigdl.nn.abstractnn.AbstractModule.forward(AbstractModule.scala:129)
at com.intel.analytics.bigdl.nn.Sequential.updateOutput(Sequential.scala:33)
at com.intel.analytics.bigdl.nn.abstractnn.AbstractModule.forward(AbstractModule.scala:129)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply$mcI$sp(DistriOptimizer.scala:164)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply(DistriOptimizer.scala:158)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply(DistriOptimizer.scala:158)
at com.intel.analytics.bigdl.utils.ThreadPool$$anonfun$1$$anon$4.call(Engine.scala:119)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
java.lang.IllegalArgumentException: requirement failed: total batch size(1) should be at least two times of node number(1) * core number(1), please change your batch size
java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException: requirement failed
at java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.util.concurrent.FutureTask.get(FutureTask.java:192)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:245)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:125)
at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:85)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
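The second failure quoted above is the DistriOptimizer's batch-size check: the total batch size must be at least twice nodeNumber * coreNumber. A small sketch of that constraint with the values from the message (this require fails for batchSize = 1 and passes from 2 upward):

// Constraint from the error message above: batchSize >= 2 * nodes * cores.
val nodeNum = 1
val coreNum = 1
val batchSize = 1 // the failing value; 2 is the smallest passing value here
require(batchSize >= 2 * nodeNum * coreNum,
  s"total batch size($batchSize) should be at least two times of " +
  s"node number($nodeNum) * core number($coreNum), please change your batch size")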
val model_N = Sequential[Double]()
  .add(Recurrent[Double](hiddenSize, bpttTruncate)
    .add(RnnCell[Double](inputSize, hiddenSize))
    .add(Tanh[Double]()))
  // .add(Linear(2, classNum))
  .add(LogSoftMax())
val nrows = 10
val ncols = 100
val batchSize = 2
val trainSet = DataSet.rdd(trainingRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
val valSet = DataSet.rdd(valRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
val hiddenSize = 40
val bpttTruncate = 4
val inputSize = 100
val classNum = 34
val model_N = Sequential[Double]()
  .add(Recurrent[Double](hiddenSize, bpttTruncate)
    .add(RnnCell[Double](inputSize, hiddenSize))
    .add(Tanh[Double]()))
  .add(Reshape(Array(400)))
  .add(Linear(400, classNum))
  .add(LogSoftMax())
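For context on Reshape(Array(400)): with seqLen = nrows = 10 and hiddenSize = 40, the recurrent block emits 10 x 40 = 400 values per sample, so flattening lets a single Linear read every time step. A one-line sanity check of that arithmetic (sizes taken from the code above):

// The Reshape/Linear width must equal seqLen * hiddenSize for this model.
val seqLen = 10; val hiddenSize = 40
require(seqLen * hiddenSize == 400, "Reshape(Array(400)) no longer matches the RNN output")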
java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException: requirement failed: input should be a two dimension Tensor
at java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.util.concurrent.FutureTask.get(FutureTask.java:192)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:245)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:125)
at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:85)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: requirement failed: input should be a two dimension Tensor
at scala.Predef$.require(Predef.scala:219)
at com.intel.analytics.bigdl.nn.Recurrent.updateOutput(Recurrent.scala:35)
at com.intel.analytics.bigdl.nn.Recurrent.updateOutput(Recurrent.scala:26)
at com.intel.analytics.bigdl.nn.abstractnn.AbstractModule.forward(AbstractModule.scala:129)
at com.intel.analytics.bigdl.nn.Sequential.updateOutput(Sequential.scala:33)
at com.intel.analytics.bigdl.nn.abstractnn.AbstractModule.forward(AbstractModule.scala:129)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply$mcI$sp(DistriOptimizer.scala:164)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply(DistriOptimizer.scala:158)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply(DistriOptimizer.scala:158)
at com.intel.analytics.bigdl.utils.ThreadPool$$anonfun$1$$anon$4.call(Engine.scala:119)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
... 3 more
error: too many arguments for method copy: (other: com.intel.analytics.bigdl.dataset.Sample[Double])com.intel.analytics.bigdl.dataset.Sample[Double]
buffer.copy(featureBuffer, labelBuffer,
java.util.concurrent.ExecutionException: java.lang.IllegalArgumentException: requirement failed: invalid size
at java.util.concurrent.FutureTask.report(FutureTask.java:122)
at java.util.concurrent.FutureTask.get(FutureTask.java:192)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$7.apply(DistriOptimizer.scala:176)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:245)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:245)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:176)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4.apply(DistriOptimizer.scala:125)
at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:89)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
at org.apache.spark.scheduler.Task.run(Task.scala:85)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: requirement failed: invalid size
at scala.Predef$.require(Predef.scala:219)
at com.intel.analytics.bigdl.tensor.DenseTensor.valueAt(DenseTensor.scala:513)
at com.intel.analytics.bigdl.nn.ClassNLLCriterion.updateOutput(ClassNLLCriterion.scala:44)
at com.intel.analytics.bigdl.nn.CrossEntropyCriterion.updateOutput(CrossEntropyCriterion.scala:40)
at com.intel.analytics.bigdl.nn.CrossEntropyCriterion.updateOutput(CrossEntropyCriterion.scala:32)
at com.intel.analytics.bigdl.nn.abstractnn.AbstractCriterion.forward(AbstractCriterion.scala:43)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply$mcI$sp(DistriOptimizer.scala:165)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply(DistriOptimizer.scala:158)
at com.intel.analytics.bigdl.optim.DistriOptimizer$$anonfun$4$$anonfun$5$$anonfun$apply$2.apply(DistriOptimizer.scala:158)
at com.intel.analytics.bigdl.utils.ThreadPool$$anonfun$1$$anon$4.call(Engine.scala:119)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
val nrows = 10
val ncols = 100
val batchSize = 1
val trainSet = DataSet.rdd(trainingRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
val valSet = DataSet.rdd(valRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
val hiddenSize = 40
val bpttTruncate = 4
val inputSize = 100
val classNum = 34
val model_N = Sequential[Double]()
  .add(Recurrent[Double](hiddenSize, bpttTruncate)
    .add(RnnCell[Double](inputSize, hiddenSize))
    .add(Tanh[Double]()))
  .add(Reshape(Array(400)))
  .add(Linear(400, classNum))
  .add(LogSoftMax())
val learningRate: Double = 0.1
val momentum: Double = 0.0
val weightDecay: Double = 0.0
val dampening: Double = 0.0
val state = T(
  "learningRate" -> learningRate,
  "momentum" -> momentum,
  "weightDecay" -> weightDecay,
  "dampening" -> dampening)
val optimizer = Optimizer(
  model = model_N,
  dataset = trainSet,
  criterion = new CrossEntropyCriterion[Double]()
).asInstanceOf[DistriOptimizer[Double]].disableCheckSingleton()
val numEpochs = 5
optimizer
  .setValidation(Trigger.everyEpoch, valSet, Array(new Loss[Double]))
  .setState(state)
  .setEndWhen(Trigger.maxEpoch(numEpochs))
  .optimize()
val state = T("learningRate" -> 0.01, "learningRateDecay" -> 0.0002)
val optimizer = Optimizer(
  model = model_N,
  dataset = trainSet,
  criterion = new ClassNLLCriterion[Double]()
).asInstanceOf[DistriOptimizer[Double]].disableCheckSingleton()
val numEpochs = 5
optimizer.setState(state)
  .setValidation(Trigger.everyEpoch, valSet, Array(new Top1Accuracy[Double]))
  .setOptimMethod(new Adagrad[Double]())
  .setEndWhen(Trigger.maxEpoch(numEpochs))
  .optimize()
val model_N = Sequential[Double]()
  .add(Recurrent[Double](hiddenSize, bpttTruncate)
    .add(RnnCell[Double](inputSize, hiddenSize))
    .add(Tanh[Double]()))
  .add(Select(2, 10))
  // .add(Reshape(Array(400)))
  .add(Linear(40, classNum))
  .add(LogSoftMax())
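The Select(2, 10) variant keeps only the last time step instead of flattening the whole sequence. Assuming the recurrent block emits a [batch x seqLen x hiddenSize] tensor here, a small shape check of that idea (sizes from the model above):

import com.intel.analytics.bigdl.nn.Select
import com.intel.analytics.bigdl.tensor.Tensor

// Assumed shapes: Recurrent output [batch x seqLen x hiddenSize] = [2 x 10 x 40].
// Select(2, 10) picks index 10 along dimension 2 (the final time step),
// leaving the [batch x 40] input that Linear(40, classNum) expects.
val rnnOut = Tensor[Double](2, 10, 40).rand()
val lastStep = Select[Double](2, 10).forward(rnnOut)
println(lastStep.size().mkString("x")) // 2x40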
val trainSet = DataSet.rdd(trainingRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
val path_savedModel = "model_test"
val loaded_model = Module.load[Double](path_savedModel)
Recalling trainSet from above, shouldn't I be able to run prediction on it with something like this:
loaded_model.forward(trainSet)
but I get this exception:
found : com.intel.analytics.bigdl.dataset.AbstractDataSet[com.intel.analytics.bigdl.dataset.MiniBatch[Double],_$5] where type _$5
required: com.intel.analytics.bigdl.nn.abstractnn.Activity
And I'm not sure how to go from an AbstractDataSet to an Activity.
Thanks for the support.
Hi Alessandro,
I just used the script from https://gist.github.com/zhangxiaoli73/6802ff39e0ac5c3f0640041d1b34a6ca and ran some tests.
Spark local, 1 node and 1 core, 378K sentences, batchSize=10, epoch=5: it took just 36.58 minutes to finish training and testing.
So please describe your question more clearly and share your code if possible.
To go from an AbstractDataSet to an Activity, you can use trainSet.toDistributed().data(train = false) to get an RDD[MiniBatch]. Example code:
val path_saveModel = "model_test"
val loaded_model = Module.load[Double](path_saveModel)
// If this is not for training, please set train = false (single pass, no shuffling).
val tmpRDD = trainSet.toDistributed().data(train = false)
val tmpOutput = tmpRDD.mapPartitions(dataIter => {
  dataIter.map(batch => {
    val input = batch.data // the feature tensor of this MiniBatch
    loaded_model.forward(input).toTensor[Double]
  })
}).collect()
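To turn those collected outputs into class predictions, one possible follow-up (a sketch, assuming each element of tmpOutput is a [batchSize x classNum] tensor of log-probabilities):

// Arg-max over the class dimension of each collected batch output.
val predictions = tmpOutput.flatMap { out =>
  val (_, indices) = out.max(2) // max along dim 2 returns (values, indices)
  (1 to out.size(1)).map(i => indices.valueAt(i, 1)) // 1-based class index per row
}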
If you want to test your model, you can also refer to the example test code (such as https://github.com/intel-analytics/BigDL/tree/master/dl/src/main/scala/com/intel/analytics/bigdl/models/lenet).
Cherry
val nodeNum = 1
val coreNum = 8
val sc = new SparkContext(
  Engine.init(nodeNum, coreNum, true).get
    .setMaster("local[*]")
    .setAppName("test")
    .set("spark.task.maxFailures", "1"))
val sqlContext = new SQLContext(sc)
val path = "~/Desktop/PATH_TSV.tsv"
val data = sqlContext.read.format("com.databricks.spark.csv")
  .option("header", "true")
  .option("delimiter", "\t")
  .load(path)
val vectorizedRdd: RDD[(Array[Double], Double)] = labeled.select("labels", "vectors").rdd
  .map(r => (r(1).asInstanceOf[WrappedArray[Double]].toArray, r(0).asInstanceOf[Double] + 1.0))
val Array(trainingRDD, valRDD) = vectorizedRdd.randomSplit(Array(trainingSplit, 1 - trainingSplit))
val nrows = 10
val ncols = 100
val batchSize = 64
val trainSet = DataSet.rdd(trainingRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
val valSet = DataSet.rdd(valRDD) -> ToSample(nrows, ncols) -> SampleToBatch(batchSize)
2017-02-08 14:14:31 INFO DistriOptimizer$:241 - [Epoch 4 157248/244390][Iteration 13915][Wall Clock 2789.300859796s] Train 64 in 0.194042336seconds. Throughput is 329.8249305759749 records/second. Loss is 1.755370063192032.
2017-02-08 14:37:17 INFO DistriOptimizer$:241 - [Epoch 6 89472/244390][Iteration 20494][Wall Clock 4155.918198255s] Train 64 in 0.21744152seconds. Throughput is 294.33201165996263 records/second. Loss is 1.9625739991949165.
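The throughput figures in those lines are just batch size divided by wall time per iteration; checking the first one's arithmetic:

// Throughput = records per batch / seconds per batch, for the first log line above.
val batch = 64
val seconds = 0.194042336
println(batch / seconds) // ≈ 329.82 records/second, matching the log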
2017-02-08 11:18:43 INFO ThreadPool$:87 - Set mkl threads to 1 on thread 1
2017-02-08 11:18:43 WARN Engine$:344 - Invalid env setting. Please use bigdl.sh set the env. For spark application, please use Engine.sparkConf() to initialize your sparkConf
2017-02-08 11:18:43 INFO ThreadPool$:87 - Set mkl threads to 1 on thread 1
2017-02-08 11:18:43 WARN SparkConf:66 - The configuration key 'spark.akka.frameSize' has been deprecated as of Spark 1.6 and may be removed in the future. Please use the new key 'spark.rpc.message.maxSize' instead.
2017-02-08 11:18:43 INFO SparkContext:54 - Running Spark version 2.0.0
2017-02-08 11:18:43 WARN NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2017-02-08 11:18:44 WARN Utils:66 - Your hostname, XXXXXXXX resolves to a loopback address: 127.0.0.1; using 192.168.1.XXX instead (on interface en0)
2017-02-08 11:18:44 WARN Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2017-02-08 11:18:44 INFO SecurityManager:54 - Changing view acls to: user
2017-02-08 11:18:44 INFO SecurityManager:54 - Changing modify acls to: user
2017-02-08 11:18:44 INFO SecurityManager:54 - Changing view acls groups to:
2017-02-08 11:18:44 INFO SecurityManager:54 - Changing modify acls groups to:
2017-02-08 11:18:44 INFO SecurityManager:54 - SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(user); groups with view permissions: Set(); users with modify permissions: Set(user); groups with modify permissions: Set()
2017-02-08 11:18:44 INFO Utils:54 - Successfully started service 'sparkDriver' on port 49347.
2017-02-08 11:18:44 INFO SparkEnv:54 - Registering MapOutputTracker
2017-02-08 11:18:44 INFO SparkEnv:54 - Registering BlockManagerMaster
2017-02-08 11:18:44 INFO DiskBlockManager:54 - Created local directory at /private/var/folders/dy/13g7lrsj31d34ccjfm33hqcsqr4dwy/T/blockmgr-0ed2f0ac-5e71-4148-b68e-ae0949e041c1
2017-02-08 11:18:44 INFO MemoryStore:54 - MemoryStore started with capacity 8.4 GB
2017-02-08 11:18:44 INFO SparkEnv:54 - Registering OutputCommitCoordinator
2017-02-08 11:18:44 INFO log:186 - Logging initialized @18330ms
2017-02-08 11:18:44 INFO Server:327 - jetty-9.2.z-SNAPSHOT
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@495e8a3{/jobs,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@6a7aa675{/jobs/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@6eded11a{/jobs/job,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@602a3237{/jobs/job/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@4b511e61{/stages,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@74a74070{/stages/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@6c6919ff{/stages/stage,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@5de335cf{/stages/stage/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@1e029a04{/stages/pool,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@50e8ed74{/stages/pool/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@74eab077{/storage,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@3063be68{/storage/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@3a3bc0da{/storage/rdd,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@2d2f09a4{/storage/rdd/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@c677d7e{/environment,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@215a0264{/environment/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@45832b85{/executors,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@103478b8{/executors/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@270f7b4d{/executors/threadDump,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@56b704ea{/executors/threadDump/json,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@ab4d2ba{/static,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@39f68aec{/,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@65ff4b8c{/api,null,AVAILABLE}
2017-02-08 11:18:44 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@b81938d{/stages/stage/kill,null,AVAILABLE}
2017-02-08 11:18:44 INFO ServerConnector:266 - Started ServerConnector@3d8bd881{HTTP/1.1}{0.0.0.0:4040}
2017-02-08 11:18:44 INFO Server:379 - Started @18429ms
2017-02-08 11:18:44 INFO Utils:54 - Successfully started service 'SparkUI' on port 4040.
2017-02-08 11:18:44 INFO SparkUI:54 - Bound SparkUI to 0.0.0.0, and started at http://192.168.1.66:4040
2017-02-08 11:18:44 INFO Executor:54 - Starting executor ID driver on host localhost
2017-02-08 11:18:44 INFO Utils:54 - Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 49348.
scala -cp /dl/target/bigdl-0.1.0-SNAPSHOT-jar-with-dependencies-and-spark.jar -J-Xmx16g
2017-02-07 10:12:44 WARN Utils:66 - Your hostname, MACHINE_ID resolves to a loopback address: 127.0.0.1; using 10.36.XX.XXX instead (on interface en0)
2017-02-07 10:12:44 WARN Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2017-02-07 10:12:44 INFO SecurityManager:54 - Changing view acls to: MY_USER
2017-02-07 10:12:44 INFO SecurityManager:54 - Changing modify acls to: MY_USER
2017-02-07 10:12:44 INFO SecurityManager:54 - Changing view acls groups to:
2017-02-07 10:12:44 INFO SecurityManager:54 - Changing modify acls groups to:
2017-02-07 10:12:44 INFO SecurityManager:54 - SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(MY_USER); groups with view permissions: Set(); users with modify permissions: Set(MY_USER); groups with modify permissions: Set()
scala> vectorizedRdd.coalesce(1, true).count()
2017-02-09 10:36:30 INFO SparkContext:54 - Starting job: count at <console>:61
2017-02-09 11:01:44 INFO DAGScheduler:54 - Job 5 finished: count at <console>:61, took 1514.288633 s
res0: Long = 257291
...
scala -cp /dl/target/bigdl-0.1.0-SNAPSHOT-jar-with-dependencies-and-spark.jar -J-Xmx16g
mvn clean package -DskipTests -P mac -P spark_2.0
scala> val sc = new SparkContext(Engine.init(1, 1, true).get.setMaster("local[*]").setAppName("W2V").set("spark.task.maxFailures", "1"))
2017-02-10 10:45:41 INFO ThreadPool$:87 - Set mkl threads to 1 on thread 1
2017-02-10 10:45:41 WARN Engine$:344 - Invalid env setting. Please use bigdl.sh set the env. For spark application, please use Engine.sparkConf() to initialize your sparkConf
2017-02-10 10:45:41 INFO ThreadPool$:87 - Set mkl threads to 1 on thread 1
2017-02-10 10:45:41 WARN SparkConf:66 - The configuration key 'spark.akka.frameSize' has been deprecated as of Spark 1.6 and may be removed in the future. Please use the new key 'spark.rpc.message.maxSize' instead.
2017-02-10 10:45:41 INFO SparkContext:54 - Running Spark version 2.0.0
2017-02-10 10:45:41 WARN NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2017-02-10 10:45:42 WARN Utils:66 - Your hostname, MY_MACHINE_ID resolves to a loopback address: 127.0.0.1; using 10.36.XX.XXX instead (on interface en0)
2017-02-10 10:45:42 WARN Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2017-02-10 10:45:42 INFO SecurityManager:54 - Changing view acls to: MY_USER_ID
2017-02-10 10:45:42 INFO SecurityManager:54 - Changing modify acls to: MY_USER_ID
2017-02-10 10:45:42 INFO SecurityManager:54 - Changing view acls groups to:
2017-02-10 10:45:42 INFO SecurityManager:54 - Changing modify acls groups to:
2017-02-10 10:45:42 INFO SecurityManager:54 - SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(MY_USER_ID); groups with view permissions: Set(); users with modify permissions: Set(MY_USER_ID); groups with modify permissions: Set()
2017-02-10 10:45:42 INFO Utils:54 - Successfully started service 'sparkDriver' on port 64233.
2017-02-10 10:45:42 INFO SparkEnv:54 - Registering MapOutputTracker
2017-02-10 10:45:42 INFO SparkEnv:54 - Registering BlockManagerMaster
2017-02-10 10:45:42 INFO DiskBlockManager:54 - Created local directory at /private/var/folders/dy/13g7lrsj31d34ccjfm33hqcsqr4dwy/T/blockmgr-52ce438e-3b24-4403-834c-400184117cfb
2017-02-10 10:45:42 INFO MemoryStore:54 - MemoryStore started with capacity 8.4 GB
2017-02-10 10:45:42 INFO SparkEnv:54 - Registering OutputCommitCoordinator
2017-02-10 10:45:42 INFO log:186 - Logging initialized @20908ms
2017-02-10 10:45:42 INFO Server:327 - jetty-9.2.z-SNAPSHOT
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@6722db6e{/jobs,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@18f20260{/jobs/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@4ae33a11{/jobs/job,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@7a48e6e2{/jobs/job/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@b40bb6e{/stages,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@3a94964{/stages/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@5049d8b2{/stages/stage,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@6d0b5baf{/stages/stage/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@631e06ab{/stages/pool,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@2a3591c5{/stages/pool/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@34a75079{/storage,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@346a361{/storage/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@107ed6fc{/storage/rdd,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@1643d68f{/storage/rdd/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@186978a6{/environment,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@2e029d61{/environment/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@482d776b{/executors,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@4052274f{/executors/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@132ddbab{/executors/threadDump,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@297ea53a{/executors/threadDump/json,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@acb0951{/static,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@5bf22f18{/,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@267f474e{/api,null,AVAILABLE}
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@7a7471ce{/stages/stage/kill,null,AVAILABLE}
2017-02-10 10:45:42 INFO ServerConnector:266 - Started ServerConnector@3f2ef586{HTTP/1.1}{0.0.0.0:4040}
2017-02-10 10:45:42 INFO Server:379 - Started @21002ms
2017-02-10 10:45:42 INFO Utils:54 - Successfully started service 'SparkUI' on port 4040.
2017-02-10 10:45:42 INFO SparkUI:54 - Bound SparkUI to 0.0.0.0, and started at http://10.36.XX.XXX:4040
2017-02-10 10:45:42 INFO Executor:54 - Starting executor ID driver on host localhost
2017-02-10 10:45:42 INFO Utils:54 - Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 64234.
2017-02-10 10:45:42 INFO NettyBlockTransferService:54 - Server created on 10.36.XX.XXX:64234
2017-02-10 10:45:42 INFO BlockManagerMaster:54 - Registering BlockManager BlockManagerId(driver,10.36.XX.XXX, 64234)
2017-02-10 10:45:42 INFO BlockManagerMasterEndpoint:54 - Registering block manager 10.36.XX.XXX:64234 with 8.4 GB RAM, BlockManagerId(driver, 10.36.XX.XXX, 64234)
2017-02-10 10:45:42 INFO BlockManagerMaster:54 - Registered BlockManager BlockManagerId(driver, 10.36.XX.XXX, 64234)
2017-02-10 10:45:42 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@6b739528{/metrics/json,null,AVAILABLE}
sc: org.apache.spark.SparkContext = org.apache.spark.SparkContext@3be8821f
scala> 2017-02-10 10:39:45 WARN HttpChannel:384 - /
java.lang.NoSuchMethodError: javax.servlet.http.HttpServletRequest.isAsyncStarted()Z
at org.spark_project.jetty.servlets.gzip.GzipHandler.handle(GzipHandler.java:484)
at org.spark_project.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:215)
at org.spark_project.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97)
at org.spark_project.jetty.server.Server.handle(Server.java:499)
at org.spark_project.jetty.server.HttpChannel.handle(HttpChannel.java:311)
at org.spark_project.jetty.server.HttpConnection.onFillable(HttpConnection.java:257)
at org.spark_project.jetty.io.AbstractConnection$2.run(AbstractConnection.java:544)
at org.spark_project.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:635)
at org.spark_project.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:555)
at java.lang.Thread.run(Thread.java:745)
2017-02-10 10:39:45 WARN HttpChannel:482 - Could not send response error 500: java.lang.NoSuchMethodError: javax.servlet.http.HttpServletRequest.isAsyncStarted()Z
2017-02-10 10:39:45 WARN HttpChannel:384 - /jobs/
java.lang.NoSuchMethodError: javax.servlet.http.HttpServletRequest.isAsyncStarted()Z
at org.spark_project.jetty.servlets.gzip.GzipHandler.handle(GzipHandler.java:484)
at org.spark_project.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:215)
at org.spark_project.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97)
at org.spark_project.jetty.server.Server.handle(Server.java:499)
at org.spark_project.jetty.server.HttpChannel.handle(HttpChannel.java:311)
at org.spark_project.jetty.server.HttpConnection.onFillable(HttpConnection.java:257)
at org.spark_project.jetty.io.AbstractConnection$2.run(AbstractConnection.java:544)
at org.spark_project.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:635)
at org.spark_project.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:555)
at java.lang.Thread.run(Thread.java:745)
...
mvn clean package -DskipTests -P mac -P spark_2.0
source scripts/bigdl.sh
2017-02-14 12:40:38 INFO Server:379 - Started @23817ms
2017-02-14 12:40:38 INFO Utils:54 - Successfully started service 'SparkUI' on port 4041.
2017-02-14 12:40:38 INFO SparkUI:54 - Bound SparkUI to 0.0.0.0, and started at http://10.36.XX.XXX:4041
2017-02-14 12:40:38 INFO Executor:54 - Starting executor ID driver on host localhost
2017-02-14 12:40:38 INFO Utils:54 - Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 58878.
2017-02-14 12:40:38 INFO NettyBlockTransferService:54 - Server created on 10.36.XX.XXX:58878
2017-02-14 12:40:38 INFO BlockManagerMaster:54 - Registering BlockManager BlockManagerId(driver,10.36.XX.XXX, 58878)
2017-02-14 12:40:38 INFO BlockManagerMasterEndpoint:54 - Registering block manager 10.36.XX.XXX:58878 with 8.4 GB RAM, BlockManagerId(driver, 10.36.XX.XXX, 58878)
2017-02-14 12:40:38 INFO BlockManagerMaster:54 - Registered BlockManager BlockManagerId(driver, 10.36.XX.XXX, 58878)
2017-02-14 12:40:38 INFO ContextHandler:744 - Started o.s.j.s.ServletContextHandler@7bc2ae16{/metrics/json,null,AVAILABLE}
But as soon as I try to access the UI from the browser I get this:
scala> 2017-02-14 12:40:50 WARN HttpChannel:384 - /
java.lang.NoSuchMethodError: javax.servlet.http.HttpServletRequest.isAsyncStarted()Z
at org.spark_project.jetty.servlets.gzip.GzipHandler.handle(GzipHandler.java:484)
at org.spark_project.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:215)
at org.spark_project.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97)
at org.spark_project.jetty.server.Server.handle(Server.java:499)
at org.spark_project.jetty.server.HttpChannel.handle(HttpChannel.java:311)
at org.spark_project.jetty.server.HttpConnection.onFillable(HttpConnection.java:257)
at org.spark_project.jetty.io.AbstractConnection$2.run(AbstractConnection.java:544)
at org.spark_project.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:635)
at org.spark_project.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:555)
at java.lang.Thread.run(Thread.java:745)
2017-02-14 12:40:50 WARN HttpChannel:482 - Could not send response error 500: java.lang.NoSuchMethodError: javax.servlet.http.HttpServletRequest.isAsyncStarted()Z
2017-02-14 12:40:50 WARN HttpChannel:384 - /jobs/
java.lang.NoSuchMethodError: javax.servlet.http.HttpServletRequest.isAsyncStarted()Z
at org.spark_project.jetty.servlets.gzip.GzipHandler.handle(GzipHandler.java:484)
at org.spark_project.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:215)
at org.spark_project.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:97)
at org.spark_project.jetty.server.Server.handle(Server.java:499)
at org.spark_project.jetty.server.HttpChannel.handle(HttpChannel.java:311)
at org.spark_project.jetty.server.HttpConnection.onFillable(HttpConnection.java:257)
at org.spark_project.jetty.io.AbstractConnection$2.run(AbstractConnection.java:544)
at org.spark_project.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:635)
at org.spark_project.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:555)
at java.lang.Thread.run(Thread.java:745)
2017-02-14 12:40:50 WARN QueuedThreadPool:610 -
java.lang.NoSuchMethodError: javax.servlet.http.HttpServletResponse.getStatus()I
at org.spark_project.jetty.server.handler.ErrorHandler.handle(ErrorHandler.java:112)
at org.spark_project.jetty.server.Response.sendError(Response.java:597)
at org.spark_project.jetty.server.HttpChannel.handleException(HttpChannel.java:487)
at org.spark_project.jetty.server.HttpConnection$HttpChannelOverHttp.handleException(HttpConnection.java:594)
at org.spark_project.jetty.server.HttpChannel.handle(HttpChannel.java:387)
at org.spark_project.jetty.server.HttpConnection.onFillable(HttpConnection.java:257)
at org.spark_project.jetty.io.AbstractConnection$2.run(AbstractConnection.java:544)
at org.spark_project.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:635)
at org.spark_project.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:555)
at java.lang.Thread.run(Thread.java:745)
2017-02-14 12:40:50 WARN QueuedThreadPool:617 - Unexpected thread death: org.spark_project.jetty.util.thread.QueuedThreadPool$3@639f749f in SparkUI{STARTED,8<=8<=200,i=2,q=0}
Hi Alessandro,
I tested on Ubuntu and might try Mac later.
In the meantime, if you want to run on Spark interactively, the more standard way is to launch it like this:
source scripts/bigdl.sh
$SPARK_HOME/bin/spark-shell --jars ~/bin/god/BigDL/dist/lib/bigdl-0.1.0-SNAPSHOT-jar-with-dependencies.jar
Thanks,
Zhichao