Versions: Spark 2.0.0, Alluxio 1.2.0, Java 1.7.0_45, CentOS 6.7
When creating the SparkSession, I use this config: builder.config("alluxio.user.file.writetype.default", "CACHE_THROUGH");
alluxio-env.sh
ALLUXIO_UNDERFS_ADDRESS="/disk2/alluxio-underFSStorage"
After a dataset is created,
String path = "alluxio://ip:port/dataset/";
ds.write().mode(SaveMode.Overwrite).save(path + "ds1.rec");

Then an exception comes out:

Caused by: java.io.FileNotFoundException: /disk2/alluxio-underFSStorage/dataset/ds1.rec/_temporary/0/_temporary/attempt_201611240909_0001_m_000000_3/part-r-00000-0605ea28-283a-48c4-8bae-7fc0a5c6d0e5.snappy.parquet.alluxio.0x7B25791C16070780.tmp (No such file or directory)
at java.io.FileOutputStream.open(Native Method) at java.io.FileOutputStream.<init>(FileOutputStream.java:221) at java.io.FileOutputStream.<init>(FileOutputStream.java:110) at alluxio.underfs.local.LocalUnderFileSystem.create(LocalUnderFileSystem.java:77) at alluxio.client.file.FileOutStream.<init>(FileOutStream.java:124) at alluxio.client.file.BaseFileSystem.createFile(BaseFileSystem.java:102) at alluxio.hadoop.AbstractFileSystem.create(AbstractFileSystem.java:153) at alluxio.hadoop.FileSystem.create(FileSystem.java:25) at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908) at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889) at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:786) at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:176) at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:160) at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:289) at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:262) at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetFileFormat.scala:548) at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:138) at org.apache.spark.sql.execution.datasources.BaseWriterContainer.newOutputWriter(WriterContainer.scala:131) at org.apache.spark.sql.execution.datasources.DefaultWriterContainer.writeRows(WriterContainer.scala:247) at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143) at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand$$anonfun$run$1$$anonfun$apply$mcV$sp$1.apply(InsertIntoHadoopFsRelationCommand.scala:143) at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70) at org.apache.spark.scheduler.Task.run(Task.scala:85) at 
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274) ... 3 more
How could I solve it?
--
You received this message because you are subscribed to the Google Groups "Alluxio Users" group.
To unsubscribe from this group and stop receiving emails from it, send an email to alluxio-users+unsubscribe@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.
--