druid.storage.type=s3
druid.storage.bucket=druid-data
druid.storage.baseKey=druid/segments
druid.indexer.logs.type=s3
druid.indexer.logs.s3Bucket=druid-data
druid.indexer.logs.s3Prefix=druid/indexing-logs
"ioConfig": {
"type": "hadoop",
"inputSpec": {
"type": "static",
"paths": "s3n://druid-data/input/005084yyuB2SAZmac2ok8qNccA6uHTawU=.csv.gz"
}
},
"tuningConfig": {
"type": "hadoop",
"jobProperties" : {
"fs.s3n.awsAccessKeyId" : "XXX",
"fs.s3n.awsSecretAccessKey" : "XXXX"
}
}

But the job fails:

2016-06-07T07:02:49,238 ERROR [pool-19-thread-1] io.druid.indexer.JobHelper - Exception in retry loop
java.lang.NullPointerException
	at org.apache.hadoop.fs.s3native.NativeS3FileSystem.getFileStatus(NativeS3FileSystem.java:433) ~[hadoop-common-2.3.0.jar:?]
	at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1399) ~[hadoop-common-2.3.0.jar:?]
	at org.apache.hadoop.fs.s3native.NativeS3FileSystem.create(NativeS3FileSystem.java:341) ~[hadoop-common-2.3.0.jar:?]
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:907) ~[hadoop-common-2.3.0.jar:?]
	at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:868) ~[hadoop-common-2.3.0.jar:?]
	at io.druid.indexer.JobHelper$4.push(JobHelper.java:368) [druid-indexing-hadoop-0.9.0.jar:0.9.0]
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:1.7.0_101]
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) ~[?:1.7.0_101]
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.7.0_101]
	at java.lang.reflect.Method.invoke(Method.java:606) ~[?:1.7.0_101]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186) [hadoop-common-2.3.0.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102) [hadoop-common-2.3.0.jar:?]
	at com.sun.proxy.$Proxy191.push(Unknown Source) [?:?]
	at io.druid.indexer.JobHelper.serializeOutIndex(JobHelper.java:386) [druid-indexing-hadoop-0.9.0.jar:0.9.0]
	at io.druid.indexer.IndexGeneratorJob$IndexGeneratorReducer.reduce(IndexGeneratorJob.java:703) [druid-indexing-hadoop-0.9.0.jar:0.9.0]
	at io.druid.indexer.IndexGeneratorJob$IndexGeneratorReducer.reduce(IndexGeneratorJob.java:469) [druid-indexing-hadoop-0.9.0.jar:0.9.0]
	at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171) [hadoop-mapreduce-client-core-2.3.0.jar:?]
	at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627) [hadoop-mapreduce-client-core-2.3.0.jar:?]
	at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389) [hadoop-mapreduce-client-core-2.3.0.jar:?]
	at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319) [hadoop-mapreduce-client-common-2.3.0.jar:?]
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) [?:1.7.0_101]
	at java.util.concurrent.FutureTask.run(FutureTask.java:262) [?:1.7.0_101]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [?:1.7.0_101]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [?:1.7.0_101]
	at java.lang.Thread.run(Thread.java:745) [?:1.7.0_101]

It seems like it cannot find some file or folder in S3, but I don't understand which one. Can somebody point me in the right direction?

Thank you in advance,
Nikita