- Omitting hadoop-config path altogether when starting the indexer
- Point to a local hadoop install (not running) top-level directory
- Point to the local install's config directory (with nothing running)
- Specifically add hadoop-common-2.3.0.jar to the class path
java.lang.NoClassDefFoundError: org/apache/hadoop/fs/FSDataInputStream at io.druid.indexing.common.task.IndexTask.determinePartitions(IndexTask.java:268) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.common.task.IndexTask.run(IndexTask.java:192) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235) [druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214) [druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at java.util.concurrent.FutureTask.run(FutureTask.java:262) [?:1.7.0_51] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [?:1.7.0_51] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [?:1.7.0_51] at java.lang.Thread.run(Thread.java:744) [?:1.7.0_51] Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.fs.FSDataInputStream at java.net.URLClassLoader$1.run(URLClassLoader.java:366) ~[?:1.7.0_51] at java.net.URLClassLoader$1.run(URLClassLoader.java:355) ~[?:1.7.0_51] at java.security.AccessController.doPrivileged(Native Method) ~[?:1.7.0_51] at java.net.URLClassLoader.findClass(URLClassLoader.java:354) ~[?:1.7.0_51] at java.lang.ClassLoader.loadClass(ClassLoader.java:425) ~[?:1.7.0_51] at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308) ~[?:1.7.0_51] at java.lang.ClassLoader.loadClass(ClassLoader.java:358) ~[?:1.7.0_51] ... 8 more 2015-07-10T17:19:50,352 ERROR [main] io.druid.cli.CliPeon - Error when starting up. Failing. java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.NoClassDefFoundError: org/apache/hadoop/fs/FSDataInputStream at com.google.common.base.Throwables.propagate(Throwables.java:160) ~[guava-16.0.1.jar:?] 
at io.druid.indexing.worker.executor.ExecutorLifecycle.join(ExecutorLifecycle.java:162) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.cli.CliPeon.run(CliPeon.java:212) [druid-services-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.cli.Main.main(Main.java:91) [druid-services-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] Caused by: java.util.concurrent.ExecutionException: java.lang.NoClassDefFoundError: org/apache/hadoop/fs/FSDataInputStream at com.google.common.util.concurrent.AbstractFuture$Sync.getValue(AbstractFuture.java:299) ~[guava-16.0.1.jar:?] at com.google.common.util.concurrent.AbstractFuture$Sync.get(AbstractFuture.java:286) ~[guava-16.0.1.jar:?] at com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:116) ~[guava-16.0.1.jar:?] at io.druid.indexing.worker.executor.ExecutorLifecycle.join(ExecutorLifecycle.java:159) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] ... 2 more Caused by: java.lang.NoClassDefFoundError: org/apache/hadoop/fs/FSDataInputStream at io.druid.indexing.common.task.IndexTask.determinePartitions(IndexTask.java:268) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.common.task.IndexTask.run(IndexTask.java:192) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[?:1.7.0_51] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) ~[?:1.7.0_51] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) ~[?:1.7.0_51] at 
java.lang.Thread.run(Thread.java:744) ~[?:1.7.0_51] Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.fs.FSDataInputStream at java.net.URLClassLoader$1.run(URLClassLoader.java:366) ~[?:1.7.0_51] at java.net.URLClassLoader$1.run(URLClassLoader.java:355) ~[?:1.7.0_51] at java.security.AccessController.doPrivileged(Native Method) ~[?:1.7.0_51] at java.net.URLClassLoader.findClass(URLClassLoader.java:354) ~[?:1.7.0_51] at java.lang.ClassLoader.loadClass(ClassLoader.java:425) ~[?:1.7.0_51] at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308) ~[?:1.7.0_51] at java.lang.ClassLoader.loadClass(ClassLoader.java:358) ~[?:1.7.0_51] at io.druid.indexing.common.task.IndexTask.determinePartitions(IndexTask.java:268) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.common.task.IndexTask.run(IndexTask.java:192) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214) ~[druid-indexing-service-0.8.0-rc2-SNAPSHOT.jar:0.8.0-rc2-SNAPSHOT] at java.util.concurrent.FutureTask.run(FutureTask.java:262) ~[?:1.7.0_51] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) ~[?:1.7.0_51] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) ~[?:1.7.0_51] at java.lang.Thread.run(Thread.java:744) ~[?:1.7.0_51]
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/e0c8629e-5c7a-45a1-b1ac-9a61ae88017a%40googlegroups.com.
--
You received this message because you are subscribed to a topic in the Google Groups "Druid User" group.
To unsubscribe from this topic, visit https://groups.google.com/d/topic/druid-user/Yt5yF9EReUY/unsubscribe.
To unsubscribe from this group and all its topics, send an email to druid-user+...@googlegroups.com.
To post to this group, send email to druid...@googlegroups.com.
Fangjin, I have another question regarding loading batch data. The wiki says to expect a "Received SUCCESS status for task:" message in the indexer logs. I do not get one. Also, the indexer console shows the task as still running, but clicking on the logs shows that the task is completed. I do see the "Announcing segment" log in the historical node logs, and the timeBoundary query works as well.
Can you help me here? Thanks. Also, to note: I started the co-ordinator, updated the number of replicants for _default to 1, stopped it, restarted it, and then did the batch operation. Clicking on 'update' in the co-ordinator console did not update the mysql row. Instead, it created a new rule with a different version, and the co-ordinator continued to pick up the first version it found, because that shows up first in the mysql query. So, I had to delete the older mysql rule row.
--
You received this message because you are subscribed to the Google Groups "Druid User" group.
To unsubscribe from this group and stop receiving emails from it, send an email to druid-user+...@googlegroups.com.
To post to this group, send email to druid...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/CAMLk_USKc1Dbwk27tiFEicfv26BWNtrf%2BqFcG8SqVhD35EQWng%40mail.gmail.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/CAGMm%3DjQL7ON7umh88wvnFxWYR8TWRce-uhxo6zedLCerym%2BxXQ%40mail.gmail.com.
Fangjin, I understand that the rule update in the co-ordinator does not happen immediately. What I wanted to point out was that it seems the update never happens. When I click on 'update' in the UI, the old row in mysql is not removed. Instead, a new row is added with the new rule that allows one replicant for the segment.
The co-ordinator always matches the first rule that it comes across when reading the mysql database, so the new rule is never applied, even if the co-ordinator is restarted. I had to manually remove the old row in mysql using the mysql 'delete' command. Here is the mysql result after I clicked on 'update' in the UI:
mysql> select * from druid_rules;
+-----------------------------------+------------+--------------------------+-----------------------------------------------------------------+
| id                                | dataSource | version                  | payload                                                         |
+-----------------------------------+------------+--------------------------+-----------------------------------------------------------------+
| _default_2015-07-10T22:05:44.110Z | _default   | 2015-07-10T22:05:44.110Z | [{"tieredReplicants":{"_default_tier":2},"type":"loadForever"}] |
| _default_2015-07-10T22:06:58.879Z | _default   | 2015-07-10T22:06:58.879Z | [{"tieredReplicants":{"_default_tier":1},"type":"loadForever"}] |
+-----------------------------------+------------+--------------------------+-----------------------------------------------------------------+
2 rows in set (0.00 sec)
And then I had to do the following in order for the new rule to take effect.
2015-07-13T15:50:04,447 INFO [task-runner-0] io.druid.indexing.worker.executor.ExecutorLifecycle - Task completed with status: { "id" : "index_wikipedia_2015-07-13T15:49:53.261Z", "status" : "SUCCESS", "duration" : 648 }
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/c5d18323-b386-4d6a-8b23-b2e1c650862b%40googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/252465dd-2927-43fd-9dc3-f8ada2e6e195%40googlegroups.com.
2015-07-13T15:50:52,697 WARN [main-SendThread(ssubrama-ld1.linkedin.biz:2181)] org.apache.zookeeper.ClientCnxn - Session 0x14e79f3d5e1000f for server ssubrama-ld1.linkedin.biz/127.0.0.1:2181, unexpected error, closing socket connection and attempting reconnect java.io.IOException: Xid out of order. Got Xid 56 with err 0 expected Xid 55 for a packet with details: clientPath:null serverPath:null finished:false header:: 55,14 replyHeader:: 0,0,-4 request:: org.apache.zookeeper.MultiTransactionRecord@731bdbc5 response:: org.apache.zookeeper.MultiResponse@0 at org.apache.zookeeper.ClientCnxn$SendThread.readResponse(ClientCnxn.java:798) ~[zookeeper-3.4.6.jar:3.4.6-1569965] at org.apache.zookeeper.ClientCnxnSocketNIO.doIO(ClientCnxnSocketNIO.java:94) ~[zookeeper-3.4.6.jar:3.4.6-1569965] at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:366) ~[zookeeper-3.4.6.jar:3.4.6-1569965] at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1081) [zookeeper-3.4.6.jar:3.4.6-1569965] 2015-07-13T15:50:52,798 INFO [main-EventThread] org.apache.curator.framework.state.ConnectionStateManager - State change: SUSPENDED 2015-07-13T15:50:53,643 INFO [main-SendThread(ssubrama-ld1.linkedin.biz:2181)] org.apache.zookeeper.ClientCnxn - Opening socket connection to server ssubrama-ld1.linkedin.biz/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error) 2015-07-13T15:50:53,643 INFO [main-SendThread(ssubrama-ld1.linkedin.biz:2181)] org.apache.zookeeper.ClientCnxn - Socket connection established to ssubrama-ld1.linkedin.biz/0:0:0:0:0:0:0:1:2181, initiating session 2015-07-13T15:50:53,645 WARN [main-SendThread(ssubrama-ld1.linkedin.biz:2181)] org.apache.zookeeper.ClientCnxnSocket - Connected to an old server; r-o mode will be unavailable 2015-07-13T
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/CAMLk_USYKyMOdLEaJAKb8mkfCsmdLrtNkUO6-ta1VLONYo6PRw%40mail.gmail.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/ffd25cb2-b8c6-4718-8ac3-8724a88b6570%40googlegroups.com.
All versions of zk are pretty old... ;) In all seriousness though, I know there are a number of bug fixes in zk in the 3.4.x series, including previous issues with ephemeral nodes and other node issues. I've been hesitant to bump due to a nasty little problem with reverse DNS introduced in 3.4.x which impacts one of my clusters, and is unresolved even in 3.5-alpha (based on this problem consistently remaining).
I will bump zk tonight and verify that it resolved this issue for me. I do want you to be aware though, in case others have been holding back their Zookeeper version - right or wrong, the Kafka docs still recommend 3.3.4.
Thanks,
Paul
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/CAPP9sfZUJgtB1PPi41eDh5zi%2B--%2BkRQVi%3DTq9h%3DZ6-YB7t01cA%40mail.gmail.com.
To unsubscribe from this group and stop receiving emails from it, send an email to druid-user+unsubscribe@googlegroups.com.
To post to this group, send email to druid...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/ffd25cb2-b8c6-4718-8ac3-8724a88b6570%40googlegroups.com.
--
You received this message because you are subscribed to a topic in the Google Groups "Druid User" group.
To unsubscribe from this topic, visit https://groups.google.com/d/topic/druid-user/Yt5yF9EReUY/unsubscribe.
To unsubscribe from this group and all its topics, send an email to druid-user+unsubscribe@googlegroups.com.