I am new to Druid.io and I am trying to set up a cluster of 3 nodes using AWS EC2.
I am just getting a "message dropped" exception and nothing else. Where should I look to resolve this issue, given that the coordinator, overlord, historical, and middlemanager logs are not showing any exception?
But I am getting the exception mentioned below:
2016-07-05 23:14:25,445 [KafkaConsumer-1] ERROR c.m.tranquility.kafka.KafkaConsumer - Exception:
java.lang.RuntimeException: com.metamx.tranquility.tranquilizer.MessageDroppedException: Message dropped
at com.google.common.base.Throwables.propagate(Throwables.java:160) ~[com.google.guava.guava-16.0.1.jar:na]
at com.metamx.tranquility.kafka.writer.TranquilityEventWriter.maybeThrow(TranquilityEventWriter.java:138) ~[io.druid.tranquility-kafka-0.8.2.jar:0.8.2]
at com.metamx.tranquility.kafka.writer.TranquilityEventWriter.send(TranquilityEventWriter.java:105) ~[io.druid.tranquility-kafka-0.8.2.jar:0.8.2]
at com.metamx.tranquility.kafka.KafkaConsumer$2.run(KafkaConsumer.java:231) ~[io.druid.tranquility-kafka-0.8.2.jar:0.8.2]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) [na:1.8.0_66]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) [na:1.8.0_66]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) [na:1.8.0_66]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) [na:1.8.0_66]
at java.lang.Thread.run(Thread.java:745) [na:1.8.0_66]
Caused by: com.metamx.tranquility.tranquilizer.MessageDroppedException: Message dropped
at com.twitter.finagle.NoStacktrace(Unknown Source) ~[na:na]
my kafka.json
{
"dataSources" : {
"splunk_data" : {
"spec" : {
"dataSchema" : {
"dataSource" : "splunk_data",
"parser" : {
"type" : "string",
"parseSpec" : {
"timestampSpec" : {
"column" : "nDateTime",
"format" : "yyyy-MM-dd HH:mm:ss"
},
"dimensionsSpec" : {
"dimensions" : [],
"dimensionExclusions" : []
},
"format" : "json"
}
},
"granularitySpec" : {
"type" : "uniform",
"segmentGranularity" : "day",
"queryGranularity" : "none"
},
"metricsSpec" : [
{
"type" : "count",
"name" : "count"
},
{
"name" : "nEventCount",
"type" : "longSum",
"fieldName" : "nEventCount"
},
{
"fieldName" : "nEventErrorCount",
"name" : "nEventErrorCount",
"type" : "longSum"
}
]
},
"ioConfig" : {
"type" : "realtime"
},
"tuningConfig" : {
"type" : "realtime",
"maxRowsInMemory" : "100000",
"intermediatePersistPeriod" : "PT60M",
"windowPeriod" : "PT60M"
}
},
"properties" : {
"task.partitions" : "1",
"task.replicants" : "1",
"topicPattern" : "DruidInput_1"
}
}
},
"properties" : {
"zookeeper.connect" : "xx.x.x.xx:2181",
"druid.discovery.curator.path" : "/druid/discovery",
"druid.selectors.indexing.serviceName" : "druid/overlord",
"commit.periodMillis" : "15000",
"consumer.numThreads" : "2",
"kafka.zookeeper.connect" : "xx.x.x.xx:2181",
"reportDropsAsExceptions" : "true"
}
}
and I am sending this data:
{"nDateTime":"2016-07-05 22:48:00","nEventCount":"1","nEventErrorCount":"0"}
Thanks
Tarun