2015-05-20 15:15:26 c.m.t.b.ClusteredBeam [ERROR] Failed to update cluster state: overlord/track_opening_v0_test175com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111) ~[stormjar.jar:na] at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) ~[stormjar.jar:na] at com.twitter.util.Try$.apply(Try.scala:13) ~[stormjar.jar:na] at com.twitter.util.Future$.apply(Future.scala:90) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na] at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) ~[stormjar.jar:na] at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) ~[stormjar.jar:na] at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) ~[stormjar.jar:na] at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) ~[stormjar.jar:na] at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) ~[stormjar.jar:na] at com.twitter.util.Promise.runq(Promise.scala:331) ~[stormjar.jar:na] at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) ~[stormjar.jar:na] at 
com.twitter.util.Promise.update(Promise.scala:615) ~[stormjar.jar:na] at com.twitter.util.Promise.setValue(Promise.scala:591) ~[stormjar.jar:na] at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) ~[stormjar.jar:na] at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) ~[stormjar.jar:na] at 
org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na] at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na] at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) 
~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) ~[stormjar.jar:na] at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) ~[stormjar.jar:na] at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ~[stormjar.jar:na] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [na:1.7.0_79] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [na:1.7.0_79] at java.lang.Thread.run(Thread.java:745) [na:1.7.0_79]2015-05-20 15:15:26 c.m.t.b.ClusteredBeam [WARN] Emitting alert: [anomaly] Failed to create merged beam: overlord/track_opening_v0_test175{ }java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/track_opening_v0_test175] timestamp[2015-05-20T15:00:00.000Z] at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264) ~[stormjar.jar:na] at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na] at 
com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na] at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) [stormjar.jar:na] at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) [stormjar.jar:na] at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) [stormjar.jar:na] at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) [stormjar.jar:na] at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) [stormjar.jar:na] at com.twitter.util.Promise.runq(Promise.scala:331) [stormjar.jar:na] at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) [stormjar.jar:na] at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112) [stormjar.jar:na] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) [na:1.7.0_79] at java.util.concurrent.FutureTask.run(FutureTask.java:262) [na:1.7.0_79] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [na:1.7.0_79] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [na:1.7.0_79] at java.lang.Thread.run(Thread.java:745) [na:1.7.0_79]Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111) ~[stormjar.jar:na] at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) ~[stormjar.jar:na] at com.twitter.util.Try$.apply(Try.scala:13) ~[stormjar.jar:na] at com.twitter.util.Future$.apply(Future.scala:90) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na] at 
com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) ~[stormjar.jar:na] at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na] at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na] at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) [stormjar.jar:na] at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) [stormjar.jar:na] at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) [stormjar.jar:na] at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) [stormjar.jar:na] at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) [stormjar.jar:na] at com.twitter.util.Promise.runq(Promise.scala:331) [stormjar.jar:na] at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) [stormjar.jar:na] at com.twitter.util.Promise.update(Promise.scala:615) [stormjar.jar:na] at com.twitter.util.Promise.setValue(Promise.scala:591) [stormjar.jar:na] at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) ~[stormjar.jar:na] at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) ~[stormjar.jar:na] at 
org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na] at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at 
org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na] at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na] at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) ~[stormjar.jar:na] at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na] at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) ~[stormjar.jar:na] at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) ~[stormjar.jar:na] at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) ~[stormjar.jar:na] at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) ~[stormjar.jar:na] at 
org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ~[stormjar.jar:na] ... 3 common frames omitted
15/05/28 18:15:40 INFO hdfs.HdfsDataSegmentPusher: Creating descriptor file at[hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/descriptor.json]15/05/28 18:15:41 INFO actions.RemoteTaskActionClient: Performing action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0]: SegmentInsertAction{segments=[DataSegment{size=38736423, shardSpec=LinearShardSpec{partitionNum=0}, metrics=[agg_count, agg_distinct_idfv], dimensions=[idSupport, idfv, openSource.affected, openSource.id.campaign, openSource.id.message, openSource.id.send, openSource.timestamp, openSource.type, timetoaction], version='2015-05-28T17:00:11.823Z', loadSpec={type=hdfs, path=hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/index.zip}, interval=2015-05-28T17:00:00.000Z/2015-05-28T18:00:00.000Z, dataSource='track_opening_v0_test175', binaryVersion='9'}]}15/05/28 18:15:41 INFO actions.RemoteTaskActionClient: Submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0] to overlord[http://overlords-64uv:8080/druid/indexer/v1/action]: SegmentInsertAction{segments=[DataSegment{size=38736423, shardSpec=LinearShardSpec{partitionNum=0}, metrics=[agg_count, agg_distinct_idfv], dimensions=[idSupport, idfv, openSource.affected, openSource.id.campaign, openSource.id.message, openSource.id.send, openSource.timestamp, openSource.type, timetoaction], version='2015-05-28T17:00:11.823Z', loadSpec={type=hdfs, path=hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/index.zip}, interval=2015-05-28T17:00:00.000Z/2015-05-28T18:00:00.000Z, dataSource='track_opening_v0_test175', binaryVersion='9'}]}15/05/28 18:15:41 INFO pool.ChannelResourceFactory: Generating: http://overlords-64uv:808015/05/28 18:30:41 WARN actions.RemoteTaskActionClient: Exception submitting 
action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0]org.jboss.netty.handler.timeout.ReadTimeoutException at org.jboss.netty.handler.timeout.ReadTimeoutHandler.<clinit>(ReadTimeoutHandler.java:84) at com.metamx.http.client.NettyHttpClient.go(NettyHttpClient.java:173) at io.druid.indexing.common.actions.RemoteTaskActionClient.submit(RemoteTaskActionClient.java:97) at io.druid.indexing.common.task.AbstractTask.getTaskLocks(AbstractTask.java:167) at io.druid.indexing.common.task.RealtimeIndexTask.run(RealtimeIndexTask.java:164) at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235) at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745)15/05/28 18:30:41 INFO actions.RemoteTaskActionClient: Will try again in [PT60S].15/05/28 18:31:41 INFO actions.RemoteTaskActionClient: Submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0] to overlord[http://overlords-64uv:8080/druid/indexer/v1/action]: SegmentInsertAction{segments=[DataSegment{size=38736423, shardSpec=LinearShardSpec{partitionNum=0}, metrics=[agg_count, agg_distinct_idfv], dimensions=[idSupport, idfv, openSource.affected, openSource.id.campaign, openSource.id.message, openSource.id.send, openSource.timestamp, openSource.type, timetoaction], version='2015-05-28T17:00:11.823Z', loadSpec={type=hdfs, path=hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/index.zip}, interval=2015-05-28T17:00:00.000Z/2015-05-28T18:00:00.000Z, dataSource='track_opening_v0_test175', binaryVersion='9'}]}15/05/28 
18:31:41 INFO pool.ChannelResourceFactory: Generating: http://overlords-64uv:808015/05/28 18:46:41 WARN actions.RemoteTaskActionClient: Exception submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0]org.jboss.netty.handler.timeout.ReadTimeoutException at org.jboss.netty.handler.timeout.ReadTimeoutHandler.<clinit>(ReadTimeoutHandler.java:84) at com.metamx.http.client.NettyHttpClient.go(NettyHttpClient.java:173) at io.druid.indexing.common.actions.RemoteTaskActionClient.submit(RemoteTaskActionClient.java:97) at io.druid.indexing.common.task.AbstractTask.getTaskLocks(AbstractTask.java:167) at io.druid.indexing.common.task.RealtimeIndexTask.run(RealtimeIndexTask.java:164) at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235) at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) at java.lang.Thread.run(Thread.java:745)15/05/28 18:46:41 INFO actions.RemoteTaskActionClient: Will try again in [PT120S].15/05/28 18:48:08 INFO inventory.CuratorInventoryManager: Created new InventoryCacheListener for /druid/segments/managed-middlemanagers-v1-lnrw:8100
...
...
...
...
druid.host=##HOSTNAME##:8080
druid.port=8080
druid.service=overlord
druid.indexer.autoscale.doAutoscale=true
druid.indexer.autoscale.strategy=gce
druid.indexer.autoscale.workerIdleTimeout=PT6M
druid.indexer.autoscale.maxScalingDuration=PT5M
druid.indexer.autoscale.provisionPeriod=PT1M
druid.indexer.autoscale.terminatePeriod=PT1M
druid.indexer.autoscale.pendingTaskTimeout=PT10M
druid.indexer.autoscale.workerVersion=1
druid.indexer.autoscale.workerPort=8081
# NOTE(review): logs.type is hdfs but the directory uses a gs:// scheme — this works only if
# the GCS connector for Hadoop is on the classpath so gs:// paths resolve through the HDFS API; confirm.
druid.indexer.logs.type=hdfs
druid.indexer.logs.directory=gs://host/overlordLogs
druid.indexer.runner.type=remote
druid.indexer.runner.minWorkerVersion=1
druid.indexer.storage.type=metadata
druid.indexer.processing.sizeBytes=100000000
# Zookeeper
druid.zk.service.host=ZK_HOST1:2181,ZK_HOST2:2181,ZK_HOST3:2181
# NOTE(review): storage.type is hdfs while storageDirectory is a gs:// URI — presumably GCS mounted
# via the Hadoop GCS connector; verify the connector jar is deployed to every indexing node, since a
# missing connector would make segment pushes/announcements fail and could explain the 400 Bad Request.
druid.storage.type=hdfs
druid.storage.storageDirectory=gs://INDEX_HOST/indexedData
druid.cache.type=local
druid.cache.sizeInBytes=10000000
druid.metadata.storage.type=mysql
druid.metadata.storage.connector.connectURI=jdbc:mysql://MYSQL_SERVER:MYSQL_PORT/druid?characterEncoding=UTF-8
druid.metadata.storage.connector.user=MYSQL_USER
druid.metadata.storage.connector.password=MYSQL_PASSWORD
druid.selectors.indexing.serviceName=overlord
druid.emitter=noop
druid.processing.buffer.sizeBytes=500000000
druid.announcer.type=batch
...
...
...
--
You received this message because you are subscribed to the Google Groups "Druid User" group.
To unsubscribe from this group and stop receiving emails from it, send an email to druid-user+...@googlegroups.com.
To post to this group, send email to druid...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/207edb48-de5d-45a8-81cf-0206865ef34a%40googlegroups.com.
For more options, visit https://groups.google.com/d/optout.
Caused by: java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/rb_flow_NS-453110232756458] timestamp[2015-11-10T13:00:00.000Z] at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264) at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261) at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843) at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841) at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) at com.twitter.util.Promise$Transformer.k(Promise.scala:100) at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) at com.twitter.util.Promise.runq(Promise.scala:331) at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112) at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source) at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) at java.util.concurrent.FutureTask.run(Unknown Source) at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source) at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) at java.lang.Thread.run(Unknown Source)
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111)
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) at com.twitter.util.Try$.apply(Try.scala:13) at com.twitter.util.Future$.apply(Future.scala:90) at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) at com.twitter.util.Promise$Transformer.k(Promise.scala:100) at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) at com.twitter.util.Promise.runq(Promise.scala:331) at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) at com.twitter.util.Promise.update(Promise.scala:615) at com.twitter.util.Promise.setValue(Promise.scala:591) at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at 
org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at 
org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ... 3 more2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down.2015-11-10 13:42:47 ClusteredBeam [ERROR] Failed to update cluster state: overlord/rb_flow_NS-453110232756458
com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111)
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) at com.twitter.util.Try$.apply(Try.scala:13) at com.twitter.util.Future$.apply(Future.scala:90) at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) at com.twitter.util.Promise$Transformer.k(Promise.scala:100) at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) at com.twitter.util.Promise.runq(Promise.scala:331) at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) at com.twitter.util.Promise.update(Promise.scala:615) at com.twitter.util.Promise.setValue(Promise.scala:591) at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at 
org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at 
org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source) at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) at java.lang.Thread.run(Unknown Source)2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down consumer multiplexer.2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down BrokerProxy for RBSCALEKAFKA02.redborder.cluster:90922015-11-10 13:42:47 DefaultFetchSimpleConsumer [INFO] Reconnect due to socket error: java.nio.channels.ClosedByInterruptException2015-11-10 13:42:47 BrokerProxy [WARN] Restarting consumer due to java.nio.channels.ClosedChannelException. 
Releasing ownership of all partitions, and restarting consumer. Turn on debugging to get a full stack trace.2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,1]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,3]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0, [rb_state_post,3] -> 0)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,1]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,3]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down due to interrupt.2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down BrokerProxy for RBSCALEKAFKA01.redborder.cluster:90922015-11-10 13:42:47 DefaultFetchSimpleConsumer [INFO] Reconnect due to socket error: java.nio.channels.ClosedByInterruptException2015-11-10 13:42:47 BrokerProxy [WARN] Restarting consumer due to java.nio.channels.ClosedChannelException. Releasing ownership of all partitions, and restarting consumer. 
Turn on debugging to get a full stack trace.2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,0]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_monitor,0]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,0]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,0] -> 0, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,2]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,0] -> 0, [rb_state_post,2] -> 0, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,2]2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,0] -> 0, [rb_state_post,2] -> 0, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980, [rb_flow_post,2] -> 111784)2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down due to interrupt.2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down task instance stream tasks.2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down producer multiplexer.2015-11-10 13:42:47 DruidBeam [INFO] Closing Druid beam 
for datasource[rb_flow_NS-3656365540927863] interval[2015-11-10T13:00:00.000Z/2015-11-10T14:00:00.000Z] (tasks = index_realtime_rb_flow_NS-3656365540927863_2015-11-10T13:00:00.000Z_0_0)2015-11-10 13:42:47 FinagleRegistry [INFO] Closing client for service: druid:local:firehose:rb_flow_NS-3656365540927863-13-0000-00002015-11-10 13:42:47 DiscoResolver [INFO] No longer monitoring service[druid:local:firehose:rb_flow_NS-3656365540927863-13-0000-0000]2015-11-10 13:42:47 FinagleRegistry [INFO] Closing client for service: overlord2015-11-10 13:42:47 DiscoResolver [INFO] No longer monitoring service[overlord]2015-11-10 13:42:47 BeamPacketizer [WARN] ClusteredBeam(overlord/rb_flow_NS-453110232756458): Failed to send 2,000 messages.2015-11-10 13:42:47 SamzaContainerExceptionHandler [ERROR] Uncaught exception in thread (name=main). Exiting process now.com.metamx.common.ISE: Failed to send message[{dot11_status=PROBING, bytes=0, wireless_station=00:ba:20:00:f8:00, service_provider_uuid=SP-2045569389811222, pkts=0, type=mse10-location, namespace=NS:E2E_MT3, timestamp=1447159636, namespace_uuid=NS-453110232756458, client_mac=00:ca:00:05:36:7b, organization=E2E_MT3, organization_uuid=ORG-453110232756458, market_uuid=MR-2399161717387152, deployment=Building 18, building=Building 18, service_provider=E2E_2_MT, sensor_uuid=2365958529805087852, floor=8th Floor, sensor_name=e2e_mse2, floor_uuid=FL-3582721092232054, building_uuid=BL-1292744184876768, campus=Cisco Systems, subscriptionName=motus-e2e-2-rb-loc, deployment_uuid=DL-3411194937420418}]. 
at com.metamx.tranquility.samza.BeamProducer$$anonfun$1$$anon$1.fail(BeamProducer.scala:60) at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4$$anonfun$apply$3.apply(BeamPacketizer.scala:119) at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4$$anonfun$apply$3.apply(BeamPacketizer.scala:119) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47) at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4.apply(BeamPacketizer.scala:119) at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4.apply(BeamPacketizer.scala:115) at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772) at scala.collection.mutable.ArraySeq.foreach(ArraySeq.scala:73) at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771) at com.metamx.tranquility.beam.BeamPacketizer.awaitPendingBatches(BeamPacketizer.scala:115) at com.metamx.tranquility.beam.BeamPacketizer.flush(BeamPacketizer.scala:73) at com.metamx.tranquility.beam.BeamPacketizer.close(BeamPacketizer.scala:78) at com.metamx.tranquility.samza.BeamProducer$$anonfun$stop$1.apply(BeamProducer.scala:45) at com.metamx.tranquility.samza.BeamProducer$$anonfun$stop$1.apply(BeamProducer.scala:44) at scala.collection.mutable.HashMap$$anon$2$$anonfun$foreach$3.apply(HashMap.scala:107) at scala.collection.mutable.HashMap$$anon$2$$anonfun$foreach$3.apply(HashMap.scala:107) at scala.collection.mutable.HashTable$class.foreachEntry(HashTable.scala:226) at scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:39) at scala.collection.mutable.HashMap$$anon$2.foreach(HashMap.scala:107) at com.metamx.tranquility.samza.BeamProducer.stop(BeamProducer.scala:44) at org.apache.samza.system.SystemProducers$$anonfun$stop$2.apply(SystemProducers.scala:47) at 
org.apache.samza.system.SystemProducers$$anonfun$stop$2.apply(SystemProducers.scala:47) at scala.collection.Iterator$class.foreach(Iterator.scala:727) at scala.collection.AbstractIterator.foreach(Iterator.scala:1157) at scala.collection.MapLike$DefaultValuesIterable.foreach(MapLike.scala:206) at org.apache.samza.system.SystemProducers.stop(SystemProducers.scala:47) at org.apache.samza.container.SamzaContainer.shutdownProducers(SamzaContainer.scala:644) at org.apache.samza.container.SamzaContainer.run(SamzaContainer.scala:565) at org.apache.samza.container.SamzaContainer$.safeMain(SamzaContainer.scala:93) at org.apache.samza.container.SamzaContainer$.main(SamzaContainer.scala:67) at org.apache.samza.container.SamzaContainer.main(SamzaContainer.scala)Caused by: java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/rb_flow_NS-453110232756458] timestamp[2015-11-10T13:00:00.000Z] at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264) at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261) at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843) at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841) at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) at com.twitter.util.Promise$Transformer.k(Promise.scala:100) at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) at com.twitter.util.Promise.runq(Promise.scala:331) at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) at 
com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112) at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source) at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) at java.util.concurrent.FutureTask.run(Unknown Source) at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source) at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) at java.lang.Thread.run(Unknown Source)
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111)
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) at com.twitter.util.Try$.apply(Try.scala:13) at com.twitter.util.Future$.apply(Future.scala:90) at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) at com.twitter.util.Promise$Transformer.k(Promise.scala:100) at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) at com.twitter.util.Promise.runq(Promise.scala:331) at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) at com.twitter.util.Promise.update(Promise.scala:615) at com.twitter.util.Promise.setValue(Promise.scala:591) at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at 
org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at 
org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ... 3 more2015-11-10 13:42:47 RunLoop [INFO] Shutting down, will wait up to 5000 ms2015-11-10 13:42:52 RunLoop [WARN] Did not shut down within 5000 ms, exiting
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/04bd6eb8-0fd3-41df-b863-81eb9ad2fcc9%40googlegroups.com.
On November 10, 2015 at 23:41:03, Gian Merlino (gi...@imply.io) wrote:
You received this message because you are subscribed to a topic in the Google Groups "Druid User" group.
To unsubscribe from this topic, visit https://groups.google.com/d/topic/druid-user/0dz1so1abNM/unsubscribe.
To unsubscribe from this group and all its topics, send an email to druid-user+...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/CACZNdYDsEtOEnW0fUXOX-LFqroRugDOMSnkP--dqfJOH0fBKNg%40mail.gmail.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/etPan.5642f28a.74365a23.1e9%40MBP-de-Andres.redborder.lan.
On November 12, 2015 at 2:14:41, Gian Merlino (gi...@imply.io) wrote:
To view this discussion on the web visit https://groups.google.com/d/msgid/druid-user/CACZNdYDLc8N1AUfNL%3DEFswdMU59N5HBStM%2B%2BnC%3DhVX7qN4RbJQ%40mail.gmail.com.
...
Hi, We are running Druid 0.7.1.1 behind a Storm cluster with Tranquility 4.2. We have been testing the system on our production traffic lately and everything runs fine most of the time. However, we have recently found out that some segments were missing. As we checked the logs we found: - Tasks related to the missing segments have a status set to SUCCESS. - No data is present on the middle managers, though the folders corresponding to the tasks exist but remain empty. - Storm's logs show this kind of message:
2015-05-20 15:15:26 c.m.t.b.ClusteredBeam [ERROR] Failed to update cluster state: overlord/track_opening_v0_test175
com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/track_opening_v0_test175] timestamp[2015-05-20T15:00:00.000Z]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264) ~[stormjar.jar:na]at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261) ~[stormjar.jar:na]at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843) ~[stormjar.jar:na]at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841) ~[stormjar.jar:na]at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na]at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na]at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na]at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na]at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) [stormjar.jar:na]at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) [stormjar.jar:na]at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) [stormjar.jar:na]at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) [stormjar.jar:na]at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) [stormjar.jar:na]at com.twitter.util.Promise.runq(Promise.scala:331) [stormjar.jar:na]at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) [stormjar.jar:na]at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112) [stormjar.jar:na]at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) [na:1.7.0_79]at java.util.concurrent.FutureTask.run(FutureTask.java:262) [na:1.7.0_79]at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [na:1.7.0_79]at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [na:1.7.0_79]at java.lang.Thread.run(Thread.java:745) [na:1.7.0_79]
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) ~[stormjar.jar:na]at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na]at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) ~[stormjar.jar:na]at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) ~[stormjar.jar:na]at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) ~[stormjar.jar:na]at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) ~[stormjar.jar:na]at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) ~[stormjar.jar:na]at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) ~[stormjar.jar:na]at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) ~[stormjar.jar:na]at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) ~[stormjar.jar:na]at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) ~[stormjar.jar:na]at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ~[stormjar.jar:na]
... 3 common frames omitted
- The overlord's logs, when returning the 400 Bad Request, seem to indicate that the task already exists.
- The attempts generating bad requests are retried every second until the end of the hour (the segment granularity is 1 hour).