Bad Request from overlord when creating tranquility beams triggers missing segments


Pilou

May 20, 2015, 12:01:09 PM
to druid...@googlegroups.com
Hi,

We are running Druid 0.7.1.1 behind a Storm cluster with Tranquility 0.4.2.
We have been testing the system on our production traffic lately and everything runs fine most of the time. However, we recently found out that some segments were missing. When we checked the logs we found:
- Tasks related to the missing segments have a status set to SUCCESS
- No data is present on the middle managers: the folders corresponding to the tasks exist but remain empty
- Storm's logs show this kind of message:

2015-05-20 15:15:26 c.m.t.b.ClusteredBeam [ERROR] Failed to update cluster state: overlord/track_opening_v0_test175
com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111) ~[stormjar.jar:na]
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) ~[stormjar.jar:na]
at com.twitter.util.Try$.apply(Try.scala:13) ~[stormjar.jar:na]
at com.twitter.util.Future$.apply(Future.scala:90) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na]
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) ~[stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) ~[stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) ~[stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) ~[stormjar.jar:na]
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) ~[stormjar.jar:na]
at com.twitter.util.Promise.runq(Promise.scala:331) ~[stormjar.jar:na]
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) ~[stormjar.jar:na]
at com.twitter.util.Promise.update(Promise.scala:615) ~[stormjar.jar:na]
at com.twitter.util.Promise.setValue(Promise.scala:591) ~[stormjar.jar:na]
at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) ~[stormjar.jar:na]
at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na]
at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na]
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) ~[stormjar.jar:na]
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) ~[stormjar.jar:na]
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ~[stormjar.jar:na]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [na:1.7.0_79]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [na:1.7.0_79]
at java.lang.Thread.run(Thread.java:745) [na:1.7.0_79]
2015-05-20 15:15:26 c.m.t.b.ClusteredBeam [WARN] Emitting alert: [anomaly] Failed to create merged beam: overlord/track_opening_v0_test175
{ }
java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/track_opening_v0_test175] timestamp[2015-05-20T15:00:00.000Z]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264) ~[stormjar.jar:na]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na]
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) [stormjar.jar:na]
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) [stormjar.jar:na]
at com.twitter.util.Promise.runq(Promise.scala:331) [stormjar.jar:na]
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) [stormjar.jar:na]
at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112) [stormjar.jar:na]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) [na:1.7.0_79]
at java.util.concurrent.FutureTask.run(FutureTask.java:262) [na:1.7.0_79]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [na:1.7.0_79]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [na:1.7.0_79]
at java.lang.Thread.run(Thread.java:745) [na:1.7.0_79]
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111) ~[stormjar.jar:na]
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863) ~[stormjar.jar:na]
at com.twitter.util.Try$.apply(Try.scala:13) ~[stormjar.jar:na]
at com.twitter.util.Future$.apply(Future.scala:90) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na]
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) [stormjar.jar:na]
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) [stormjar.jar:na]
at com.twitter.util.Promise.runq(Promise.scala:331) [stormjar.jar:na]
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) [stormjar.jar:na]
at com.twitter.util.Promise.update(Promise.scala:615) [stormjar.jar:na]
at com.twitter.util.Promise.setValue(Promise.scala:591) [stormjar.jar:na]
at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76) ~[stormjar.jar:na]
at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70) ~[stormjar.jar:na]
at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na]
at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142) ~[stormjar.jar:na]
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) ~[stormjar.jar:na]
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) ~[stormjar.jar:na]
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ~[stormjar.jar:na]
... 3 common frames omitted

- The overlord's logs, at the time it returns the 400 Bad Request, seem to say that the task already exists.
- The attempts generating bad requests are replayed every second until the end of the hour (the segment size is 1h).
- Our ZooKeeper nodes show a surge of CPU during those periods.

From what we have witnessed so far, it happens quite irregularly, sometimes with only a few hours between error surges and sometimes 6-7 hours.
Our Storm topology has multiple bolts pushing to the same tasks. Tasks can be partitioned or not; it doesn't seem to change anything.

My guess is that a desynchronization happens between Druid and Tranquility: the task creation is triggered, but Tranquility doesn't see it and keeps trying to create a new one, which triggers the 400.
Is it due to a misconfiguration (memory space, tranquility beam in topology, etc.)?
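
One way we could test that guess is to ask the overlord directly whether the task Tranquility keeps trying to create already exists. A minimal Scala sketch, assuming a placeholder overlord host and a task id taken from the ClusteredBeam error messages:

import scala.io.Source

// Placeholders: substitute the real overlord host and the task id from
// the ClusteredBeam error messages.
val overlord = "http://OVERLORD_HOST:8080"
val taskId   = "index_realtime_track_opening_v0_test175_2015-05-20T15:00:00.000Z_0_0"

// The overlord's task status endpoint: if the task already exists this
// returns its status rather than an error, which would support the
// "Tranquility retries a create that already went through" theory.
val status = Source.fromURL(s"$overlord/druid/indexer/v1/task/$taskId/status").mkString
println(status)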

Thanks,

Pierre-Edouard Montabrun


Fangjin Yang

May 22, 2015, 1:54:09 AM
to druid...@googlegroups.com, pemon...@gmail.com
Gian is on vacation, so I think all Tranquility-related questions will be a little delayed.

For one of these bad tasks, do you happen to have the overlord logs from around the time the task was running? Are there any interesting messages in the logs of the task itself? It is strange that the error occurs sporadically, though. Do you see any errors in your logs about ZK disconnect problems?

Pilou

May 22, 2015, 9:44:40 AM
to druid...@googlegroups.com, pemon...@gmail.com
Hi Fangjin,

I have switched the overlord to the debug log level, but there don't seem to be many clues about what is going on at the time of the error. You will find a few seconds of logs in the attachment, with 400 requests and a ZooKeeper ping. (Since debug mode generates a lot of lines, tell me if you need more, like an hour, to cover a whole segment lifecycle.)
I have checked our ZooKeeper logs, but nothing seems to go wrong at the time of the error.


Pierre-Edouard
extract_log_overlord.log

Fangjin Yang

May 25, 2015, 11:57:43 AM
to druid...@googlegroups.com, pemon...@gmail.com
Hi Pilou, can you go through your overlord logs for instances of:
index_realtime_track_cart_v0_test175_2015-05-22T09:00:00.000Z_0_0

Can you also provide the task logs for this task to see what happened during execution?

Pilou

May 26, 2015, 5:31:39 AM
to druid...@googlegroups.com, pemon...@gmail.com
Hi Fangjin,


We have configured Druid to upload task logs to HDFS, but I have checked every error I have had so far and the pattern is always the same:
- There is no corresponding log on HDFS
- However, there is a folder named after the task in the task directory on the middle manager instance, but there is nothing inside (unlike valid running tasks, where I can find the logs and a task.json)

Pierre-Edouard

Fangjin Yang

May 27, 2015, 1:33:24 AM
to druid...@googlegroups.com, pemon...@gmail.com
Hi Pierre, do you have any logs in the overlord corresponding to a bad task? Grepping for those logs should yield some information about what is happening.

Pilou

May 28, 2015, 1:04:43 PM
to druid...@googlegroups.com, pemon...@gmail.com
Hi Fangjin,

Unfortunately, we don't have any sources of logs other than what I showed you before. However, we have found a lot of ReadTimeoutExceptions from the overlord when looking at tasks ending just before the bad tasks, so we thought we might have undersized our overlord instances. We have switched to bigger instances and are running tests with production traffic to see if the errors are still there. I'll keep you updated in the next few days.

Thanks,

Pierre-Edouard

Pilou

May 29, 2015, 4:41:08 AM
to druid...@googlegroups.com
Hi,

Increasing the overlord capacity didn't seem to change anything :/
Here is an extract from the log of a task processed just before a bad task:

15/05/28 18:15:40 INFO hdfs.HdfsDataSegmentPusher: Creating descriptor file at[hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/descriptor.json]
15/05/28 18:15:41 INFO actions.RemoteTaskActionClient: Performing action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0]: SegmentInsertAction{segments=[DataSegment{size=38736423, shardSpec=LinearShardSpec{partitionNum=0}, metrics=[agg_count, agg_distinct_idfv], dimensions=[idSupport, idfv, openSource.affected, openSource.id.campaign, openSource.id.message, openSource.id.send, openSource.timestamp, openSource.type, timetoaction], version='2015-05-28T17:00:11.823Z', loadSpec={type=hdfs, path=hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/index.zip}, interval=2015-05-28T17:00:00.000Z/2015-05-28T18:00:00.000Z, dataSource='track_opening_v0_test175', binaryVersion='9'}]}
15/05/28 18:15:41 INFO actions.RemoteTaskActionClient: Submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0] to overlord[http://overlords-64uv:8080/druid/indexer/v1/action]: SegmentInsertAction{segments=[DataSegment{size=38736423, shardSpec=LinearShardSpec{partitionNum=0}, metrics=[agg_count, agg_distinct_idfv], dimensions=[idSupport, idfv, openSource.affected, openSource.id.campaign, openSource.id.message, openSource.id.send, openSource.timestamp, openSource.type, timetoaction], version='2015-05-28T17:00:11.823Z', loadSpec={type=hdfs, path=hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/index.zip}, interval=2015-05-28T17:00:00.000Z/2015-05-28T18:00:00.000Z, dataSource='track_opening_v0_test175', binaryVersion='9'}]}
15/05/28 18:15:41 INFO pool.ChannelResourceFactory: Generating: http://overlords-64uv:8080
15/05/28 18:30:41 WARN actions.RemoteTaskActionClient: Exception submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0]
org.jboss.netty.handler.timeout.ReadTimeoutException
at org.jboss.netty.handler.timeout.ReadTimeoutHandler.<clinit>(ReadTimeoutHandler.java:84)
at com.metamx.http.client.NettyHttpClient.go(NettyHttpClient.java:173)
at io.druid.indexing.common.actions.RemoteTaskActionClient.submit(RemoteTaskActionClient.java:97)
at io.druid.indexing.common.task.AbstractTask.getTaskLocks(AbstractTask.java:167)
at io.druid.indexing.common.task.RealtimeIndexTask.run(RealtimeIndexTask.java:164)
at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235)
at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
15/05/28 18:30:41 INFO actions.RemoteTaskActionClient: Will try again in [PT60S].
15/05/28 18:31:41 INFO actions.RemoteTaskActionClient: Submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0] to overlord[http://overlords-64uv:8080/druid/indexer/v1/action]: SegmentInsertAction{segments=[DataSegment{size=38736423, shardSpec=LinearShardSpec{partitionNum=0}, metrics=[agg_count, agg_distinct_idfv], dimensions=[idSupport, idfv, openSource.affected, openSource.id.campaign, openSource.id.message, openSource.id.send, openSource.timestamp, openSource.type, timetoaction], version='2015-05-28T17:00:11.823Z', loadSpec={type=hdfs, path=hdfs://myhdfs/indexedData/track_opening_v0_test175/20150528T170000.000Z_20150528T180000.000Z/2015-05-28T17_00_11.823Z/0/index.zip}, interval=2015-05-28T17:00:00.000Z/2015-05-28T18:00:00.000Z, dataSource='track_opening_v0_test175', binaryVersion='9'}]}
15/05/28 18:31:41 INFO pool.ChannelResourceFactory: Generating: http://overlords-64uv:8080
15/05/28 18:46:41 WARN actions.RemoteTaskActionClient: Exception submitting action for task[index_realtime_track_opening_v0_test175_2015-05-28T17:00:00.000Z_0_0]
org.jboss.netty.handler.timeout.ReadTimeoutException
at org.jboss.netty.handler.timeout.ReadTimeoutHandler.<clinit>(ReadTimeoutHandler.java:84)
at com.metamx.http.client.NettyHttpClient.go(NettyHttpClient.java:173)
at io.druid.indexing.common.actions.RemoteTaskActionClient.submit(RemoteTaskActionClient.java:97)
at io.druid.indexing.common.task.AbstractTask.getTaskLocks(AbstractTask.java:167)
at io.druid.indexing.common.task.RealtimeIndexTask.run(RealtimeIndexTask.java:164)
at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:235)
at io.druid.indexing.overlord.ThreadPoolTaskRunner$ThreadPoolTaskRunnerCallable.call(ThreadPoolTaskRunner.java:214)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
15/05/28 18:46:41 INFO actions.RemoteTaskActionClient: Will try again in [PT120S].
15/05/28 18:48:08 INFO inventory.CuratorInventoryManager: Created new InventoryCacheListener for /druid/segments/managed-middlemanagers-v1-lnrw:8100


When this kind of error happens, the next tasks start with a delay and I can see a task in the "Waiting Tasks - Tasks waiting on locks" section.
We thought the issue was the overlord hardware being undersized given the load, but upgrading it didn't solve the issue.
Do you have any idea about what could cause this ReadTimeoutException?

Thanks,

Pierre-Edouard

Fangjin Yang

May 31, 2015, 2:14:10 PM
to druid...@googlegroups.com, pemon...@gmail.com
These look like problems with HTTP connections. Do you have any permissions set around certain ports?
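
A quick connectivity check run from a middle manager could rule out basic network or firewall problems; here's a plain Scala sketch that just opens a socket with a timeout (the host and port are placeholders for your overlord's address):

import java.net.{InetSocketAddress, Socket}

// Placeholder host/port: the overlord's address as seen from a middle manager.
val socket = new Socket()
try {
  socket.connect(new InetSocketAddress("OVERLORD_HOST", 8080), 5000)
  println("overlord port reachable")
} finally {
  socket.close()
}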

Pilou

Jun 1, 2015, 12:38:37 PM
to druid...@googlegroups.com, pemon...@gmail.com
We have checked whether any permissions are set on ports, but there aren't any.
Apparently the server freezes when this kind of error happens: the overlord is still very low on CPU, but we can't get any response from it; even the UI is frozen. Killing the overlord instance seems to solve the issue but leads to a loss of data.
The second overlord doesn't seem to take over the load if the master is not killed.
...

Fangjin Yang

Jun 1, 2015, 11:15:34 PM
to druid...@googlegroups.com, pemon...@gmail.com
Pilou, losing the overlord should definitely not lead to a loss of data unless things are misconfigured. How are you setting up your overlord? The logs point to network problems between your nodes. How are you running the overlord and middle managers? Are they in the same datacenter?
...

Pilou

Jun 2, 2015, 12:30:42 PM
to druid...@googlegroups.com, pemon...@gmail.com
We are usually not losing data when switching between overlords. It's just that when we notice the overlord is freezing, if we kill the instance and have our second overlord take the load, we lose the data transmitted while it was frozen.
Our overlords and middle managers are in the same datacenter and we are using the autoscale option.
Having several overlords is only useful for failover, right? Because I don't think they share the load when running in our cluster.
...

Fangjin Yang

Jun 4, 2015, 10:47:57 PM
to druid...@googlegroups.com, pemon...@gmail.com
Hmm, no data should be transmitted through the overlord unless you are for some reason running in local mode with no middle managers. Can you share your overlord runtime.properties?
...

Pilou

Jun 5, 2015, 4:13:54 AM
to druid...@googlegroups.com, pemon...@gmail.com

Our overlord is running in remote mode, but here are our configs anyway.
We have our own autoscaling strategy called "gce" in order to use Google Cloud instances:

druid.host=##HOSTNAME##:8080
druid.port=8080
druid.service=overlord

druid.indexer.autoscale.doAutoscale=true
druid.indexer.autoscale.strategy=gce
druid.indexer.autoscale.workerIdleTimeout=PT6M
druid.indexer.autoscale.maxScalingDuration=PT5M
druid.indexer.autoscale.provisionPeriod=PT1M
druid.indexer.autoscale.terminatePeriod=PT1M
druid.indexer.autoscale.pendingTaskTimeout=PT10M
druid.indexer.autoscale.workerVersion=1
druid.indexer.autoscale.workerPort=8081

druid.indexer.logs.type=hdfs
druid.indexer.logs.directory=gs://host/overlordLogs

druid.indexer.runner.type=remote
druid.indexer.runner.minWorkerVersion=1

druid.indexer.storage.type=metadata
druid.indexer.processing.sizeBytes=100000000

And our common config, if you need it:

# Zookeeper
druid.zk.service.host=ZK_HOST1:2181,ZK_HOST2:2181,ZK_HOST3:2181

druid.storage.type=hdfs
druid.storage.storageDirectory=gs://INDEX_HOST/indexedData

druid.cache.type=local
druid.cache.sizeInBytes=10000000

druid.metadata.storage.type=mysql
druid.metadata.storage.connector.connectURI=jdbc:mysql://MYSQL_SERVER:MYSQL_PORT/druid?characterEncoding=UTF-8
druid.metadata.storage.connector.user=MYSQL_USER
druid.metadata.storage.connector.password=MYSQL_PASSWORD

druid.selectors.indexing.serviceName=overlord

druid.emitter=noop

druid.processing.buffer.sizeBytes=500000000

druid.announcer.type=batch


We use druid-hdfs-storage with slight modifications for Google Cloud Storage (which is HDFS-compatible) and the druid-mysql extension; both are included in the classpath.

...

Gian Merlino

Jun 16, 2015, 1:31:58 PM
to druid...@googlegroups.com
Hi Pilou,

How long is the overlord freezing for? If it's substantially longer than your windowPeriod, it's possible that tranquility will get stuck when a new segmentGranularity period rolls over and a new segment must be created. It needs the overlord to help coordinate the creation of a new segment. If it's stuck for long enough, messages can time out and drop.
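
For context, both windowPeriod and segmentGranularity come from the beam tuning on the Tranquility side. A rough sketch of what that looks like in Tranquility 0.4.x (the builder wiring around it is omitted, and the values shown are illustrative, not recommendations):

import com.metamx.common.Granularity
import com.metamx.tranquility.beam.ClusteredBeamTuning
import org.joda.time.Period

// Illustrative values: hourly segments with a 10-minute window. Events
// that arrive outside the window (for example while the overlord is
// frozen past it) can time out and be dropped, as described above.
val tuning = ClusteredBeamTuning(
  segmentGranularity = Granularity.HOUR,
  windowPeriod = new Period("PT10M"),
  partitions = 1,
  replicants = 1
)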

I think it'd be useful to see a thread dump of your overlord during the time it's frozen (jstack -l [pid]). Maybe all of its http threads are stuck on something? Since you said the overlord CPU is low, maybe it's a network or disk issue of some sort.
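
If it helps, something like this can grab a dump every few seconds during a freeze, so the stuck threads can be compared across snapshots (the pid and output paths are placeholders; it just shells out to jstack):

import scala.sys.process._
import java.io.File

// Placeholder pid: the overlord's process id while it is frozen.
val pid = "2021"

// One jstack dump every 10 seconds for a minute, each in its own file.
for (i <- 1 to 6) {
  (Seq("jstack", "-l", pid) #> new File(s"/tmp/overlord-jstack-$i.txt")).!
  Thread.sleep(10000)
}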
...

Vincent Coquart

Jun 18, 2015, 10:57:39 AM
to druid...@googlegroups.com
Hi Gian,

I work with Pilou on Druid.
Here is the thread dump. I have no idea how to read it.

Thanks for your help :)

Attaching to process ID 2021, please wait...
Debugger attached successfully.
Server compiler detected.
JVM version is 25.31-b07
Deadlock Detection:


No deadlocks found.


Thread 4049: (state = BLOCKED)
 - sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information may be imprecise)
 - java.util.concurrent.locks.LockSupport.park(java.lang.Object) @bci=14, line=175 (Compiled frame)
 - java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await() @bci=42, line=2039 (Compiled frame)
 - java.util.concurrent.LinkedBlockingQueue.take() @bci=29, line=442 (Compiled frame)
 - org.apache.zookeeper.ClientCnxn$EventThread.run() @bci=9, line=494 (Interpreted frame)

Locked ownable synchronizers:
 - None

Thread 4048: (state = IN_NATIVE)
 - sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (Compiled frame; information may be imprecise)
 - sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame)
 - sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled frame)
 - sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled frame)
 - sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Compiled frame)
 - org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(int, java.util.List, java.util.LinkedList, org.apache.zookeeper.ClientCnxn) @bci=6, line=349 (Compiled frame)
 - org.apache.zookeeper.ClientCnxn$SendThread.run() @bci=604, line=1081 (Compiled frame)

Locked ownable synchronizers:
 - None

Threads 32386 and 32385: (state = BLOCKED), identical stacks (idle pool workers)
 - sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information may be imprecise)
 - java.util.concurrent.locks.LockSupport.park(java.lang.Object) @bci=14, line=175 (Compiled frame)
 - java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await() @bci=42, line=2039 (Compiled frame)
 - java.util.concurrent.LinkedBlockingQueue.take() @bci=29, line=442 (Compiled frame)
 - java.util.concurrent.ThreadPoolExecutor.getTask() @bci=149, line=1067 (Compiled frame)
 - java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.ThreadPoolExecutor$Worker) @bci=26, line=1127 (Interpreted frame)
 - java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (Interpreted frame)
 - java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)

Locked ownable synchronizers:
 - None

Threads 3504 and 2471: (state = BLOCKED), identical stacks (idle scheduled-pool workers)
 - sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information may be imprecise)
 - java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long) @bci=20, line=215 (Compiled frame)
 - java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(long) @bci=78, line=2078 (Compiled frame)
 - java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take() @bci=124, line=1093 (Compiled frame)
 - java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take() @bci=1, line=809 (Compiled frame)
 - java.util.concurrent.ThreadPoolExecutor.getTask() @bci=149, line=1067 (Compiled frame)
 - java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.ThreadPoolExecutor$Worker) @bci=26, line=1127 (Interpreted frame)
 - java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (Interpreted frame)
 - java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)

Locked ownable synchronizers:
 - None

Threads 2470 down through 2444: (state = BLOCKED), all idle Jetty worker threads with the same stack (the dump is truncated partway through thread 2444)
 - sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information may be imprecise)
 - java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long) @bci=20, line=215 (Compiled frame)
 - java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(long) @bci=78, line=2078 (Compiled frame)
 - org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent.TimeUnit) @bci=57, line=389 (Compiled frame)
 - org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12, line=516 (Compiled frame)
 - org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
 - org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575 (Compiled frame)
 - java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)

Locked ownable synchronizers:
 - None
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2443: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2442: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2441: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2440: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2439: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2438: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2437: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2436: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2435: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2434: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2433: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- org.eclipse.jetty.util.BlockingArrayQueue.poll(long, java.util.concurrent
.TimeUnit) @bci=57, line=389 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll() @bci=12,
line=516 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.access$700(org.eclipse.
jetty.util.thread.QueuedThreadPool) @bci=1, line=47 (Compiled frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=300, line=575
(Compiled frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2432: (state = IN_NATIVE)
- sun.nio.ch.ServerSocketChannelImpl.accept0(java.io.FileDescriptor, java.
io.FileDescriptor, java.net.InetSocketAddress[]) @bci=0 (Compiled frame;
information may be imprecise)
- sun.nio.ch.ServerSocketChannelImpl.accept() @bci=130, line=241 (Compiled
frame)
- org.eclipse.jetty.server.ServerConnector.accept(int) @bci=17, line=377 (
Compiled frame)
- org.eclipse.jetty.server.AbstractConnector$Acceptor.run() @bci=154, line=
500 (Interpreted frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(java.lang.Runnable)
@bci=1, line=620 (Interpreted frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=75, line=540
(Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2431: (state = IN_NATIVE)
- sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (
Compiled frame; information may be imprecise)
- sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame
)
- sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled
frame)
- sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled
frame)
- sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Interpreted frame)
- sun.nio.ch.SelectorImpl.select() @bci=2, line=101 (Compiled frame)
- org.eclipse.jetty.io.SelectorManager$ManagedSelector.select() @bci=174,
line=596 (Compiled frame)
- org.eclipse.jetty.io.SelectorManager$ManagedSelector.run() @bci=158, line
=545 (Compiled frame)
- org.eclipse.jetty.util.thread.NonBlockingThread.run() @bci=13, line=52 (
Interpreted frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(java.lang.Runnable)
@bci=1, line=620 (Interpreted frame)
- org.eclipse.jetty.util.thread.QueuedThreadPool$3.run() @bci=75, line=540
(Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2429: (state = BLOCKED)
- java.lang.Object.wait(long) @bci=0 (Compiled frame; information may be
imprecise)
- java.lang.Object.wait() @bci=2, line=502 (Compiled frame)
- org.apache.curator.framework.recipes.locks.LockInternals.internalLockLoop
(long, java.lang.Long, java.lang.String) @bci=242, line=307 (Interpreted
frame)
- org.apache.curator.framework.recipes.locks.LockInternals.attemptLock(long
, java.util.concurrent.TimeUnit, byte[]) @bci=91, line=217 (Interpreted
frame)
- org.apache.curator.framework.recipes.locks.InterProcessMutex.internalLock
(long, java.util.concurrent.TimeUnit) @bci=47, line=232 (Interpreted frame)
- org.apache.curator.framework.recipes.locks.InterProcessMutex.acquire()
@bci=5, line=89 (Interpreted frame)
- org.apache.curator.framework.recipes.leader.LeaderSelector.doWork() @bci=
9, line=386 (Interpreted frame)
- org.apache.curator.framework.recipes.leader.LeaderSelector.doWorkLoop()
@bci=3, line=443 (Interpreted frame)
- org.apache.curator.framework.recipes.leader.LeaderSelector.access$100(org
.apache.curator.framework.recipes.leader.LeaderSelector) @bci=1, line=64 (
Interpreted frame)
- org.apache.curator.framework.recipes.leader.LeaderSelector$2.call() @bci=
4, line=245 (Interpreted frame)
- org.apache.curator.framework.recipes.leader.LeaderSelector$2.call() @bci=
1, line=239 (Interpreted frame)
- java.util.concurrent.FutureTask.run() @bci=42, line=266 (Interpreted
frame)
- java.util.concurrent.Executors$RunnableAdapter.call() @bci=4, line=511 (
Compiled frame)
- java.util.concurrent.FutureTask.run() @bci=42, line=266 (Compiled frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x0000000090829c60>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2428: (state = BLOCKED)
- java.lang.Thread.sleep(long) @bci=0 (Compiled frame; information may be
imprecise)
- org.jboss.netty.util.HashedWheelTimer$Worker.waitForNextTick() @bci=81,
line=445 (Compiled frame)
- org.jboss.netty.util.HashedWheelTimer$Worker.run() @bci=43, line=364 (
Compiled frame)
- org.jboss.netty.util.ThreadRenamingRunnable.run() @bci=55, line=108 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2427: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.park(java.lang.Object) @bci=14,
line=175 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
await() @bci=42, line=2039 (Compiled frame)
- java.util.concurrent.DelayQueue.take() @bci=28, line=211 (Interpreted
frame)
- java.util.concurrent.DelayQueue.take() @bci=1, line=70 (Interpreted frame
)
- org.apache.curator.framework.imps.CuratorFrameworkImpl.
backgroundOperationsLoop() @bci=13, line=790 (Interpreted frame)
- org.apache.curator.framework.imps.CuratorFrameworkImpl.access$400(org.
apache.curator.framework.imps.CuratorFrameworkImpl) @bci=1, line=61 (
Interpreted frame)
- org.apache.curator.framework.imps.CuratorFrameworkImpl$4.call() @bci=4,
line=272 (Interpreted frame)
- java.util.concurrent.FutureTask.run() @bci=42, line=266 (Interpreted
frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x0000000090829c90>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2424: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.park(java.lang.Object) @bci=14,
line=175 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
await() @bci=42, line=2039 (Compiled frame)
- java.util.concurrent.ArrayBlockingQueue.take() @bci=20, line=403 (
Interpreted frame)
- org.apache.curator.framework.state.ConnectionStateManager.processEvents()
@bci=13, line=245 (Interpreted frame)
- org.apache.curator.framework.state.ConnectionStateManager.access$000(org.
apache.curator.framework.state.ConnectionStateManager) @bci=1, line=43 (
Interpreted frame)
- org.apache.curator.framework.state.ConnectionStateManager$1.call() @bci=4
, line=111 (Interpreted frame)
- java.util.concurrent.FutureTask.run() @bci=42, line=266 (Interpreted
frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x0000000090829e70>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2423: (state = BLOCKED)
- sun.misc.Unsafe.park(boolean, long) @bci=0 (Compiled frame; information
may be imprecise)
- java.util.concurrent.locks.LockSupport.parkNanos(java.lang.Object, long)
@bci=20, line=215 (Compiled frame)
- java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.
awaitNanos(long) @bci=78, line=2078 (Compiled frame)
- java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take()
@bci=124, line=1093 (Compiled frame)
- java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take()
@bci=1, line=809 (Compiled frame)
- java.util.concurrent.ThreadPoolExecutor.getTask() @bci=149, line=1067 (
Compiled frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=26, line=1127 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- None


Thread 2422: (state = IN_NATIVE)
- sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (
Compiled frame; information may be imprecise)
- sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame
)
- sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled
frame)
- sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled
frame)
- sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Compiled frame)
- org.jboss.netty.channel.socket.nio.SelectorUtil.select(java.nio.channels.
Selector) @bci=4, line=68 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.select(java.nio.
channels.Selector) @bci=1, line=434 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.run() @bci=56,
line=212 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioWorker.run() @bci=1, line=
89 (Interpreted frame)
- org.jboss.netty.channel.socket.nio.NioWorker.run() @bci=1, line=178 (
Interpreted frame)
- org.jboss.netty.util.ThreadRenamingRunnable.run() @bci=55, line=108 (
Interpreted frame)
- org.jboss.netty.util.internal.DeadLockProofWorker$1.run() @bci=14, line=
42 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x000000009082b640>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2421: (state = IN_NATIVE)
- sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (
Compiled frame; information may be imprecise)
- sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame
)
- sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled
frame)
- sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled
frame)
- sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Compiled frame)
- org.jboss.netty.channel.socket.nio.SelectorUtil.select(java.nio.channels.
Selector) @bci=4, line=68 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.select(java.nio.
channels.Selector) @bci=1, line=434 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.run() @bci=56,
line=212 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioWorker.run() @bci=1, line=
89 (Interpreted frame)
- org.jboss.netty.channel.socket.nio.NioWorker.run() @bci=1, line=178 (
Interpreted frame)
- org.jboss.netty.util.ThreadRenamingRunnable.run() @bci=55, line=108 (
Interpreted frame)
- org.jboss.netty.util.internal.DeadLockProofWorker$1.run() @bci=14, line=
42 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x000000009082b400>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2420: (state = IN_NATIVE)
- sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (
Compiled frame; information may be imprecise)
- sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame
)
- sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled
frame)
- sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled
frame)
- sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Compiled frame)
- org.jboss.netty.channel.socket.nio.SelectorUtil.select(java.nio.channels.
Selector) @bci=4, line=68 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.select(java.nio.
channels.Selector) @bci=1, line=434 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.run() @bci=56,
line=212 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioWorker.run() @bci=1, line=
89 (Interpreted frame)
- org.jboss.netty.channel.socket.nio.NioWorker.run() @bci=1, line=178 (
Interpreted frame)
- org.jboss.netty.util.ThreadRenamingRunnable.run() @bci=55, line=108 (
Interpreted frame)
- org.jboss.netty.util.internal.DeadLockProofWorker$1.run() @bci=14, line=
42 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x000000009082b220>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2419: (state = IN_NATIVE)
- sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (
Compiled frame; information may be imprecise)
- sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame
)
- sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled
frame)
- sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled
frame)
- sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Compiled frame)
- org.jboss.netty.channel.socket.nio.SelectorUtil.select(java.nio.channels.
Selector) @bci=4, line=68 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.select(java.nio.
channels.Selector) @bci=1, line=434 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.run() @bci=56,
line=212 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioWorker.run() @bci=1, line=
89 (Interpreted frame)
- org.jboss.netty.channel.socket.nio.NioWorker.run() @bci=1, line=178 (
Interpreted frame)
- org.jboss.netty.util.ThreadRenamingRunnable.run() @bci=55, line=108 (
Interpreted frame)
- org.jboss.netty.util.internal.DeadLockProofWorker$1.run() @bci=14, line=
42 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x000000009082b520>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2418: (state = IN_NATIVE)
- sun.nio.ch.EPollArrayWrapper.epollWait(long, int, long, int) @bci=0 (
Compiled frame; information may be imprecise)
- sun.nio.ch.EPollArrayWrapper.poll(long) @bci=18, line=269 (Compiled frame
)
- sun.nio.ch.EPollSelectorImpl.doSelect(long) @bci=28, line=79 (Compiled
frame)
- sun.nio.ch.SelectorImpl.lockAndDoSelect(long) @bci=37, line=86 (Compiled
frame)
- sun.nio.ch.SelectorImpl.select(long) @bci=30, line=97 (Compiled frame)
- org.jboss.netty.channel.socket.nio.SelectorUtil.select(java.nio.channels.
Selector) @bci=4, line=68 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.select(java.nio.
channels.Selector) @bci=1, line=434 (Compiled frame)
- org.jboss.netty.channel.socket.nio.AbstractNioSelector.run() @bci=56,
line=212 (Compiled frame)
- org.jboss.netty.channel.socket.nio.NioClientBoss.run() @bci=1, line=42 (
Interpreted frame)
- org.jboss.netty.util.ThreadRenamingRunnable.run() @bci=55, line=108 (
Interpreted frame)
- org.jboss.netty.util.internal.DeadLockProofWorker$1.run() @bci=14, line=
42 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.
ThreadPoolExecutor$Worker) @bci=95, line=1142 (Interpreted frame)
- java.util.concurrent.ThreadPoolExecutor$Worker.run() @bci=5, line=617 (
Interpreted frame)
- java.lang.Thread.run() @bci=11, line=745 (Interpreted frame)


Locked ownable synchronizers:
- <0x000000009082b7c0>, (a java/util/concurrent/
ThreadPoolExecutor$Worker)


Thread 2417: (state = BLOCKED)
- java.lang.Object.wait(long) @bci=0 (Compiled frame; information may be
imprecise)
- java.lang.ref.ReferenceQueue.remove(long) @bci=44, line=142 (Compiled
frame)
- com.mysql.jdbc.AbandonedConnectionCleanupThread.run() @bci=16, line=43 (
Compiled frame)


Locked ownable synchronizers:
- None


Thread 2084: (state = BLOCKED)


Locked ownable synchronizers:
- None


Thread 2083: (state = BLOCKED)


Locked ownable synchronizers:
- None


Thread 2078: (state = BLOCKED)
- java.lang.Object.wait(long) @bci=0 (Compiled frame; information may be
imprecise)
- java.lang.ref.ReferenceQueue.remove(long) @bci=44, line=142 (Compiled
frame)
- java.lang.ref.ReferenceQueue.remove() @bci=2, line=158 (Compiled frame)
- java.lang.ref.Finalizer$FinalizerThread.run() @bci=36, line=209 (Compiled
frame)


Locked ownable synchronizers:
- None


Thread 2077: (state = BLOCKED)
- java.lang.Object.wait(long) @bci=0 (Compiled frame; information may be
imprecise)
- java.lang.Object.wait() @bci=2, line=502 (Compiled frame)
- java.lang.ref.Reference$ReferenceHandler.run() @bci=36, line=157 (
Compiled frame)


Locked ownable synchronizers:
- None


Thread 2045: (state = BLOCKED)
- java.lang.Object.wait(long) @bci=0 (Compiled frame; information may be
imprecise)
- java.lang.Thread.join(long) @bci=38, line=1245 (Interpreted frame)
- java.lang.Thread.join() @bci=2, line=1319 (Interpreted frame)
- com.metamx.common.lifecycle.Lifecycle.join() @bci=24, line=299 (
Interpreted frame)
- io.druid.cli.ServerRunnable.run() @bci=12, line=41 (Interpreted frame)
- io.druid.cli.Main.main(java.lang.String[]) @bci=320, line=88 (Interpreted
frame)


Locked ownable synchronizers:
- None
> ...

Gian Merlino

unread,
Jun 28, 2015, 8:39:53 PM6/28/15
to druid...@googlegroups.com
Hi Vincent,

Hmm, that's the thread dump of the overlord while it's stuck? I don't see any smoking guns there; usually stuck overlords have all their Jetty threads busy doing something or other. The Jetty threads on this machine look idle. Can you use the overlord's web console while it's stuck? There may be some kind of network issue or something like that…
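One quick way to tell a network problem apart from a wedged Jetty is to check, from a machine that can't reach the console, whether a plain TCP connect still succeeds. A rough sketch (the host and port here are placeholders for your overlord):

# Placeholders for your overlord's host/port; adjust to your setup.
import socket
import urllib2

host, port = 'overlord.example.com', 8090

# If this raises, the problem is at the network level, not in Jetty.
s = socket.create_connection((host, port), timeout=5)
s.close()
print 'TCP connect OK, trying HTTP...'
# If the connect works but this hangs or times out, the HTTP side is wedged.
print urllib2.urlopen('http://%s:%d/status' % (host, port), timeout=10).read()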

Pilou

unread,
Jun 29, 2015, 3:49:44 AM6/29/15
to druid...@googlegroups.com
Hi Gian,

When the overlord is stuck, neither the web console nor the status page is accessible. (For now we have a Python script that regularly polls the status page and kills the overlord instance whenever this happens; a sketch of it is below.)
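Roughly, the watchdog looks like this. This is a minimal sketch; the port, status URL, and the restart command are assumptions about our particular setup:

#!/usr/bin/env python
# Minimal watchdog sketch. Assumes the overlord listens on its default
# port 8090 and that a "supervisorctl restart druid-overlord" hook exists;
# both are assumptions about our setup, adjust to yours.
import subprocess
import time
import urllib2

STATUS_URL = 'http://localhost:8090/status'

while True:
    try:
        # A wedged overlord simply hangs, so keep the timeout short.
        urllib2.urlopen(STATUS_URL, timeout=10).read()
    except Exception as e:
        print 'overlord status check failed (%s), restarting it' % e
        subprocess.call(['supervisorctl', 'restart', 'druid-overlord'])
        time.sleep(60)  # give the new instance time to come up
    time.sleep(15)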
...

Gian Merlino

unread,
Jun 29, 2015, 4:04:10 AM6/29/15
to druid...@googlegroups.com
Hi Pilou,

Are you sure that stack trace is from an overlord that was stuck at the time? If so, then I'm not really sure what's going on, but it doesn't look like there are any stuck requests. Do you have a copy of the logs from that JVM around the time it got stuck?
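If it gets stuck again, it would help to capture a thread dump and the tail of the log right at the moment your health check fails, along these lines (a rough sketch; the pid lookup and the log path are assumptions about your deployment):

# Rough sketch: capture diagnostics when the status check fails.
# Assumes jstack is on the PATH and the overlord log lives under
# /var/log/druid; both are assumptions, adjust to your deployment.
import subprocess
import time

def capture_diagnostics(pid):
    stamp = time.strftime('%Y%m%dT%H%M%S')
    # jstack -F forces a dump even from an unresponsive JVM, and produces
    # the "(Compiled frame)" style output like the dump posted above.
    with open('overlord-jstack-%s.txt' % stamp, 'w') as out:
        subprocess.call(['jstack', '-F', str(pid)], stdout=out)
    # Grab the end of the overlord log from around the same moment.
    subprocess.call('tail -n 5000 /var/log/druid/overlord.log '
                    '> overlord-log-%s.txt' % stamp, shell=True)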


Andres Gomez

unread,
Nov 10, 2015, 10:11:42 AM11/10/15
to Druid User
Hi all, 

Any update on this topic? I have a similar issue on my cluster, but I'm using Samza (with Tranquility). I get errors like this:

Caused by: java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/rb_flow_NS-453110232756458] timestamp[2015-11-10T13:00:00.000Z]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264)
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261)
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843)
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841)
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100)
at com.twitter.util.Promise$Transformer.k(Promise.scala:100)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91)
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345)
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186)
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157)
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212)
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86)
at com.twitter.util.Promise.runq(Promise.scala:331)
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642)
at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112)
at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source)
at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111)
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86)
at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863)
at com.twitter.util.Try$.apply(Try.scala:13)
at com.twitter.util.Future$.apply(Future.scala:90)
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863)
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863)
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824)
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823)
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100)
at com.twitter.util.Promise$Transformer.k(Promise.scala:100)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91)
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345)
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186)
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157)
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212)
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86)
at com.twitter.util.Promise.runq(Promise.scala:331)
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642)
at com.twitter.util.Promise.update(Promise.scala:615)
at com.twitter.util.Promise.setValue(Promise.scala:591)
at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76)
at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
... 3 more
2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down.
2015-11-10 13:42:47 ClusteredBeam [ERROR] Failed to update cluster state: overlord/rb_flow_NS-453110232756458
com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111)
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86)
at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863)
at com.twitter.util.Try$.apply(Try.scala:13)
at com.twitter.util.Future$.apply(Future.scala:90)
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863)
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863)
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824)
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823)
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100)
at com.twitter.util.Promise$Transformer.k(Promise.scala:100)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91)
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345)
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186)
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157)
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212)
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86)
at com.twitter.util.Promise.runq(Promise.scala:331)
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642)
at com.twitter.util.Promise.update(Promise.scala:615)
at com.twitter.util.Promise.setValue(Promise.scala:591)
at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76)
at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down consumer multiplexer.
2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down BrokerProxy for RBSCALEKAFKA02.redborder.cluster:9092
2015-11-10 13:42:47 DefaultFetchSimpleConsumer [INFO] Reconnect due to socket error: java.nio.channels.ClosedByInterruptException
2015-11-10 13:42:47 BrokerProxy [WARN] Restarting consumer due to java.nio.channels.ClosedChannelException. Releasing ownership of all partitions, and restarting consumer. Turn on debugging to get a full stack trace.
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,1]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,3]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0, [rb_state_post,3] -> 0)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,1]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,3]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)
2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down due to interrupt.
2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down BrokerProxy for RBSCALEKAFKA01.redborder.cluster:9092
2015-11-10 13:42:47 DefaultFetchSimpleConsumer [INFO] Reconnect due to socket error: java.nio.channels.ClosedByInterruptException
2015-11-10 13:42:47 BrokerProxy [WARN] Restarting consumer due to java.nio.channels.ClosedChannelException. Releasing ownership of all partitions, and restarting consumer. Turn on debugging to get a full stack trace.
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,0]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_monitor,0]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,0]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,0] -> 0, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_state_post,2]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,0] -> 0, [rb_state_post,2] -> 0, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980)
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Abdicating for [rb_flow_post,2]
2015-11-10 13:42:47 KafkaSystemConsumer [INFO] Refreshing brokers for: Map([rb_flow_post,0] -> 111889, [rb_monitor,0] -> 28138, [rb_state_post,0] -> 0, [rb_state_post,2] -> 0, [rb_state_post,1] -> 0, [rb_state_post,3] -> 0, [rb_flow_post,1] -> 111780, [rb_flow_post,3] -> 111980, [rb_flow_post,2] -> 111784)
2015-11-10 13:42:47 BrokerProxy [INFO] Shutting down due to interrupt.
2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down task instance stream tasks.
2015-11-10 13:42:47 SamzaContainer [INFO] Shutting down producer multiplexer.
2015-11-10 13:42:47 DruidBeam [INFO] Closing Druid beam for datasource[rb_flow_NS-3656365540927863] interval[2015-11-10T13:00:00.000Z/2015-11-10T14:00:00.000Z] (tasks = index_realtime_rb_flow_NS-3656365540927863_2015-11-10T13:00:00.000Z_0_0)
2015-11-10 13:42:47 FinagleRegistry [INFO] Closing client for service: druid:local:firehose:rb_flow_NS-3656365540927863-13-0000-0000
2015-11-10 13:42:47 DiscoResolver [INFO] No longer monitoring service[druid:local:firehose:rb_flow_NS-3656365540927863-13-0000-0000]
2015-11-10 13:42:47 FinagleRegistry [INFO] Closing client for service: overlord
2015-11-10 13:42:47 DiscoResolver [INFO] No longer monitoring service[overlord]
2015-11-10 13:42:47 BeamPacketizer [WARN] ClusteredBeam(overlord/rb_flow_NS-453110232756458): Failed to send 2,000 messages.
2015-11-10 13:42:47 SamzaContainerExceptionHandler [ERROR] Uncaught exception in thread (name=main). Exiting process now.
com.metamx.common.ISE: Failed to send message[{dot11_status=PROBING, bytes=0, wireless_station=00:ba:20:00:f8:00, service_provider_uuid=SP-2045569389811222, pkts=0, type=mse10-location, namespace=NS:E2E_MT3, timestamp=1447159636, namespace_uuid=NS-453110232756458, client_mac=00:ca:00:05:36:7b, organization=E2E_MT3, organization_uuid=ORG-453110232756458, market_uuid=MR-2399161717387152, deployment=Building 18, building=Building 18, service_provider=E2E_2_MT, sensor_uuid=2365958529805087852, floor=8th Floor, sensor_name=e2e_mse2, floor_uuid=FL-3582721092232054, building_uuid=BL-1292744184876768, campus=Cisco Systems, subscriptionName=motus-e2e-2-rb-loc, deployment_uuid=DL-3411194937420418}].
at com.metamx.tranquility.samza.BeamProducer$$anonfun$1$$anon$1.fail(BeamProducer.scala:60)
at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4$$anonfun$apply$3.apply(BeamPacketizer.scala:119)
at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4$$anonfun$apply$3.apply(BeamPacketizer.scala:119)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4.apply(BeamPacketizer.scala:119)
at com.metamx.tranquility.beam.BeamPacketizer$$anonfun$awaitPendingBatches$4.apply(BeamPacketizer.scala:115)
at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:772)
at scala.collection.mutable.ArraySeq.foreach(ArraySeq.scala:73)
at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:771)
at com.metamx.tranquility.beam.BeamPacketizer.awaitPendingBatches(BeamPacketizer.scala:115)
at com.metamx.tranquility.beam.BeamPacketizer.flush(BeamPacketizer.scala:73)
at com.metamx.tranquility.beam.BeamPacketizer.close(BeamPacketizer.scala:78)
at com.metamx.tranquility.samza.BeamProducer$$anonfun$stop$1.apply(BeamProducer.scala:45)
at com.metamx.tranquility.samza.BeamProducer$$anonfun$stop$1.apply(BeamProducer.scala:44)
at scala.collection.mutable.HashMap$$anon$2$$anonfun$foreach$3.apply(HashMap.scala:107)
at scala.collection.mutable.HashMap$$anon$2$$anonfun$foreach$3.apply(HashMap.scala:107)
at scala.collection.mutable.HashTable$class.foreachEntry(HashTable.scala:226)
at scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:39)
at scala.collection.mutable.HashMap$$anon$2.foreach(HashMap.scala:107)
at com.metamx.tranquility.samza.BeamProducer.stop(BeamProducer.scala:44)
at org.apache.samza.system.SystemProducers$$anonfun$stop$2.apply(SystemProducers.scala:47)
at org.apache.samza.system.SystemProducers$$anonfun$stop$2.apply(SystemProducers.scala:47)
at scala.collection.Iterator$class.foreach(Iterator.scala:727)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
at scala.collection.MapLike$DefaultValuesIterable.foreach(MapLike.scala:206)
at org.apache.samza.system.SystemProducers.stop(SystemProducers.scala:47)
at org.apache.samza.container.SamzaContainer.shutdownProducers(SamzaContainer.scala:644)
at org.apache.samza.container.SamzaContainer.run(SamzaContainer.scala:565)
at org.apache.samza.container.SamzaContainer$.safeMain(SamzaContainer.scala:93)
at org.apache.samza.container.SamzaContainer$.main(SamzaContainer.scala:67)
at org.apache.samza.container.SamzaContainer.main(SamzaContainer.scala)
Caused by: java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/rb_flow_NS-453110232756458] timestamp[2015-11-10T13:00:00.000Z]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264)
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261)
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843)
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841)
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100)
at com.twitter.util.Promise$Transformer.k(Promise.scala:100)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91)
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345)
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186)
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157)
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212)
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86)
at com.twitter.util.Promise.runq(Promise.scala:331)
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642)
at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112)
at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source)
at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:111)
at com.metamx.tranquility.druid.IndexService$$anonfun$call$1$$anonfun$apply$7.apply(IndexService.scala:86)
at com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$6.apply(Future.scala:863)
at com.twitter.util.Try$.apply(Try.scala:13)
at com.twitter.util.Future$.apply(Future.scala:90)
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863)
at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:863)
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:824)
at com.twitter.util.Future$$anonfun$flatMap$1.apply(Future.scala:823)
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100)
at com.twitter.util.Promise$Transformer.k(Promise.scala:100)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110)
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91)
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345)
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186)
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157)
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212)
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86)
at com.twitter.util.Promise.runq(Promise.scala:331)
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642)
at com.twitter.util.Promise.update(Promise.scala:615)
at com.twitter.util.Promise.setValue(Promise.scala:591)
at com.twitter.concurrent.AsyncQueue.offer(AsyncQueue.scala:76)
at com.twitter.finagle.transport.ChannelTransport.handleUpstream(ChannelTransport.scala:45)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.handler.codec.http.HttpContentDecoder.messageReceived(HttpContentDecoder.java:108)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.http.HttpChunkAggregator.messageReceived(HttpChunkAggregator.java:194)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:296)
at org.jboss.netty.handler.codec.frame.FrameDecoder.unfoldAndFireMessageReceived(FrameDecoder.java:459)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.callDecode(ReplayingDecoder.java:536)
at org.jboss.netty.handler.codec.replay.ReplayingDecoder.messageReceived(ReplayingDecoder.java:435)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.handler.codec.http.HttpClientCodec.handleUpstream(HttpClientCodec.java:92)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.twitter.finagle.channel.ChannelStatsHandler.messageReceived(ChannelStatsHandler.scala:86)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
... 3 more
2015-11-10 13:42:47 RunLoop [INFO] Shutting down, will wait up to 5000 ms
2015-11-10 13:42:52 RunLoop [WARN] Did not shut down within 5000 ms, exiting

When this exception is thrown, my Samza job dies. I could try/catch the exception, but I think we should track down the underlying problem in Tranquility or the overlord and fix it. I am using 0.7.1.1 too; could this be a known bug?

Regards,

Andrés

Andres Gomez

unread,
Nov 10, 2015, 10:13:35 AM11/10/15
to Druid User
If I can contribute some log files, let me know!

Regards and thanks,

Andres
<span class="Apple

Gian Merlino

unread,
Nov 10, 2015, 5:41:02 PM11/10/15
to druid...@googlegroups.com
Hey Andres,

Usually a Bad Request means that the task you were trying to submit already existed. This can happen if you wipe out ZK metadata for tranquility. If you did do that, then the problem should resolve at the next segmentGranularity turnover, or, if you want to work around it for now, you can turn on "randomizeTaskId" in the DruidBeamConfig. But do note that this config has a small chance of causing a race condition where two copies of the same task exist, so it is not suggested for production.
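If you do go the randomizeTaskId route, it would look roughly like this with the Tranquility Scala builder (a sketch only: "timestamper" and the elided builder calls stand in for your existing setup, and the exact builder method names can vary between Tranquility versions):

import com.metamx.tranquility.druid.{DruidBeamConfig, DruidBeams}

// Sketch: randomizeTaskId appends a random suffix to task names, so a
// re-created beam does not collide with a task that already exists.
// See the race-condition caveat above; not suggested for production.
val beam = DruidBeams
  .builder(timestamper)  // your timestamp extractor
  // ... your existing curator / location / rollup / tuning calls ...
  .druidBeamConfig(DruidBeamConfig(randomizeTaskId = true))
  .buildBeam()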

Gian

Andres Gomez Ferrer

unread,
Nov 11, 2015, 2:47:25 AM11/11/15
to druid...@googlegroups.com
Then I should try/catch the exception inside my Samza tasks so they don't die, shouldn't I? And at the next segmentGranularity turnover the problem should resolve itself. The second option doesn't convince me much :)
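Something like this sketch is what I have in mind on the Samza side (illustrative only: the system and stream names are placeholders, and since Tranquility sends asynchronously, a failure can also surface during flush or shutdown, where a catch in process() will not see it):

import com.metamx.tranquility.druid.IndexServicePermanentException
import org.apache.samza.system.{IncomingMessageEnvelope, OutgoingMessageEnvelope, SystemStream}
import org.apache.samza.task.{MessageCollector, StreamTask, TaskCoordinator}

class GuardedTask extends StreamTask {
  // Placeholder system/stream for the Druid output.
  private val druidOut = new SystemStream("druid", "rb_flow")

  def process(envelope: IncomingMessageEnvelope,
              collector: MessageCollector,
              coordinator: TaskCoordinator): Unit = {
    try {
      collector.send(new OutgoingMessageEnvelope(druidOut, envelope.getMessage))
    } catch {
      case e: IndexServicePermanentException =>
        // Drop the message instead of letting the container die.
        System.err.println("Tranquility rejected a message: " + e.getMessage)
    }
  }
}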

Regards,

On 10 November 2015 at 23:41:03, Gian Merlino (gi...@imply.io) wrote:


Gian Merlino

unread,
Nov 11, 2015, 8:14:39 PM11/11/15
to druid...@googlegroups.com
Hey Andres,

Yes, it should resolve on its own if this is due to someone manually removing the ZK metadata.

Gian

Andres Gomez Ferrer

unread,
Nov 12, 2015, 2:40:56 AM11/12/15
to druid...@googlegroups.com
OK, Gian!

Thanks, I'm going to try/catch the exception and test it!

Regards,

On 12 November 2015 at 2:14:41, Gian Merlino (gi...@imply.io) wrote:


5921...@qq.com

unread,
Aug 7, 2017, 9:16:45 AM8/7/17
to Druid User
How did you solve it?

On Thursday, November 12, 2015 at 3:40:56 PM UTC+8, Andres Gomez wrote:
...

Corolla Zhao

unread,
Jul 9, 2018, 3:52:08 AM7/9/18
to Druid User
Check the overlord's JVM config (overlord/jvm.config):

-Djava.io.tmpdir=var/tmp

Maybe the path 'var/tmp' hasn't been created.
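If it is missing, create the directory before starting the overlord, or point the property at a directory that is known to exist (the path below is only an example):

-Djava.io.tmpdir=/tmp/druid

Note that 'var/tmp' is a relative path, so it is resolved against whatever working directory the overlord process was started from.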


On Thursday, May 21, 2015 at 12:01:09 AM UTC+8, Pilou wrote:
java.lang.IllegalStateException: Failed to save new beam for identifier[overlord/track_opening_v0_test175] timestamp[2015-05-20T15:00:00.000Z]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:264) ~[stormjar.jar:na]
at com.metamx.tranquility.beam.ClusteredBeam$$anonfun$2.applyOrElse(ClusteredBeam.scala:261) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:843) ~[stormjar.jar:na]
at com.twitter.util.Future$$anonfun$rescue$1.apply(Future.scala:841) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.k(Promise.scala:100) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:110) ~[stormjar.jar:na]
at com.twitter.util.Promise$Transformer.apply(Promise.scala:91) ~[stormjar.jar:na]
at com.twitter.util.Promise$$anon$2.run(Promise.scala:345) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:186) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:157) [stormjar.jar:na]
at com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:212) [stormjar.jar:na]
at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:86) [stormjar.jar:na]
at com.twitter.util.Promise.runq(Promise.scala:331) [stormjar.jar:na]
at com.twitter.util.Promise.updateIfEmpty(Promise.scala:642) [stormjar.jar:na]
at com.twitter.util.ExecutorServiceFuturePool$$anon$2.run(FuturePool.scala:112) [stormjar.jar:na]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) [na:1.7.0_79]
at java.util.concurrent.FutureTask.run(FutureTask.java:262) [na:1.7.0_79]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) [na:1.7.0_79]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) [na:1.7.0_79]
at java.lang.Thread.run(Thread.java:745) [na:1.7.0_79]
Caused by: com.metamx.tranquility.druid.IndexServicePermanentException: Service call failed with status: 400 Bad Request
at com.twitter.finagle.channel.ChannelRequestStatsHandler.messageReceived(ChannelRequestStatsHandler.scala:35) ~[stormjar.jar:na]
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564) ~[stormjar.jar:na]
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268) ~[stormjar.jar:na]
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89) ~[stormjar.jar:na]
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178) ~[stormjar.jar:na]
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108) ~[stormjar.jar:na]
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42) ~[stormjar.jar:na]
... 3 common frames omitted

- The overlord's logs at the time of the 400 Bad Request indicate that the task already exists.
- The requests that fail with Bad Request are retried every second until the end of the hour (the segment granularity is 1h).