java.lang.IllegalArgumentException: No security protocol defined for listener PLAINTEXTSASL
listeners=http://0.0.0.0:8081
kafkastore.connection.url=localhost:2181
kafkastore.topic=_schemas
debug=false
./bin/schema-registry-start etc/schema-registry/schema-registry.properties
[2017-06-05 10:30:26,731] INFO SchemaRegistryConfig values:
metric.reporters = []
kafkastore.sasl.kerberos.kinit.cmd = /usr/bin/kinit
response.mediatype.default = application/vnd.schemaregistry.v1+json
kafkastore.ssl.trustmanager.algorithm = PKIX
authentication.realm =
ssl.keystore.type = JKS
kafkastore.topic = _schemas
metrics.jmx.prefix = kafka.schema.registry
kafkastore.ssl.enabled.protocols = TLSv1.2,TLSv1.1,TLSv1
kafkastore.topic.replication.factor = 3
ssl.truststore.password =
kafkastore.timeout.ms = 500
host.name = sri-hadoop-nodes-231v.c.ampool-141120.internal
kafkastore.bootstrap.servers = []
schema.registry.zk.namespace = schema_registry
kafkastore.sasl.kerberos.ticket.renew.window.factor = 0.8
kafkastore.sasl.kerberos.service.name =
ssl.endpoint.identification.algorithm =
compression.enable = false
kafkastore.ssl.truststore.type = JKS
avro.compatibility.level = backward
kafkastore.ssl.protocol = TLS
kafkastore.ssl.provider =
kafkastore.ssl.truststore.location =
response.mediatype.preferred = [application/vnd.schemaregistry.v1+json, application/vnd.schemaregistry+json, application/json]
kafkastore.ssl.keystore.type = JKS
ssl.truststore.type = JKS
kafkastore.ssl.truststore.password =
access.control.allow.origin =
ssl.truststore.location =
ssl.keystore.password =
port = 8081
kafkastore.ssl.keystore.location =
master.eligibility = true
ssl.client.auth = false
kafkastore.ssl.keystore.password =
kafkastore.security.protocol = PLAINTEXT
ssl.trustmanager.algorithm =
authentication.method = NONE
request.logger.name = io.confluent.rest-utils.requests
ssl.key.password =
kafkastore.zk.session.timeout.ms = 30000
kafkastore.sasl.mechanism = GSSAPI
kafkastore.sasl.kerberos.ticket.renew.jitter = 0.05
kafkastore.ssl.key.password =
zookeeper.set.acl = false
authentication.roles = [*]
metrics.num.samples = 2
ssl.protocol = TLS
kafkastore.ssl.keymanager.algorithm = SunX509
kafkastore.connection.url = localhost:2181
debug = false
listeners = [PLAINTEXT://localhost:9091, http://0.0.0.0:8081]
ssl.provider =
ssl.enabled.protocols = []
shutdown.graceful.ms = 1000
ssl.keystore.location =
ssl.cipher.suites = []
kafkastore.ssl.endpoint.identification.algorithm =
kafkastore.ssl.cipher.suites =
access.control.allow.methods =
kafkastore.sasl.kerberos.min.time.before.relogin = 60000
ssl.keymanager.algorithm =
metrics.sample.window.ms = 30000
kafkastore.init.timeout.ms = 60000
(io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig:169)
[2017-06-05 10:30:26,961] WARN Found a listener with an unsupported scheme (supported: [http, https]). Ignoring listener 'PLAINTEXT://localhost:9091' (io.confluent.rest.Application:356)
[2017-06-05 10:30:26,971] WARN Found a listener with an unsupported scheme (supported: [http, https]). Ignoring listener 'PLAINTEXT://localhost:9091' (io.confluent.rest.Application:356)
[2017-06-05 10:30:27,327] ERROR Server died unexpectedly: (io.confluent.kafka.schemaregistry.rest.SchemaRegistryMain:51)
kafka.common.KafkaException: Failed to parse the broker info from zookeeper: {"jmx_port":-1,"timestamp":"1496646940963","endpoints":["PLAINTEXTSASL://sri-hadoop-nodes-z09n.c.ampool-141120.internal:6667"],"host":null,"version":3,"port":-1}
at kafka.cluster.Broker$.createBroker(Broker.scala:125)
at kafka.utils.ZkUtils.getBrokerInfo(ZkUtils.scala:799)
at kafka.utils.ZkUtils$$anonfun$getAllBrokersInCluster$2.apply(ZkUtils.scala:253)
at kafka.utils.ZkUtils$$anonfun$getAllBrokersInCluster$2.apply(ZkUtils.scala:253)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at kafka.utils.ZkUtils.getAllBrokersInCluster(ZkUtils.scala:253)
at io.confluent.kafka.schemaregistry.storage.KafkaStore.<init>(KafkaStore.java:121)
at io.confluent.kafka.schemaregistry.storage.KafkaSchemaRegistry.<init>(KafkaSchemaRegistry.java:144)
at io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication.setupResources(SchemaRegistryRestApplication.java:53)
at io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication.setupResources(SchemaRegistryRestApplication.java:37)
at io.confluent.rest.Application.createServer(Application.java:149)
at io.confluent.kafka.schemaregistry.rest.SchemaRegistryMain.main(SchemaRegistryMain.java:43)
Caused by: java.lang.IllegalArgumentException: No security protocol defined for listener PLAINTEXTSASL
at kafka.cluster.EndPoint$$anonfun$securityProtocol$1$1.apply(EndPoint.scala:48)
at kafka.cluster.EndPoint$$anonfun$securityProtocol$1$1.apply(EndPoint.scala:48)
at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
at scala.collection.AbstractMap.getOrElse(Map.scala:59)
at kafka.cluster.EndPoint$.securityProtocol$1(EndPoint.scala:47)
at kafka.cluster.EndPoint$.createEndPoint(EndPoint.scala:56)
at kafka.cluster.Broker$$anonfun$2.apply(Broker.scala:115)
at kafka.cluster.Broker$$anonfun$2.apply(Broker.scala:115)
at scala.collection.immutable.List.map(List.scala:273)
at kafka.cluster.Broker$.createBroker(Broker.scala:115)
... 16 more
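For reference on the root cause: HDP-flavored brokers register themselves in ZooKeeper under the nonstandard protocol name PLAINTEXTSASL, while the vanilla Apache Kafka client bundled with Schema Registry only recognizes SASL_PLAINTEXT, so the endpoint parser in the stack trace above fails the lookup. A minimal sketch of the equivalent lookup on the Java client side (illustration only, not the exact Scala code path above; in 0.10.x-era clients the class lives in org.apache.kafka.common.protocol, newer clients moved it to org.apache.kafka.common.security.auth):

import org.apache.kafka.common.protocol.SecurityProtocol;

public class ProtocolNameCheck {
    public static void main(String[] args) {
        // A standard listener name parses fine:
        System.out.println(SecurityProtocol.forName("SASL_PLAINTEXT"));
        // The HDP-specific alias is not in the enum, so this throws
        // IllegalArgumentException, mirroring the failure above:
        System.out.println(SecurityProtocol.forName("PLAINTEXTSASL"));
    }
}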
$ ./bin/schema-registry-start etc/schema-registry/schema-registry.properties
[2017-06-06 11:55:05,304] INFO SchemaRegistryConfig values:
metric.reporters = []
kafkastore.sasl.kerberos.kinit.cmd = /usr/bin/kinit
response.mediatype.default = application/vnd.schemaregistry.v1+json
kafkastore.ssl.trustmanager.algorithm = PKIX
authentication.realm =
ssl.keystore.type = JKS
kafkastore.topic = _schemas
metrics.jmx.prefix = kafka.schema.registry
kafkastore.ssl.enabled.protocols = TLSv1.2,TLSv1.1,TLSv1
kafkastore.topic.replication.factor = 3
ssl.truststore.password =
kafkastore.timeout.ms = 500
host.name = sri-hadoop-nodes-231v.c.ampool-141120.internal
kafkastore.bootstrap.servers = [SASL_PLAINTEXT://localhost:6667]
schema.registry.zk.namespace = schema_registry
kafkastore.sasl.kerberos.ticket.renew.window.factor = 0.8
kafkastore.sasl.kerberos.service.name = kafka
ssl.endpoint.identification.algorithm =
compression.enable = false
kafkastore.ssl.truststore.type = JKS
avro.compatibility.level = backward
kafkastore.ssl.protocol = TLS
kafkastore.ssl.provider =
kafkastore.ssl.truststore.location =
response.mediatype.preferred = [application/vnd.schemaregistry.v1+json, application/vnd.schemaregistry+json, application/json]
kafkastore.ssl.keystore.type = JKS
ssl.truststore.type = JKS
kafkastore.ssl.truststore.password =
access.control.allow.origin =
ssl.truststore.location =
ssl.keystore.password =
port = 8081
kafkastore.ssl.keystore.location =
master.eligibility = true
ssl.client.auth = false
kafkastore.ssl.keystore.password =
kafkastore.security.protocol = SASL_PLAINTEXT
ssl.trustmanager.algorithm =
authentication.method = NONE
request.logger.name = io.confluent.rest-utils.requests
ssl.key.password =
kafkastore.zk.session.timeout.ms = 30000
kafkastore.sasl.mechanism = GSSAPI
kafkastore.sasl.kerberos.ticket.renew.jitter = 0.05
kafkastore.ssl.key.password =
zookeeper.set.acl = false
authentication.roles = [*]
metrics.num.samples = 2
ssl.protocol = TLS
kafkastore.ssl.keymanager.algorithm = SunX509
kafkastore.connection.url = localhost:2181
debug = true
listeners = [http://0.0.0.0:8081]
ssl.provider =
ssl.enabled.protocols = []
shutdown.graceful.ms = 1000
ssl.keystore.location =
ssl.cipher.suites = []
kafkastore.ssl.endpoint.identification.algorithm =
kafkastore.ssl.cipher.suites =
access.control.allow.methods =
kafkastore.sasl.kerberos.min.time.before.relogin = 60000
ssl.keymanager.algorithm =
metrics.sample.window.ms = 30000
kafkastore.init.timeout.ms = 60000
(io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig:169)
[2017-06-06 11:55:06,052] ERROR Server died unexpectedly: (io.confluent.kafka.schemaregistry.rest.SchemaRegistryMain:51)
kafka.common.KafkaException: Failed to parse the broker info from zookeeper: {"jmx_port":-1,"timestamp":"1496747803335","endpoints":["PLAINTEXTSASL://sri-hadoop-nodes-z09n.c.ampool-141120.internal:6667"],"host":null,"version":3,"port":-1}
kafkastore.bootstrap.servers=SASL_PLAINTEXT://localhost:6667
listeners=http://0.0.0.0:8081
kafkastore.connection.url=localhost:2181
kafkastore.topic=_schemas
debug=true
kafkastore.bootstrap.servers=SASL_PLAINTEXT://localhost:6667
kafkastore.security.protocol=SASL_PLAINTEXT
kafkastore.sasl.mechanism=GSSAPI
kafkastore.sasl.kerberos.service.name=kafka
KAFKA_OPTS="-Djava.security.auth.login.config=/usr/hdp/current/kafka-broker/config/kafka_client_jaas.conf"
// ZooKeeper client authentication
Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="/etc/security/keytabs/kafka.service.keytab"
    serviceName="kafka"
    principal="kafka/xxx-hadoop-nodes-231v...@TEST.XXX.IO";
};

// Kafka client authorization, uses credential cache
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="/etc/security/keytabs/kafka.service.keytab"
    principal="kafka/xxx-hadoop-nodes-231v...@TEST.XXX.IO"
    serviceName="kafka";
};
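A note on the KAFKA_OPTS line above: assuming the start script forwards those JVM options to the Schema Registry process (which is what exporting KAFKA_OPTS is intended to do here), the JAAS file ends up as the java.security.auth.login.config system property. The equivalent in code, as a sketch, must run before the first Kafka or ZooKeeper client is constructed; the path is the one from this thread, not a general default:

// Same effect as -Djava.security.auth.login.config on the command line.
System.setProperty("java.security.auth.login.config",
        "/usr/hdp/current/kafka-broker/config/kafka_client_jaas.conf");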
You are missing the following:

kafkastore.security.protocol=SASL_PLAINTEXT
kafkastore.sasl.mechanism=GSSAPI

Also you will need to create a jaas conf file for the SR similar to this:

// ZooKeeper client authentication
Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="/home/centos/cp-sec/keytabs/kafka.keytab"
    principal="kafka/kafka1.cops.i...@COPS.IO";
};

// Kafka client authorization, uses credential cache
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="/home/centos/cp-sec/keytabs/kafka.keytab"
2. /usr/hdp/current/kafka-broker/config/kafka_client_jaas.conf
principal="kafka/xxx-hadoop-nodes-231v.c.xxx-141120.inte...@TEST.XXX.IO";
}; // Kafka client authorization, uses credential cache KafkaClient { com.sun.security.auth.module.Krb5LoginModule required useKeyTab=true storeKey=true useTicketCache=false keyTab="/etc/security/keytabs/kafka.service.keytab"
principal="kafka/xxx-hadoop-nodes-231v.c.xxx-141120.inte...@TEST.XXX.IO" serviceName="kafka"; };