aws:
  # Credentials can be hardcoded or set in environment variables
  access_key_id: XXXXXXXXXX
  secret_access_key: XXXXXXXXXX
  s3:
    region: us-east-1
    buckets:
      assets: s3://snowplow-hosted-assets # DO NOT CHANGE unless you are hosting the jarfiles etc yourself in your own bucket
      jsonpath_assets: s3://snowplow-assets/custom-events-jsonpaths/ # If you have defined your own JSON Schemas, add the s3:// path to your own JSON Path files in your own bucket here
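      # Illustrative layout only: self-hosted JSON Path files are conventionally grouped
      # per schema vendor. The vendor and file name below are placeholder assumptions,
      # not part of this deployment, e.g.
      #   s3://snowplow-assets/custom-events-jsonpaths/com.acme/ad_click_1.json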
      log: s3://snowplow-emr-logs/
      raw:
        in: # Multiple in buckets are permitted
          - s3://elasticbeanstalk-us-east-1-238537778401/resources/environments/logs/publish/ # e.g. s3://my-archive-bucket/raw
        processing: s3://snowplow-etl-emr-runner/processing
        archive: s3://snowplow-etl-emr-runner/archive # e.g. s3://my-archive-bucket/raw
      enriched:
        good: s3://snowplow-etl-emr-runner/enriched/good # e.g. s3://my-out-bucket/enriched/good
        bad: s3://snowplow-etl-emr-runner/enriched/bad # e.g. s3://my-out-bucket/enriched/bad
        errors: # Leave blank unless continue_on_unexpected_error is set to true below
        archive: s3://snowplow-etl-emr-runner/enriched/archive # Where to archive enriched events to, e.g. s3://my-out-bucket/enriched/archive
      shredded:
        good: s3://snowplow-etl-emr-runner/shredded/good # e.g. s3://my-out-bucket/shredded/good
        bad: s3://snowplow-etl-emr-runner/shredded/bad # e.g. s3://my-out-bucket/shredded/bad
        errors: # Leave blank unless continue_on_unexpected_error is set to true below
        archive: s3://snowplow-etl-emr-runner/shredded/archive # Not required for Postgres currently
  emr:
    ami_version: 4.3.0 # Don't change this (was 3.7.0)
    region: us-east-1 # Always set this
    jobflow_role: EMR_EC2_DefaultRole # Created using $ aws emr create-default-roles
    service_role: EMR_DefaultRole # Created using $ aws emr create-default-roles
    placement: # Set this if not running in VPC. Leave blank otherwise
    ec2_subnet_id: # Set this if running in VPC. Leave blank otherwise
    ec2_key_name: xxxx
    bootstrap: [] # Set this to specify custom bootstrap actions. Leave empty otherwise
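    # A minimal sketch of the assumed format for custom bootstrap actions: a list of
    # S3 paths to shell scripts run on each node before the job starts. The bucket and
    # script name below are illustrative placeholders, not part of this deployment.
    # bootstrap: ["s3://my-bootstrap-bucket/install-extra-deps.sh"]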
    software:
      hbase: # To launch on cluster, provide version, "0.92.0", keep quotes
      lingual: # To launch on cluster, provide version, "1.1", keep quotes
    # Adjust your Hadoop cluster below
    jobflow:
      master_instance_type: m1.medium
      core_instance_count: 2
      core_instance_type: m1.medium
      task_instance_count: 0 # Increase to use spot instances
      task_instance_type: m1.medium
      task_instance_bid: 0.015 # In USD. Adjust bid, or leave blank for non-spot-priced (i.e. on-demand) task instances
    bootstrap_failure_tries: 3 # Number of times to attempt the job in the event of bootstrap failures
collectors:
  format: clj-tomcat # 'clj-tomcat' for the Clojure Collector, 'cloudfront' for the CloudFront Collector, 'thrift' for Thrift records, or 'tsv/com.amazon.aws.cloudfront/wd_access_log' for CloudFront access logs
enrich:
  job_name: Snowplow ETL # Give your job a name
  versions:
    hadoop_enrich: 1.6.0 # Version of the Hadoop Enrichment process (was 1.5.1)
    hadoop_shred: 0.8.0 # Version of the Hadoop Shredding process (was 0.7.0)
    hadoop_elasticsearch: 0.1.0 # Version of the Hadoop to Elasticsearch copying process
  continue_on_unexpected_error: false # Set to 'true' (and set the errors: buckets above) if you don't want any exceptions thrown from ETL
  output_compression: NONE # Compression only supported with Redshift, set to NONE if you have Postgres targets. Allowed formats: NONE, GZIP
storage:
  download:
    folder: # Postgres-only config option. Where to store the downloaded files. Leave blank for Redshift
  targets:
    - name: "Redshift database"
      type: redshift
      host: # The cluster endpoint as shown in the Redshift console
      database: database # Name of database
      port: 5439 # Default Redshift port
      ssl_mode: disable # One of disable (default), require, verify-ca or verify-full
      table: atomic.events
      username: snowplow
      password: xxxxxxxxxx
      maxerror: 1 # Stop loading on first error, or increase to permit more load errors
      comprows: 200000 # Default for a 1 XL node cluster. Not used unless --include compupdate specified
    - name: "ELK Elasticsearch cluster" # Name for the target - used to label the corresponding jobflow step
      type: elasticsearch # Marks the database type as Elasticsearch
      host: "xxxxxxxxxx" # Elasticsearch host
      database: snowplow # The Elasticsearch index
      port: 9200 # Port used to connect to Elasticsearch
      table: bad_rows # The Elasticsearch type
      es_nodes_wan_only: false # Set to true if using Amazon Elasticsearch Service
      username: # Not required for Elasticsearch
      password: # Not required for Elasticsearch
      sources: # Leave blank or specify: ["s3://out/enriched/bad/run=xxx", "s3://out/shred/bad/run=yyy"]
      maxerror: # Not required for Elasticsearch
      comprows: # Not required for Elasticsearch
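    # Illustrative sketch only: a PostgreSQL target follows the same shape as the
    # Redshift target above, with type set to 'postgres'. The host, database name, and
    # credentials below are placeholder assumptions, not values from this deployment.
    # - name: "PostgreSQL database"
    #   type: postgres
    #   host: localhost # Hostname of the database server
    #   database: snowplow # Name of database
    #   port: 5432 # Default Postgres port
    #   ssl_mode: disable
    #   table: atomic.events
    #   username: snowplow
    #   password: xxxxxxxxxx
    #   maxerror: # Not required for Postgres
    #   comprows: # Not required for Postgres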
monitoring:
  tags: {} # Name-value pairs describing this job
  logging:
    level: DEBUG # You can optionally switch to INFO for production
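
# A typical invocation passing this file to EmrEtlRunner looks roughly like the following
# (the binary path and resolver file name are illustrative, not prescriptive):
#   ./snowplow-emr-etl-runner --config config.yml --resolver iglu_resolver.json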