I have a situation where we want to process data from 3 different queues, and the processing can be done by the same code (container).
The AWS documentation says I can define multiple containers in a single task definition but I am having problems setting up this scenario
My test setup (see code snippets below) returns the following error (where instance_prefix='mycode') when I deploy:
* aws_ecs_service.ecs_service: InvalidParameterException: The container mycode does not exist in the task definition.
Which makes sense since the containers are named mycode0, mycode1 and mycode2.
Question 1: any thoughts on how to fix this so that the deploy works?
Question 2: would it be better to define three separate task definitions instead — one for mycode0, one for mycode1, etc.?
Any help appreciated.
# Task definition that bundles all three "mycode" containers
# (mycode0/mycode1/mycode2) rendered from the JSON template below.
resource "aws_ecs_task_definition" "mytask" {
# Family name doubles as the task-definition identifier; revisions share it.
family = "${var.instance_prefix}"
# NOTE(review): on Terraform >= 0.7 template_file is a data source and this
# reference should be "${data.template_file.task_def.rendered}" — confirm
# which Terraform version this configuration targets.
container_definitions = "${template_file.task_def.rendered}"
}
# EC2-backed ECS cluster that hosts the service defined below.
module "cluster" {
  source = "../modules/ecs-cluster"

  cluster_name = "${var.instance_prefix}"
  environment  = "${var.environment}"

  # Launch configuration for the container instances.
  aws_ami            = "${var.aws_ami}"
  key_name           = "${var.key_name}"
  aws_security_group = "${var.aws_security_group}"
  aws_vpc_subnets    = "${var.aws_vpc_subnets}"
  instance_type      = "${var.instance_type}"

  # Auto-scaling group sizing.
  asg_max_size         = "${var.asg_max_size}"
  asg_min_size         = "${var.asg_min_size}"
  asg_desired_capacity = "${var.asg_desired_capacity}"
}
# ECS service wrapper around the task definition declared above.
module "service" {
  source       = "../modules/ecs-service"
  service_name = "${var.instance_prefix}"

  # FIX: the task definition resource is named "mytask" (see
  # aws_ecs_task_definition above); the previous reference to a non-existent
  # "collector" resource fails at plan time.
  task_definition = "${aws_ecs_task_definition.mytask.arn}"
  task_count      = "${var.instance_count}"

  # container_name is passed through to the service's load_balancer block and
  # MUST exactly match the "name" of one container inside the task
  # definition's container_definitions (e.g. "mycode0" — not the bare
  # instance_prefix "mycode"), otherwise the deploy fails with
  # "The container ... does not exist in the task definition".
  container_name = "${var.container_name}"
  container_port = "${var.container_port}"
}
# part of the module that defines the ecs_service:
# ECS service resource (lives inside the ecs-service module).
resource "aws_ecs_service" "ecs_service" {
  name            = "${var.service_name}"
  cluster         = "${var.cluster_name}"
  task_definition = "${var.task_definition}"
  desired_count   = "${var.task_count}"
  iam_role        = "${aws_iam_role.ecs_service.arn}"

  # Exactly one container from the task definition is registered with the ELB;
  # container_name must match that container's "name" in the task definition.
  load_balancer {
    elb_name       = "${var.elb_id}"
    container_name = "${var.container_name}"
    container_port = "${var.container_port}"
  }

  # Keep the IAM role policy alive until the service is destroyed; without
  # this ordering, destroys frequently fail with
  # "timeout while waiting for state to become 'INACTIVE'".
  depends_on = ["aws_iam_role_policy.ecs_service"]
}
# task definition template file snippet:
[
  {
    "name": "mycode0",
    "essential": true,
    "cpu": 50,
    "memory": 500,
    "command": ["mycode.py"],
    "environment": [
      { "name": "MYVAR", "value": "0" }
    ]
  },
  {
    "name": "mycode1",
    "essential": true,
    "cpu": 50,
    "memory": 500,
    "command": ["mycode.py"],
    "environment": [
      { "name": "MYVAR", "value": "1" }
    ]
  },
  {
    "name": "mycode2",
    "essential": true,
    "cpu": 50,
    "memory": 500,
    "command": ["mycode.py"],
    "environment": [
      { "name": "MYVAR", "value": "2" }
    ]
  }
]