Using AWS CDK (2.66.0), when trying to add the property as log_group=self.lambda_log_group to the Lambda resource ec2_launch_fn = DockerImageFunction(), I get error:
TypeError: __init__() got an unexpected keyword argument 'log_group'.
I'm using AWS CDK version 2.66.0; also tried 2.133.0 with no luck.
Basically, I am trying to change a functional AWS Lambda to send its logs to a custom CloudWatch (CW) log group instead of sending its logs to an automatically created CW log group.
The Lambda resource ec2_launch_fn = DockerImageFunction() had a property log_retention=logs.RetentionDays.ONE_MONTH only, and it was sending its logs to an automatically created CW log group.
To change that, I created a CW log group resource self.lambda_log_group = logs.LogGroup() inside def __init__() and added the property log_group=self.lambda_log_group to the Lambda resource ec2_launch_fn. I also removed log_retention=logs.RetentionDays.ONE_MONTH from the Lambda resource.
The log group is created correctly, but the property log_group=self.lambda_log_group is giving the error mentioned above. According to the API Reference (aws_cdk.aws_lambda.DockerImageFunction), log_group is a valid property of the class aws_cdk.aws_lambda.DockerImageFunction() in AWS CDK Python.
If I do not add this property log_group=self.lambda_log_group to the Lambda resource ec2_launch_fn, I get NO error, but the Lambda continues to send its logs to the automatically created log group.
How can I fix this error and make the Lambda send its logs to the custom CW log group?
AWS CDK code (some irrelevant lines removed):
import os
from aws_cdk import (
Duration,
RemovalPolicy,
Stack,
aws_events as events,
aws_events_targets as targets,
aws_iam as iam,
aws_ec2 as ec2,
aws_ecr_assets as ecra,
aws_logs as logs
)
from aws_cdk.aws_secretsmanager import Secret
from aws_cdk.aws_lambda import DockerImageCode, DockerImageFunction
from constructs import Construct
class AutoScalingLaunchLambdaCdkStack(Stack):
    """CDK stack that creates a Lambda function to process the EC2 launch
    lifecycle hook events emitted by auto-scaling groups, writing its logs
    to an explicitly managed CloudWatch log group.

    NOTE(review): the ``log_group`` property on ``Function`` /
    ``DockerImageFunction`` was only introduced in aws-cdk-lib 2.112.0.
    With an older installed aws-cdk-lib (e.g. the 2.66.0 visible in the
    pipeline traceback) jsii raises
    ``TypeError: __init__() got an unexpected keyword argument 'log_group'``.
    Fix: upgrade the *installed* library (``pip install "aws-cdk-lib>=2.112.0"``)
    — merely reading newer API docs is not enough. On older versions the
    usual workaround is to pre-create a log group named exactly
    ``/aws/lambda/<function_name>`` and drop the ``log_group`` argument.
    """

    def __init__(self, scope: Construct, construct_id: str, res_name: str,
                 instance: str, config: dict, **kwargs) -> None:
        """Create the stack.

        :param scope: parent construct.
        :param construct_id: unique construct id of this stack.
        :param res_name: resource-name prefix used in construct ids.
        :param instance: environment/instance discriminator (e.g. ``dev``).
        :param config: stack configuration; must contain a ``"vpc_id"`` key.
            (It is indexed with string keys below, so it is a mapping —
            the previous ``list`` annotation was wrong.)
        """
        super().__init__(scope, construct_id, **kwargs)
        self._res_name = res_name
        self._instance = instance
        # Custom CW log group for the Lambda; deleted from CloudWatch when the
        # resource is removed from the code or the CFN stack is deleted.
        # NOTE(review): the "-log" suffix means this name does NOT match
        # /aws/lambda/<function_name>; that only matters if you fall back to
        # the pre-2.112 "matching name" workaround — confirm which path you use.
        self.lambda_log_group = logs.LogGroup(
            self, f'{self._res_name}-EC2LifeCycleLaunchLogGroup-{self._instance}',
            log_group_name=f'/aws/lambda/EC2LifeCycleLaunch-{self._instance}-log',
            retention=logs.RetentionDays.ONE_MONTH,
            removal_policy=RemovalPolicy.DESTROY,
        )
        ec2_launch_fn: DockerImageFunction = self.build_auto_scaling_ec2_launch_lambda(config)
        self.set_role(ec2_launch_fn)

    def build_auto_scaling_ec2_launch_lambda(self, config: dict) -> DockerImageFunction:
        """Build the Docker-image Lambda wired into the configured VPC.

        :param config: mapping that must contain ``"vpc_id"`` of the VPC
            to look up.
        :return: the created ``DockerImageFunction``.
        """
        stack_path = os.path.dirname(os.path.realpath(__file__))
        lambda_path = os.path.join(stack_path, "..", "lambdas", "auto_scaling_ec2_launch")
        auto_scaling_vpc = ec2.Vpc.from_lookup(
            self, "VPC", vpc_id=config["vpc_id"]
        )
        security_group = ec2.SecurityGroup(
            self,
            # f-string for consistency with the rest of the file (same value
            # as the previous str.format call).
            f"{self._res_name}-SecurityGroup-{self._instance}",
            description=f"SecurityGroup for {self._res_name} - {self._instance}",
            vpc=auto_scaling_vpc,
            allow_all_outbound=True,
        )
        ec2_launch_fn = DockerImageFunction(
            self,
            f"{self._res_name}-EC2LifeCycleLaunch-{self._instance}",
            function_name=f'EC2LifeCycleLaunch-{self._instance}',
            code=DockerImageCode.from_image_asset(
                directory=lambda_path,
                network_mode=ecra.NetworkMode.HOST,
            ),
            description="Lambda to update Oracle database when an auto scaling group launches an EC2",
            environment={
                "REGION": self.region,
                "SECRET_NAME": f"{self._instance}/db/gbcust",
                "FO_ENVIRONMENT": self._instance,
            },
            vpc=auto_scaling_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
            security_groups=[security_group],
            # Requires aws-cdk-lib >= 2.112.0 at *runtime*; on older installed
            # versions jsii raises TypeError for this unknown keyword.
            log_group=self.lambda_log_group,
            memory_size=128,
            timeout=Duration.seconds(30),
            retry_attempts=2,
        )
        return ec2_launch_fn

    def set_role(self, ec2_launch_fn: DockerImageFunction) -> None:
        """Attach the IAM permissions the Lambda needs.

        Grants auto-scaling lifecycle/describe actions and write access to the
        custom log group. ``role`` is Optional in the CDK typing; it is always
        present here because the function was created without a custom role.

        :param ec2_launch_fn: the Lambda function whose role is extended.
        """
        assert ec2_launch_fn.role is not None  # CDK auto-creates the role
        # NOTE(review): AmazonVPCFullAccess is very broad for a Lambda that
        # only runs inside a VPC — consider narrowing. Kept as-is.
        ec2_launch_fn.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonVPCFullAccess")
        )
        # Allow completing the lifecycle action on any ASG in this account/region.
        ec2_launch_fn.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["autoscaling:CompleteLifecycleAction"],
                resources=[
                    f"arn:aws:autoscaling:{self.region}:{self.account}:autoScalingGroup:*:autoScalingGroupName/*"]
            )
        )
        # DescribeTags / DescribeAutoScalingInstances do not support
        # resource-level permissions, hence "*".
        ec2_launch_fn.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["autoscaling:DescribeTags"],
                resources=["*"]
            )
        )
        ec2_launch_fn.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["autoscaling:DescribeAutoScalingInstances"],
                resources=["*"]
            )
        )
        # Write access scoped to the custom log group only.
        ec2_launch_fn.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["logs:CreateLogStream", "logs:PutLogEvents"],
                resources=[f"arn:aws:logs:{self.region}:{self.account}:log-group:{self.lambda_log_group.log_group_name}:*"]
            )
        )
Error from the pipeline:
17:51:35 + cdk deploy --force LifeCycleLaunchStack-dev --require-approval=never -c environment=dev -c service=fo-lifecycle
17:51:42 Traceback (most recent call last):
17:51:42 File "/home/jenkins/agent/workspace/_poc_environment_feature_ABC-888/app.py", line 53, in <module>
17:51:42 AutoScalingLaunchLambdaCdkStack(
17:51:42 File "/usr/local/lib/python3.9/dist-packages/jsii/_runtime.py", line 111, in __call__
17:51:42 inst = super().__call__(*args, **kwargs)
17:51:42 File "/home/jenkins/agent/workspace/_poc_environment_feature_ABC-888/build/stacks/lifecycle_launch_stack.py", line 38, in __init__
17:51:42 ec2_launch_fn: DockerImageFunction = self.build_auto_scaling_ec2_launch_lambda(config)
17:51:42 File "/home/jenkins/agent/workspace/_poc_environment_feature_ABC-888/build/stacks/lifecycle_launch_stack.py", line 62, in build_auto_scaling_ec2_launch_lambda
17:51:42 ec2_launch_fn = DockerImageFunction(
17:51:42 File "/usr/local/lib/python3.9/dist-packages/jsii/_runtime.py", line 111, in __call__
17:51:42 inst = super().__call__(*args, **kwargs)
17:51:42 TypeError: __init__() got an unexpected keyword argument 'log_group'