Unify aws_conn_id type to always be str | None (#37768)
eejbyfeldt committed Feb 28, 2024
1 parent fb65112 commit 2bc1036
Showing 58 changed files with 305 additions and 171 deletions.
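
In practice, the widened annotation means the operators and hooks touched here now accept None for aws_conn_id, which skips the Airflow connection lookup and lets boto3 resolve credentials on its own. A minimal, hypothetical DAG sketch of that usage (dag id and instance id are placeholders; EC2StartInstanceOperator and its parameters appear in the ec2.py hunks below):

from datetime import datetime

from airflow import DAG
from airflow.providers.amazon.aws.operators.ec2 import EC2StartInstanceOperator

with DAG(dag_id="ec2_boto3_defaults_example", start_date=datetime(2024, 1, 1), schedule=None):
    # aws_conn_id=None now type-checks as well as works at runtime: no Airflow
    # connection is looked up, and boto3's default credential chain is used on
    # whichever worker executes the task.
    EC2StartInstanceOperator(
        task_id="start_instance",
        instance_id="i-0123456789abcdef0",  # placeholder instance id
        aws_conn_id=None,
    )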
2 changes: 1 addition & 1 deletion airflow/providers/amazon/aws/hooks/redshift_sql.py
@@ -59,7 +59,7 @@ class RedshiftSQLHook(DbApiHook):
hook_name = "Amazon Redshift"
supports_autocommit = True

- def __init__(self, *args, aws_conn_id: str = "aws_default", **kwargs) -> None:
+ def __init__(self, *args, aws_conn_id: str | None = "aws_default", **kwargs) -> None:
super().__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id

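The same applies at the hook level. A small hypothetical sketch, only instantiating the hook from the hunk above, showing that None is now an accepted value of the annotation rather than something merely tolerated at runtime:

from airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook

# No Airflow connection is consulted for AWS credentials; boto3 falls back to
# its default resolution (environment variables, shared config, instance
# profile) on the machine where the hook runs.
hook = RedshiftSQLHook(aws_conn_id=None)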
12 changes: 10 additions & 2 deletions airflow/providers/amazon/aws/operators/appflow.py
@@ -143,7 +143,11 @@ class AppflowRunOperator(AppflowBaseOperator):
:param source: Obsolete, unnecessary for this operator
:param flow_name: The flow name
:param poll_interval: how often in seconds to check the query status
- :param aws_conn_id: aws connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
@@ -222,7 +226,11 @@ class AppflowRunBeforeOperator(AppflowBaseOperator):
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
- :param aws_conn_id: aws connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
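The caveat in the new docstring (with no connection id, credentials and configuration must be present on every worker) can be checked directly with boto3. A small diagnostic sketch, assuming only that boto3 is installed, which the Amazon provider already requires:

import boto3

# Shows what the default credential chain resolves to on this machine; this is
# what the provider falls back to when aws_conn_id is None or empty.
session = boto3.session.Session()
print("region:", session.region_name)
print("credentials found:", session.get_credentials() is not None)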
2 changes: 1 addition & 1 deletion airflow/providers/amazon/aws/operators/cloud_formation.py
@@ -96,7 +96,7 @@ def __init__(
*,
stack_name: str,
cloudformation_parameters: dict | None = None,
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
4 changes: 2 additions & 2 deletions airflow/providers/amazon/aws/operators/dms.py
@@ -79,7 +79,7 @@ def __init__(
table_mappings: dict,
migration_type: str = "full-load",
create_task_kwargs: dict | None = None,
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
@@ -226,7 +226,7 @@ def __init__(
replication_task_arn: str,
start_replication_task_type: str = "start-replication",
start_task_kwargs: dict | None = None,
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(**kwargs)
48 changes: 36 additions & 12 deletions airflow/providers/amazon/aws/operators/ec2.py
@@ -36,7 +36,11 @@ class EC2StartInstanceOperator(BaseOperator):
:ref:`howto/operator:EC2StartInstanceOperator`
:param instance_id: id of the AWS EC2 instance
- :param aws_conn_id: aws connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region_name: (optional) aws region name associated with the client
:param check_interval: time in seconds that the job should wait in
between each instance state checks until operation is completed
@@ -50,7 +54,7 @@ def __init__(
self,
*,
instance_id: str,
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
check_interval: float = 15,
**kwargs,
@@ -82,7 +86,11 @@ class EC2StopInstanceOperator(BaseOperator):
:ref:`howto/operator:EC2StopInstanceOperator`
:param instance_id: id of the AWS EC2 instance
- :param aws_conn_id: aws connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region_name: (optional) aws region name associated with the client
:param check_interval: time in seconds that the job should wait in
between each instance state checks until operation is completed
@@ -96,7 +104,7 @@ def __init__(
self,
*,
instance_id: str,
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
check_interval: float = 15,
**kwargs,
@@ -130,7 +138,11 @@ class EC2CreateInstanceOperator(BaseOperator):
:param image_id: ID of the AMI used to create the instance.
:param max_count: Maximum number of instances to launch. Defaults to 1.
:param min_count: Minimum number of instances to launch. Defaults to 1.
- :param aws_conn_id: AWS connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region_name: AWS region name associated with the client.
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
@@ -156,7 +168,7 @@ def __init__(
image_id: str,
max_count: int = 1,
min_count: int = 1,
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
poll_interval: int = 20,
max_attempts: int = 20,
@@ -223,7 +235,11 @@ class EC2TerminateInstanceOperator(BaseOperator):
:ref:`howto/operator:EC2TerminateInstanceOperator`
:param instance_id: ID of the instance to be terminated.
- :param aws_conn_id: AWS connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region_name: AWS region name associated with the client.
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
@@ -238,7 +254,7 @@ class EC2TerminateInstanceOperator(BaseOperator):
def __init__(
self,
instance_ids: str | list[str],
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
poll_interval: int = 20,
max_attempts: int = 20,
@@ -280,7 +296,11 @@ class EC2RebootInstanceOperator(BaseOperator):
:ref:`howto/operator:EC2RebootInstanceOperator`
:param instance_ids: ID of the instance(s) to be rebooted.
- :param aws_conn_id: AWS connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region_name: AWS region name associated with the client.
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
@@ -298,7 +318,7 @@ def __init__(
self,
*,
instance_ids: str | list[str],
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
poll_interval: int = 20,
max_attempts: int = 20,
@@ -339,7 +359,11 @@ class EC2HibernateInstanceOperator(BaseOperator):
:ref:`howto/operator:EC2HibernateInstanceOperator`
:param instance_ids: ID of the instance(s) to be hibernated.
- :param aws_conn_id: AWS connection to use
+ :param aws_conn_id: The Airflow connection used for AWS credentials.
+     If this is None or empty then the default boto3 behaviour is used. If
+     running Airflow in a distributed manner and aws_conn_id is None or
+     empty, then default boto3 configuration would be used (and must be
+     maintained on each worker node).
:param region_name: AWS region name associated with the client.
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
@@ -357,7 +381,7 @@ def __init__(
self,
*,
instance_ids: str | list[str],
- aws_conn_id: str = "aws_default",
+ aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
poll_interval: int = 20,
max_attempts: int = 20,
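Beyond the runtime behaviour, the unification matters for static typing: code that threads a single optional connection id through several operators only type-checks once every parameter is annotated str | None. A hypothetical helper (not part of the provider) illustrating that, using two operators from the ec2.py hunks above:

from __future__ import annotations

from airflow.providers.amazon.aws.operators.ec2 import (
    EC2StartInstanceOperator,
    EC2StopInstanceOperator,
)


def build_restart_tasks(instance_id: str, conn_id: str | None = None):
    # Before this commit some operators annotated aws_conn_id as plain `str`,
    # so passing an Optional value like conn_id here was a mypy error even
    # though None worked at runtime.
    stop = EC2StopInstanceOperator(
        task_id="stop_instance", instance_id=instance_id, aws_conn_id=conn_id
    )
    start = EC2StartInstanceOperator(
        task_id="start_instance", instance_id=instance_id, aws_conn_id=conn_id
    )
    return stop, start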
16 changes: 8 additions & 8 deletions airflow/providers/amazon/aws/operators/eks.py
@@ -73,7 +73,7 @@
def _create_compute(
compute: str | None,
cluster_name: str,
- aws_conn_id: str,
+ aws_conn_id: str | None,
region: str | None,
waiter_delay: int,
waiter_max_attempts: int,
@@ -231,7 +231,7 @@ def __init__(
fargate_selectors: list | None = None,
create_fargate_profile_kwargs: dict | None = None,
wait_for_completion: bool = False,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
@@ -483,7 +483,7 @@ def __init__(
nodegroup_name: str = DEFAULT_NODEGROUP_NAME,
create_nodegroup_kwargs: dict | None = None,
wait_for_completion: bool = False,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 80,
@@ -608,7 +608,7 @@ def __init__(
fargate_profile_name: str = DEFAULT_FARGATE_PROFILE_NAME,
create_fargate_profile_kwargs: dict | None = None,
wait_for_completion: bool = False,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 10,
waiter_max_attempts: int = 60,
@@ -712,7 +712,7 @@ def __init__(
cluster_name: str,
force_delete_compute: bool = False,
wait_for_completion: bool = False,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
@@ -843,7 +843,7 @@ def __init__(
cluster_name: str,
nodegroup_name: str,
wait_for_completion: bool = False,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
@@ -933,7 +933,7 @@ def __init__(
cluster_name: str,
fargate_profile_name: str,
wait_for_completion: bool = False,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
@@ -1045,7 +1045,7 @@ def __init__(
pod_context: str | None = None,
pod_name: str | None = None,
pod_username: str | None = None,
- aws_conn_id: str = DEFAULT_CONN_ID,
+ aws_conn_id: str | None = DEFAULT_CONN_ID,
region: str | None = None,
on_finish_action: str | None = None,
is_delete_operator_pod: bool | None = None,
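The eks.py hunks show the idiom the commit settles on: a non-None default (DEFAULT_CONN_ID / "aws_default") combined with an Optional annotation, so callers can keep the default connection, name another one, or pass None to defer entirely to boto3. A minimal standalone sketch of the same idiom (the class and constant here are illustrative, not provider code):

from __future__ import annotations

DEFAULT_CONN_ID = "aws_default"


class ExampleAwsTask:
    def __init__(self, aws_conn_id: str | None = DEFAULT_CONN_ID) -> None:
        # Treat an empty string like None, matching the docstring's
        # "None or empty" wording: both mean "no Airflow connection".
        self.aws_conn_id = aws_conn_id or None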