repo | instance_id | base_commit | patch | test_patch | problem_statement | hints_text | created_at | version | FAIL_TO_PASS | PASS_TO_PASS | environment_setup_commit | traceback | __index_level_0__ |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Qiskit/qiskit | Qiskit__qiskit-6102 | 0c8bb3dbf8d688590431ca79a83ba8aede84ed20 | diff --git a/qiskit/opflow/operator_base.py b/qiskit/opflow/operator_base.py
--- a/qiskit/opflow/operator_base.py
+++ b/qiskit/opflow/operator_base.py
@@ -14,6 +14,7 @@
import itertools
from abc import ABC, abstractmethod
+from copy import deepcopy
from typing import Dict, List, Optional, Set, Tuple, Union, cast
import numpy as np
@@ -557,6 +558,10 @@ def _expand_shorter_operator_and_permute(self, other: 'OperatorBase',
new_self = self._expand_dim(other.num_qubits - self.num_qubits)
return new_self, other
+ def copy(self) -> "OperatorBase":
+ """Return a deep copy of the Operator."""
+ return deepcopy(self)
+
# Composition
def __matmul__(self, other: 'OperatorBase') -> 'OperatorBase':
diff --git a/qiskit/opflow/primitive_ops/primitive_op.py b/qiskit/opflow/primitive_ops/primitive_op.py
--- a/qiskit/opflow/primitive_ops/primitive_op.py
+++ b/qiskit/opflow/primitive_ops/primitive_op.py
@@ -241,9 +241,12 @@ def to_matrix(self, massive: bool = False) -> np.ndarray:
def to_matrix_op(self, massive: bool = False) -> OperatorBase:
""" Returns a ``MatrixOp`` equivalent to this Operator. """
- prim_mat = self.__class__(self.primitive).to_matrix(massive=massive)
+ coeff = self.coeff
+ op = self.copy()
+ op._coeff = 1
+ prim_mat = op.to_matrix(massive=massive)
from .matrix_op import MatrixOp
- return MatrixOp(prim_mat, coeff=self.coeff)
+ return MatrixOp(prim_mat, coeff=coeff)
def to_instruction(self) -> Instruction:
""" Returns an ``Instruction`` equivalent to this Operator. """
| `TaperedPauliSumOp` is incompatible with `VQE`
### Information
- **Qiskit Terra version**: `master` @ 0c8bb3dbf8d688590431ca79a83ba8aede84ed20
- **Python version**: 3.8.8
- **Operating system**: Linux (Fedora 33): `uname -r: 5.10.23-200.fc33.x86_64`
### What is the current behavior?
Using a tapered `PauliSumOp` as the input to a `VQE` results in the following runtime error:
```python-traceback
Traceback (most recent call last):
File "terra-bug-minimal.py", line 31, in <module>
result = vqe.compute_minimum_eigenvalue(tapered_qubit_op)
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/algorithms/minimum_eigen_solvers/vqe.py", line 412, in compute_minimum_eigenvalue
self._expect_op = self.construct_expectation(self._var_form_params, operator)
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/algorithms/minimum_eigen_solvers/vqe.py", line 300, in construct_expectation
observable_meas = self.expectation.convert(StateFn(operator, is_measurement=True))
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/opflow/expectations/matrix_expectation.py", line 38, in convert
return operator.to_matrix_op()
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/opflow/state_fns/operator_state_fn.py", line 119, in to_matrix_op
return OperatorStateFn(self.primitive.to_matrix_op(massive=massive) * self.coeff,
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/opflow/primitive_ops/primitive_op.py", line 244, in to_matrix_op
prim_mat = self.__class__(self.primitive).to_matrix(massive=massive)
TypeError: __init__() missing 1 required positional argument: 'z2_symmetries'
```
### Steps to reproduce the problem
Below is a minimal code snippet to reproduce the issue:
```python
from qiskit import BasicAer
from qiskit.algorithms import VQE
from qiskit.circuit.library import TwoLocal
from qiskit.opflow import PauliSumOp, TwoQubitReduction
qubit_op = PauliSumOp.from_list([
("IIII", -0.8105479805373266),
("IIIZ", 0.17218393261915552),
("IIZZ", -0.22575349222402472),
("IZZI", 0.1721839326191556),
("ZZII", -0.22575349222402466),
("IIZI", 0.1209126326177663),
("IZZZ", 0.16892753870087912),
("IXZX", -0.045232799946057854),
("ZXIX", 0.045232799946057854),
("IXIX", 0.045232799946057854),
("ZXZX", -0.045232799946057854),
("ZZIZ", 0.16614543256382414),
("IZIZ", 0.16614543256382414),
("ZZZZ", 0.17464343068300453),
("ZIZI", 0.1209126326177663),
])
tapered_qubit_op = TwoQubitReduction(num_particles=2).convert(qubit_op)
backend = BasicAer.get_backend('statevector_simulator')
ryrz = TwoLocal(rotation_blocks=['ry', 'rz'], entanglement_blocks='cz')
vqe = VQE(ryrz, quantum_instance=backend)
result = vqe.compute_minimum_eigenvalue(tapered_qubit_op)
```
### What is the expected behavior?
I would expect that the `VQE` is able to compute the minimum eigenvalue just like for any other `opflow` operator type.
### Suggested solutions
A simple solution would be to make the `z2_symmetries` argument of `TaperedPauliSumOp.__init__` optional. However, this may lead to other unwanted side-effects.
Another possible solution would be to make the `TaperedPauliSumOp` look more like a `PauliSumOp` (at least in this context). But I am unsure how to achieve that, given that the failing `__init__` call is obtained via `self.__class__`, and I don't think we should hack the behavior of this internal object.
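A sketch of how the second route can avoid the `self.__class__` re-instantiation, along the lines of the attached patch (it assumes the operator keeps its coefficient in a private `_coeff` attribute, as `PrimitiveOp` does here):
```python
from copy import deepcopy

from qiskit.opflow import MatrixOp


def to_matrix_op(op, massive=False):
    """Turn any PrimitiveOp (including TaperedPauliSumOp) into a MatrixOp
    without re-instantiating it through type(op)(op.primitive)."""
    coeff = op.coeff
    bare = deepcopy(op)   # keeps the subclass intact, no __init__ call needed
    bare._coeff = 1       # neutralize the coefficient before densifying
    return MatrixOp(bare.to_matrix(massive=massive), coeff=coeff)
```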
| @ikkoham I am tagging you because (if I recall correctly) you mainly worked on the two-qubit-reduction and tapering code. | 2021-03-29T01:03:38Z | [] | [] |
Traceback (most recent call last):
File "terra-bug-minimal.py", line 31, in <module>
result = vqe.compute_minimum_eigenvalue(tapered_qubit_op)
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/algorithms/minimum_eigen_solvers/vqe.py", line 412, in compute_minimum_eigenvalue
self._expect_op = self.construct_expectation(self._var_form_params, operator)
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/algorithms/minimum_eigen_solvers/vqe.py", line 300, in construct_expectation
observable_meas = self.expectation.convert(StateFn(operator, is_measurement=True))
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/opflow/expectations/matrix_expectation.py", line 38, in convert
return operator.to_matrix_op()
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/opflow/state_fns/operator_state_fn.py", line 119, in to_matrix_op
return OperatorStateFn(self.primitive.to_matrix_op(massive=massive) * self.coeff,
File "/home/oss/Files/Qiskit/src/qiskit-terra/qiskit/opflow/primitive_ops/primitive_op.py", line 244, in to_matrix_op
prim_mat = self.__class__(self.primitive).to_matrix(massive=massive)
TypeError: __init__() missing 1 required positional argument: 'z2_symmetries'
| 1,736 |
|||
Qiskit/qiskit | Qiskit__qiskit-6213 | 2b7046f886e090bbbf22a989ff8130b6bd283d5c | diff --git a/qiskit/pulse/builder.py b/qiskit/pulse/builder.py
--- a/qiskit/pulse/builder.py
+++ b/qiskit/pulse/builder.py
@@ -740,6 +740,8 @@ def seconds_to_samples(seconds: Union[float, np.ndarray]) -> Union[int, np.ndarr
Returns:
The number of samples for the time to elapse
"""
+ if isinstance(seconds, np.ndarray):
+ return (seconds / active_backend().configuration().dt).astype(int)
return int(seconds / active_backend().configuration().dt)
diff --git a/qiskit/pulse/schedule.py b/qiskit/pulse/schedule.py
--- a/qiskit/pulse/schedule.py
+++ b/qiskit/pulse/schedule.py
@@ -1457,9 +1457,9 @@ def draw(self,
Args:
style: Stylesheet options. This can be dictionary or preset stylesheet classes. See
- :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXStandard`,
- :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXSimple`, and
- :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXDebugging` for details of
+ :py:class:`~qiskit.visualization.pulse_v2.stylesheets.IQXStandard`,
+ :py:class:`~qiskit.visualization.pulse_v2.stylesheets.IQXSimple`, and
+ :py:class:`~qiskit.visualization.pulse_v2.stylesheets.IQXDebugging` for details of
preset stylesheets.
backend (Optional[BaseBackend]): Backend object to play the input pulse program.
If provided, the plotter may use to make the visualization hardware aware.
| Pulse: seconds_to_samples() fails when passed an array
### Information
- **Qiskit Terra version**: 0.16.1
- **Python version**: 3.7
- **Operating system**: Linux (under WSL)
### What is the current behavior?
Passing a numpy array to ``qiskit.pulse.seconds_to_samples()`` raises an error: ``TypeError: only size-1 arrays can be converted to Python scalars``.
### Steps to reproduce the problem
Run the following test script:
```python
import numpy as np
import qiskit.pulse as qp
import qiskit.test.mock.fake_openpulse_2q as be
with qp.build(be.FakeOpenPulse2Q()) as test:
times_s = np.array([1e-6, 1e-5])
time_dt = qp.seconds_to_samples(times_s)
```
Traceback:
```
$ python ./test_qp_seconds_to_samples.py
Traceback (most recent call last):
File "./test_qp_seconds_to_samples.py", line 7, in <module>
time_dt = qp.seconds_to_samples(times_s)
File "/nix/store/0wf80vpsyn5jmnarclzmdblwyaanhpw0-python3.7-qiskit-terra-0.16.1/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 665, in seconds_to_samples
return int(seconds / active_backend().configuration().dt)
TypeError: only size-1 arrays can be converted to Python scalars
```
### What is the expected behavior?
Based on the function signature, this should convert a list of numpy values from seconds to "dt". https://github.com/Qiskit/qiskit-terra/blob/ce15f8a5522ff2be865b6520659171288cbae43d/qiskit/pulse/builder.py#L731-L742
### Suggested solutions
Either change the function signature, or have conditional logic based on the type of the input argument ``seconds``: i.e. only apply ``int(result)`` if ``not isinstance(seconds, np.ndarray)``
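A minimal sketch of that conditional logic, mirroring the fix in the attached patch (it must run inside an active pulse-builder context so that `active_backend()` can resolve a backend):
```python
import numpy as np
from qiskit.pulse.builder import active_backend


def seconds_to_samples(seconds):
    """Convert seconds into samples (units of dt), keeping array inputs vectorized."""
    dt = active_backend().configuration().dt
    if isinstance(seconds, np.ndarray):
        # Element-wise conversion avoids the scalar int() cast that raises TypeError.
        return (seconds / dt).astype(int)
    return int(seconds / dt)
```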
| 2021-04-13T14:10:20Z | [] | [] |
Traceback (most recent call last):
File "./test_qp_seconds_to_samples.py", line 7, in <module>
time_dt = qp.seconds_to_samples(times_s)
File "/nix/store/0wf80vpsyn5jmnarclzmdblwyaanhpw0-python3.7-qiskit-terra-0.16.1/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 665, in seconds_to_samples
return int(seconds / active_backend().configuration().dt)
TypeError: only size-1 arrays can be converted to Python scalars
| 1,746 |
||||
Qiskit/qiskit | Qiskit__qiskit-6228 | cf1241cbe1b24ce25865dc95e7a425456dd5b4cf | diff --git a/qiskit/pulse/builder.py b/qiskit/pulse/builder.py
--- a/qiskit/pulse/builder.py
+++ b/qiskit/pulse/builder.py
@@ -740,6 +740,8 @@ def seconds_to_samples(seconds: Union[float, np.ndarray]) -> Union[int, np.ndarr
Returns:
The number of samples for the time to elapse
"""
+ if isinstance(seconds, np.ndarray):
+ return (seconds / active_backend().configuration().dt).astype(int)
return int(seconds / active_backend().configuration().dt)
diff --git a/qiskit/pulse/schedule.py b/qiskit/pulse/schedule.py
--- a/qiskit/pulse/schedule.py
+++ b/qiskit/pulse/schedule.py
@@ -1457,9 +1457,9 @@ def draw(self,
Args:
style: Stylesheet options. This can be dictionary or preset stylesheet classes. See
- :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXStandard`,
- :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXSimple`, and
- :py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXDebugging` for details of
+ :py:class:`~qiskit.visualization.pulse_v2.stylesheets.IQXStandard`,
+ :py:class:`~qiskit.visualization.pulse_v2.stylesheets.IQXSimple`, and
+ :py:class:`~qiskit.visualization.pulse_v2.stylesheets.IQXDebugging` for details of
preset stylesheets.
backend (Optional[BaseBackend]): Backend object to play the input pulse program.
If provided, the plotter may use to make the visualization hardware aware.
| Pulse: seconds_to_samples() fails when passed an array
### Information
- **Qiskit Terra version**: 0.16.1
- **Python version**: 3.7
- **Operating system**: Linux (under WSL)
### What is the current behavior?
Passing a numpy array to ``qiskit.pulse.seconds_to_samples()`` raises an error: ``TypeError: only size-1 arrays can be converted to Python scalars``.
### Steps to reproduce the problem
Run the following test script:
```python
import numpy as np
import qiskit.pulse as qp
import qiskit.test.mock.fake_openpulse_2q as be
with qp.build(be.FakeOpenPulse2Q()) as test:
times_s = np.array([1e-6, 1e-5])
time_dt = qp.seconds_to_samples(times_s)
```
Traceback:
```
$ python ./test_qp_seconds_to_samples.py
Traceback (most recent call last):
File "./test_qp_seconds_to_samples.py", line 7, in <module>
time_dt = qp.seconds_to_samples(times_s)
File "/nix/store/0wf80vpsyn5jmnarclzmdblwyaanhpw0-python3.7-qiskit-terra-0.16.1/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 665, in seconds_to_samples
return int(seconds / active_backend().configuration().dt)
TypeError: only size-1 arrays can be converted to Python scalars
```
### What is the expected behavior?
Based on the function signature, this should convert a list of numpy values from seconds to "dt". https://github.com/Qiskit/qiskit-terra/blob/ce15f8a5522ff2be865b6520659171288cbae43d/qiskit/pulse/builder.py#L731-L742
### Suggested solutions
Either change the function signature, or have conditional logic based on the type of the input argument ``seconds``: i.e. only apply ``int(result)`` if ``not isinstance(seconds, np.ndarray)``
| 2021-04-14T15:32:30Z | [] | [] |
Traceback (most recent call last):
File "./test_qp_seconds_to_samples.py", line 7, in <module>
time_dt = qp.seconds_to_samples(times_s)
File "/nix/store/0wf80vpsyn5jmnarclzmdblwyaanhpw0-python3.7-qiskit-terra-0.16.1/lib/python3.7/site-packages/qiskit/pulse/builder.py", line 665, in seconds_to_samples
return int(seconds / active_backend().configuration().dt)
TypeError: only size-1 arrays can be converted to Python scalars
| 1,749 |
||||
Qiskit/qiskit | Qiskit__qiskit-6377 | f21d991d09a0ef2c47605df750687b67462fc1e6 | diff --git a/qiskit/result/__init__.py b/qiskit/result/__init__.py
--- a/qiskit/result/__init__.py
+++ b/qiskit/result/__init__.py
@@ -24,9 +24,22 @@
ResultError
Counts
marginal_counts
+
+Distributions
+=============
+
+.. autosummary::
+ :toctree: ../stubs/
+
+ ProbDistribution
+ QuasiDistribution
+
"""
from .result import Result
from .exceptions import ResultError
from .utils import marginal_counts
from .counts import Counts
+
+from .distributions.probability import ProbDistribution
+from .distributions.quasi import QuasiDistribution
diff --git a/qiskit/result/distributions/__init__.py b/qiskit/result/distributions/__init__.py
new file mode 100644
--- /dev/null
+++ b/qiskit/result/distributions/__init__.py
@@ -0,0 +1,15 @@
+# This code is part of Qiskit.
+#
+# (C) Copyright IBM 2021.
+#
+# This code is licensed under the Apache License, Version 2.0. You may
+# obtain a copy of this license in the LICENSE.txt file in the root directory
+# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+
+"""
+Distributions
+"""
diff --git a/qiskit/result/distributions/probability.py b/qiskit/result/distributions/probability.py
new file mode 100644
--- /dev/null
+++ b/qiskit/result/distributions/probability.py
@@ -0,0 +1,88 @@
+# This code is part of Qiskit.
+#
+# (C) Copyright IBM 2021.
+#
+# This code is licensed under the Apache License, Version 2.0. You may
+# obtain a copy of this license in the LICENSE.txt file in the root directory
+# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+"""Class for probability distributions."""
+
+import re
+
+# NOTE: A dict subclass should not overload any dunder methods like __getitem__
+# this can cause unexpected behavior and issues as the cPython dict
+# implementation has many standard methods in C for performance and the dunder
+# methods are not always used as expected. For example, update() doesn't call
+# __setitem__ so overloading __setitem__ would not always provide the expected
+# result
+class ProbDistribution(dict):
+ """A generic dict-like class for probability distributions."""
+
+ bitstring_regex = re.compile(r"^[01]+$")
+
+ def __init__(self, data, shots=None):
+ """Builds a probability distribution object.
+
+ Args:
+ data (dict): Input probability data. Where the keys
+ represent a measured classical value and the value is a
+ float for the probability of that result.
+ The keys can be one of several formats:
+
+ * A hexadecimal string of the form ``"0x4a"``
+ * A bit string prefixed with ``0b`` for example
+ ``'0b1011'``
+ * An integer
+
+ shots (int): Number of shots the distribution was derived from.
+
+ Raises:
+ TypeError: If the input keys are not a string or int
+ ValueError: If the string format of the keys is incorrect
+ """
+ self.shots = shots
+ if data:
+ first_key = next(iter(data.keys()))
+ if isinstance(first_key, int):
+ pass
+ elif isinstance(first_key, str):
+ if first_key.startswith("0x"):
+ hex_raw = data
+ data = {int(key, 0): value for key, value in hex_raw.items()}
+ elif first_key.startswith("0b"):
+ bin_raw = data
+ data = {int(key, 0): value for key, value in bin_raw.items()}
+ elif self.bitstring_regex.search(first_key):
+ bin_raw = data
+ data = {int("0b" + key, 0): value for key, value in bin_raw.items()}
+ else:
+ raise ValueError(
+ "The input keys are not a valid string format, must either "
+ "be a hex string prefixed by '0x' or a binary string "
+ "optionally prefixed with 0b"
+ )
+ else:
+ raise TypeError("Input data's keys are of invalid type, must be str or int")
+ super().__init__(data)
+
+ def binary_probabilities(self):
+ """Build a probabilities dictionary with binary string keys
+
+ Returns:
+ dict: A dictionary where the keys are binary strings in the format
+ ``"0110"``
+ """
+ return {bin(key)[2:]: value for key, value in self.items()}
+
+ def hex_probabilities(self):
+ """Build a probabilities dictionary with hexadecimal string keys
+
+ Returns:
+ dict: A dictionary where the keys are hexadecimal strings in the
+ format ``"0x1a"``
+ """
+ return {hex(key): value for key, value in self.items()}
diff --git a/qiskit/result/distributions/quasi.py b/qiskit/result/distributions/quasi.py
new file mode 100644
--- /dev/null
+++ b/qiskit/result/distributions/quasi.py
@@ -0,0 +1,125 @@
+# This code is part of Qiskit.
+#
+# (C) Copyright IBM 2021.
+#
+# This code is licensed under the Apache License, Version 2.0. You may
+# obtain a copy of this license in the LICENSE.txt file in the root directory
+# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+"""Quasidistribution class"""
+
+from math import sqrt
+import re
+
+from .probability import ProbDistribution
+
+
+# NOTE: A dict subclass should not overload any dunder methods like __getitem__
+# this can cause unexpected behavior and issues as the cPython dict
+# implementation has many standard methods in C for performance and the dunder
+# methods are not always used as expected. For example, update() doesn't call
+# __setitem__ so overloading __setitem__ would not always provide the expected
+# result
+class QuasiDistribution(dict):
+ """A dict-like class for representing qasi-probabilities."""
+
+ bitstring_regex = re.compile(r"^[01]+$")
+
+ def __init__(self, data, shots=None):
+ """Builds a quasiprobability distribution object.
+
+ Parameters:
+ data (dict): Input quasiprobability data. Where the keys
+ represent a measured classical value and the value is a
+ float for the quasiprobability of that result.
+ The keys can be one of several formats:
+
+ * A hexadecimal string of the form ``"0x4a"``
+ * A bit string prefixed with ``0b`` for example
+ ``'0b1011'``
+ * An integer
+
+ shots (int): Number of shots the distribution was derived from.
+
+ Raises:
+ TypeError: If the input keys are not a string or int
+ ValueError: If the string format of the keys is incorrect
+ """
+ self.shots = shots
+ if data:
+ first_key = next(iter(data.keys()))
+ if isinstance(first_key, int):
+ pass
+ elif isinstance(first_key, str):
+ if first_key.startswith("0x"):
+ hex_raw = data
+ data = {int(key, 0): value for key, value in hex_raw.items()}
+ elif first_key.startswith("0b"):
+ bin_raw = data
+ data = {int(key, 0): value for key, value in bin_raw.items()}
+ elif self.bitstring_regex.search(first_key):
+ bin_raw = data
+ data = {int("0b" + key, 0): value for key, value in bin_raw.items()}
+ else:
+ raise ValueError(
+ "The input keys are not a valid string format, must either "
+ "be a hex string prefixed by '0x' or a binary string "
+ "optionally prefixed with 0b"
+ )
+ else:
+ raise TypeError("Input data's keys are of invalid type, must be str or int")
+ super().__init__(data)
+
+ def nearest_probability_distribution(self, return_distance=False):
+ """Takes a quasiprobability distribution and maps
+ it to the closest probability distribution as defined by
+ the L2-norm.
+
+ Parameters:
+ return_distance (bool): Return the L2 distance between distributions.
+
+ Returns:
+ ProbDistribution: Nearest probability distribution.
+ float: Euclidean (L2) distance of distributions.
+
+ Notes:
+ Method from Smolin et al., Phys. Rev. Lett. 108, 070502 (2012).
+ """
+ sorted_probs = dict(sorted(self.items(), key=lambda item: item[1]))
+ num_elems = len(sorted_probs)
+ new_probs = {}
+ beta = 0
+ diff = 0
+ for key, val in sorted_probs.items():
+ temp = val + beta / num_elems
+ if temp < 0:
+ beta += val
+ num_elems -= 1
+ diff += val * val
+ else:
+ diff += (beta / num_elems) * (beta / num_elems)
+ new_probs[key] = sorted_probs[key] + beta / num_elems
+ if return_distance:
+ return ProbDistribution(new_probs, self.shots), sqrt(diff)
+ return ProbDistribution(new_probs, self.shots)
+
+ def binary_probabilities(self):
+ """Build a probabilities dictionary with binary string keys
+
+ Returns:
+ dict: A dictionary where the keys are binary strings in the format
+ ``"0110"``
+ """
+ return {bin(key)[2:]: value for key, value in self.items()}
+
+ def hex_probabilities(self):
+ """Build a probabilities dictionary with hexadecimal string keys
+
+ Returns:
+ dict: A dictionary where the keys are hexadecimal strings in the
+ format ``"0x1a"``
+ """
+ return {hex(key): value for key, value in self.items()}
| `from qiskit.providers.ibmq import least_busy` raises an ImportError with Terra's main branch
### Information
- **Qiskit Terra version**: latest(main)
- **Python version**: 3.8.10
- **Operating system**: macOS 11.3.1
- qiskit-ibmq-provider: 0.13.1
### What is the current behavior?
`from qiskit.providers.ibmq import least_busy` raises an ImportError with the main branch.
Terra's released version does not have this issue.
The tutorials CI step may fail because of this issue.
### Steps to reproduce the problem
```
>>> from qiskit.providers.ibmq import least_busy
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/__init__.py", line 88, in <module>
from .ibmqfactory import IBMQFactory
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/ibmqfactory.py", line 20, in <module>
from .accountprovider import AccountProvider
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/accountprovider.py", line 38, in <module>
from .runner_result import RunnerResult
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/runner_result.py", line 18, in <module>
from qiskit.result import Result, QuasiDistribution
ImportError: cannot import name 'QuasiDistribution' from 'qiskit.result' (/Users/ima/tasks/1_2021/qiskit/terra/qiskit/result/__init__.py)
cannot import name 'QuasiDistribution' from 'qiskit.result' (/Users/ima/tasks/1_2021/qiskit/terra/qiskit/result/__init__.py)
```
Terra 0.17.3 has `QuasiDistribution` as follows, but main does not.
https://github.com/Qiskit/qiskit-terra/blob/ab1539c8179b40b05ff6c921cec84815540c0e06/qiskit/result/__init__.py#L45
I think we need #6388 in main as well as stable.
### What is the expected behavior?
No error.
### Suggested solutions
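The attached patch points at the fix: port the distributions module to main and re-export the classes from `qiskit.result`, roughly:
```python
# qiskit/result/__init__.py (sketch, following the attached patch)
from .distributions.probability import ProbDistribution
from .distributions.quasi import QuasiDistribution
```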
| 2021-05-08T10:40:59Z | [] | [] |
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/__init__.py", line 88, in <module>
from .ibmqfactory import IBMQFactory
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/ibmqfactory.py", line 20, in <module>
from .accountprovider import AccountProvider
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/accountprovider.py", line 38, in <module>
from .runner_result import RunnerResult
File "/Users/ima/envs/dev38/lib/python3.8/site-packages/qiskit/providers/ibmq/runner_result.py", line 18, in <module>
from qiskit.result import Result, QuasiDistribution
ImportError: cannot import name 'QuasiDistribution' from 'qiskit.result' (/Users/ima/tasks/1_2021/qiskit/terra/qiskit/result/__init__.py)
| 1,779 |
||||
Qiskit/qiskit | Qiskit__qiskit-6588 | 8c062c777246e386b7306e94aa9e8094b2a16416 | diff --git a/qiskit/circuit/classicalfunction/boolean_expression.py b/qiskit/circuit/classicalfunction/boolean_expression.py
--- a/qiskit/circuit/classicalfunction/boolean_expression.py
+++ b/qiskit/circuit/classicalfunction/boolean_expression.py
@@ -12,14 +12,14 @@
"""A quantum oracle constructed from a logical expression or a string in the DIMACS format."""
+from os.path import basename, isfile
from typing import Callable, Optional
-from os.path import basename, isfile
+from tweedledum import BitVec, BoolFunction
+from tweedledum.synthesis import pkrm_synth
from qiskit.circuit import QuantumCircuit
-from qiskit.exceptions import MissingOptionalLibraryError
from .classical_element import ClassicalElement
-from .utils import HAS_TWEEDLEDUM
class BooleanExpression(ClassicalElement):
@@ -31,17 +31,7 @@ def __init__(self, expression: str, name: str = None) -> None:
expression (str): The logical expression string.
name (str): Optional. Instruction gate name. Otherwise part of
the expression is going to be used.
-
- Raises:
- MissingOptionalLibraryError: If tweedledum is not installed. Tweedledum is required.
"""
- if not HAS_TWEEDLEDUM:
- raise MissingOptionalLibraryError(
- libname="tweedledum",
- name="BooleanExpression compiler",
- pip_install="pip install tweedledum",
- )
- from tweedledum import BoolFunction
self._tweedledum_bool_expression = BoolFunction.from_expression(expression)
@@ -63,8 +53,6 @@ def simulate(self, bitstring: str) -> bool:
Returns:
bool: result of the evaluation.
"""
- from tweedledum import BitVec
-
bits = []
for bit in bitstring:
bits.append(BitVec(1, bit))
@@ -92,8 +80,7 @@ def synth(
qregs = None # TODO: Probably from self._tweedledum_bool_expression._signature
if synthesizer is None:
- from tweedledum.synthesis import pkrm_synth # pylint: disable=no-name-in-module
- from .utils import tweedledum2qiskit
+ from .utils import tweedledum2qiskit # Avoid an import cycle
truth_table = self._tweedledum_bool_expression.truth_table(output_bit=0)
return tweedledum2qiskit(pkrm_synth(truth_table), name=self.name, qregs=qregs)
@@ -113,16 +100,8 @@ def from_dimacs_file(cls, filename: str):
BooleanExpression: A gate for the input string
Raises:
- MissingOptionalLibraryError: If tweedledum is not installed. Tweedledum is required.
FileNotFoundError: If filename is not found.
"""
- if not HAS_TWEEDLEDUM:
- raise MissingOptionalLibraryError(
- libname="tweedledum",
- name="BooleanExpression compiler",
- pip_install="pip install tweedledum",
- )
- from tweedledum import BoolFunction
expr_obj = cls.__new__(cls)
if not isfile(filename):
diff --git a/qiskit/circuit/classicalfunction/classical_function_visitor.py b/qiskit/circuit/classicalfunction/classical_function_visitor.py
--- a/qiskit/circuit/classicalfunction/classical_function_visitor.py
+++ b/qiskit/circuit/classicalfunction/classical_function_visitor.py
@@ -17,8 +17,8 @@
import ast
import _ast
-from qiskit.exceptions import MissingOptionalLibraryError
-from .utils import HAS_TWEEDLEDUM
+from tweedledum.classical import LogicNetwork
+
from .exceptions import ClassicalFunctionParseError, ClassicalFunctionCompilerTypeError
@@ -36,12 +36,6 @@ class ClassicalFunctionVisitor(ast.NodeVisitor):
}
def __init__(self):
- if not HAS_TWEEDLEDUM:
- raise MissingOptionalLibraryError(
- libname="tweedledum",
- name="classical function compiler",
- pip_install="pip install tweedledum",
- )
self.scopes = []
self.args = []
self._network = None
@@ -57,14 +51,6 @@ def visit_Module(self, node):
def visit_FunctionDef(self, node):
"""The function definition should have type hints"""
- if HAS_TWEEDLEDUM:
- from tweedledum.classical import LogicNetwork # pylint: disable=no-name-in-module
- else:
- raise MissingOptionalLibraryError(
- libname="tweedledum",
- name="classical function compiler",
- pip_install="pip install tweedledum",
- )
if node.returns is None:
raise ClassicalFunctionParseError("return type is needed")
scope = {"return": (node.returns.id, None), node.returns.id: ("type", None)}
diff --git a/qiskit/circuit/classicalfunction/classicalfunction.py b/qiskit/circuit/classicalfunction/classicalfunction.py
--- a/qiskit/circuit/classicalfunction/classicalfunction.py
+++ b/qiskit/circuit/classicalfunction/classicalfunction.py
@@ -15,11 +15,14 @@
import ast
from typing import Callable, Optional
+from tweedledum.classical import simulate
+from tweedledum.synthesis import pkrm_synth
+
from qiskit.circuit import QuantumCircuit, QuantumRegister
-from qiskit.exceptions import MissingOptionalLibraryError, QiskitError
+from qiskit.exceptions import QiskitError
from .classical_element import ClassicalElement
-from .utils import HAS_TWEEDLEDUM
from .classical_function_visitor import ClassicalFunctionVisitor
+from .utils import tweedledum2qiskit
class ClassicalFunction(ClassicalElement):
@@ -35,17 +38,10 @@ def __init__(self, source, name=None):
name (str): Optional. Default: "*classicalfunction*". ClassicalFunction name.
Raises:
- MissingOptionalLibraryError: If tweedledum is not installed.
QiskitError: If source is not a string.
"""
if not isinstance(source, str):
raise QiskitError("ClassicalFunction needs a source code as a string.")
- if not HAS_TWEEDLEDUM:
- raise MissingOptionalLibraryError(
- libname="tweedledum",
- name="classical function compiler",
- pip_install="pip install tweedledum",
- )
self._ast = ast.parse(source)
self._network = None
self._scopes = None
@@ -111,8 +107,6 @@ def simulate(self, bitstring: str) -> bool:
Returns:
bool: result of the evaluation.
"""
- from tweedledum.classical import simulate # pylint: disable=no-name-in-module
-
return simulate(self._network, bitstring)
def simulate_all(self):
@@ -133,8 +127,6 @@ def simulate_all(self):
def truth_table(self):
"""Returns (and computes) the truth table"""
if self._truth_table is None:
- from tweedledum.classical import simulate # pylint: disable=no-name-in-module
-
self._truth_table = simulate(self._network)
return self._truth_table
@@ -161,9 +153,6 @@ def synth(
if synthesizer:
return synthesizer(self)
- from .utils import tweedledum2qiskit
- from tweedledum.synthesis import pkrm_synth # pylint: disable=no-name-in-module
-
return tweedledum2qiskit(pkrm_synth(self.truth_table[0]), name=self.name, qregs=qregs)
def _define(self):
diff --git a/qiskit/circuit/classicalfunction/utils.py b/qiskit/circuit/classicalfunction/utils.py
--- a/qiskit/circuit/classicalfunction/utils.py
+++ b/qiskit/circuit/classicalfunction/utils.py
@@ -12,13 +12,8 @@
"""Internal utils for Classical Function Compiler"""
-try:
- from tweedledum.ir import Qubit # pylint: disable=no-name-in-module
- from tweedledum.passes import parity_decomp # pylint: disable=no-name-in-module
-
- HAS_TWEEDLEDUM = True
-except Exception: # pylint: disable=broad-except
- HAS_TWEEDLEDUM = False
+from tweedledum.ir import Qubit
+from tweedledum.passes import parity_decomp
from qiskit.circuit import QuantumCircuit
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -77,6 +77,23 @@
flags=re.S | re.M,
)
+
+visualization_extras = [
+ "matplotlib>=2.1",
+ "ipywidgets>=7.3.0",
+ "pydot",
+ "pillow>=4.2.1",
+ "pylatexenc>=1.4",
+ "seaborn>=0.9.0",
+ "pygments>=2.4",
+]
+
+
+z3_requirements = [
+ "z3-solver>=4.7",
+]
+
+
setup(
name="qiskit-terra",
version="0.18.0",
@@ -109,19 +126,10 @@
include_package_data=True,
python_requires=">=3.6",
extras_require={
- "visualization": [
- "matplotlib>=2.1",
- "ipywidgets>=7.3.0",
- "pydot",
- "pillow>=4.2.1",
- "pylatexenc>=1.4",
- "seaborn>=0.9.0",
- "pygments>=2.4",
- ],
- "classical-function-compiler": ["tweedledum>=1.0,<2.0"],
- "full-featured-simulators": ["qiskit-aer>=0.1"],
- "crosstalk-pass": ["z3-solver>=4.7"],
+ "visualization": visualization_extras,
"bip-mapper": ["cplex", "docplex"],
+ "crosstalk-pass": z3_requirements,
+ "all": visualization_extras + z3_requirements,
},
project_urls={
"Bug Tracker": "https://github.com/Qiskit/qiskit-terra/issues",
| Missing tweedledum as required package
### Information
- **Qiskit Terra version**: 0.17.0
- **Python version**: 3.8.8
- **Operating system**: linux
### What is the current behavior?
`qiskit.circuit.classicalfunction.BooleanExpression` fails with a message that the `tweedledum` library is required. However, `tweedledum` is not listed as a requirement for the `qiskit-terra` package.
### Steps to reproduce the problem
Use APIs that instantiate the `BooleanExpression` class. E.g.
```
from qiskit.circuit.library import PhaseOracle
oracle = PhaseOracle('x & ~y')
```
Leads to error:
```
Traceback (most recent call last):
File "<ipython-input-97-28069418327a>", line 3, in <module>
oracle = PhaseOracle('x & ~y') # previous API: qiskit.aqua.components.oracles.LogicalExpressionOracle
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/library/phase_oracle.py", line 55, in __init__
expression = BooleanExpression(expression)
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/classicalfunction/boolean_expression.py", line 39, in __init__
raise MissingOptionalLibraryError(
MissingOptionalLibraryError: "The 'tweedledum' library is required to use 'BooleanExpression compiler'. You can install it with 'pip install tweedledum'."
```
### What is the expected behavior?
No error about missing library should happen.
### Suggested solutions
Add `tweedledum` to `./requirements.txt`.
Related to #5853 and #5830 where `tweedledum` is present in `./requirements-dev.txt`, but not added to `./requirements.txt`
| Tweedledum is an optional dependency, mostly because it hasn't fully stabilized the python api yet and also because there are some packaging issues for some of our supported platforms. However, it is correctly listed in the setup.py:
https://github.com/Qiskit/qiskit-terra/blob/main/setup.py#L108
which enables you to install terra with tweedledum using:
```
pip install 'qiskit-terra[classical-function-compiler]'
```
This is also documented, albeit not very clearly worded, in the documentation for the class: https://qiskit.org/documentation/stubs/qiskit.circuit.classicalfunction.BooleanExpression.html#qiskit.circuit.classicalfunction.BooleanExpression
@mtreinish I get a similar error message when running tests locally with the command `tox --epy37`. I can install `tweedledum` locally, but that doesn't solve the error message inside the `tox` environment.
> @mtreinish I get a similar error message when running tests locally with the command `tox --epy37`. I can install `tweedledum` locally, but that doesn't solve the error message inside the `tox` environment.
Did you have an existing tox venv created? If you did and it had an old version of tweedledum installed tox may not have pulled in the new requirement on subsequent runs and updated the venv. (this was a longstanding bug in tox with requirements files that I learned recently has been fixed and will be included in the upcoming tox 4.0 release). You can try rebuilding the venv with `tox -epy37 -r` which tells tox to rebuild the virtualenv.
@mtreinish I did rebuild with the `-r` option and the problem remained. But I cannot reproduce this now (tests work locally at the moment), so I am not sure what happened.
| 2021-06-16T15:41:06Z | [] | [] |
Traceback (most recent call last):
File "<ipython-input-97-28069418327a>", line 3, in <module>
oracle = PhaseOracle('x & ~y') # previous API: qiskit.aqua.components.oracles.LogicalExpressionOracle
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/library/phase_oracle.py", line 55, in __init__
expression = BooleanExpression(expression)
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/classicalfunction/boolean_expression.py", line 39, in __init__
raise MissingOptionalLibraryError(
MissingOptionalLibraryError: "The 'tweedledum' library is required to use 'BooleanExpression compiler'. You can install it with 'pip install tweedledum'."
| 1,811 |
|||
Qiskit/qiskit | Qiskit__qiskit-6658 | 21b875d9b0443567452e40431bb6b783563142a1 | diff --git a/qiskit/circuit/classicalfunction/boolean_expression.py b/qiskit/circuit/classicalfunction/boolean_expression.py
--- a/qiskit/circuit/classicalfunction/boolean_expression.py
+++ b/qiskit/circuit/classicalfunction/boolean_expression.py
@@ -25,15 +25,19 @@
class BooleanExpression(ClassicalElement):
"""The Boolean Expression gate."""
- def __init__(self, expression: str, name: str = None) -> None:
+ def __init__(self, expression: str, name: str = None, var_order: list = None) -> None:
"""
Args:
expression (str): The logical expression string.
- name (str): Optional. Instruction gate name. Otherwise part of
- the expression is going to be used.
+ name (str): Optional. Instruction gate name. Otherwise part of the expression is
+ going to be used.
+ var_order(list): A list with the order in which variables will be created.
+ (default: by appearance)
"""
- self._tweedledum_bool_expression = BoolFunction.from_expression(expression)
+ self._tweedledum_bool_expression = BoolFunction.from_expression(
+ expression, var_order=var_order
+ )
short_expr_for_name = (expression[:10] + "...") if len(expression) > 13 else expression
num_qubits = (
diff --git a/qiskit/circuit/library/phase_oracle.py b/qiskit/circuit/library/phase_oracle.py
--- a/qiskit/circuit/library/phase_oracle.py
+++ b/qiskit/circuit/library/phase_oracle.py
@@ -52,6 +52,7 @@ def __init__(
self,
expression: Union[str, ClassicalElement],
synthesizer: Optional[Callable[[BooleanExpression], QuantumCircuit]] = None,
+ var_order: list = None,
) -> None:
"""Creates a PhaseOracle object
@@ -59,12 +60,14 @@ def __init__(
expression: A Python-like boolean expression.
synthesizer: Optional. A function to convert a BooleanExpression into a QuantumCircuit
If None is provided, Tweedledum's `pkrm_synth` with `phase_esop` will be used.
+ var_order(list): A list with the order in which variables will be created.
+ (default: by appearance)
"""
from qiskit.circuit.classicalfunction.boolean_expression import BooleanExpression
from qiskit.circuit.classicalfunction.classical_element import ClassicalElement
if not isinstance(expression, ClassicalElement):
- expression = BooleanExpression(expression)
+ expression = BooleanExpression(expression, var_order=var_order)
self.boolean_expression = expression
| Grover's Migration from Aqua to Qiskit Terra
### Information
- **Qiskit version**: 0.26.0
- **Python version**: 3.8
- **Operating system**: MacOS
### What is the current behavior?
Grover's algorithm does not behave as expected; at least, the oracle previously known as the "Logical Expression" oracle (now PhaseOracle) no longer takes its variables in alphabetical order. It runs fine in Aqua but not with the Qiskit circuit library.
### Steps to reproduce the problem
```python
from qiskit import *
from qiskit.tools.visualization import plot_histogram
from qiskit.circuit.library import PhaseOracle
from qiskit.algorithms import Grover, AmplificationProblem
oracle = PhaseOracle('((A & C) | (B & D)) & ~(C & D)')
problem = AmplificationProblem(oracle=oracle, is_good_state=oracle.evaluate_bitstring)
backend = Aer.get_backend('qasm_simulator')
grover = Grover(quantum_instance=backend)
result = grover.amplify(problem)
print(result.circuit_results[0])
```
### What is the expected behavior?
It shouldn't give 1100 as a suggestion, since the expression passed to PhaseOracle rules out C & D via the ~(C & D) term.
However it gives:
{'1101': 255, '0011': 260, '1100': 260, '0111': 249}
In Aqua it gives (this is the expected behaviour):
{'0101': 261, '0111': 236, '1011': 262, '1010': 265}
### Suggested solutions
Something seems to be wrong in the PhaseOracle implementation; please take the LogicalExpression oracle's behaviour into account when solving this, so that the variables end up ordered alphabetically.
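For reference, the attached patch adds a `var_order` argument, so a fixed (e.g. alphabetical) variable order can be requested explicitly once the patch is applied; a sketch:
```python
from qiskit.circuit.library import PhaseOracle

# Pin the variable-to-qubit mapping instead of relying on order of appearance.
oracle = PhaseOracle(
    '((A & C) | (B & D)) & ~(C & D)',
    var_order=['A', 'B', 'C', 'D'],
)
```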
Missing tweedledum as required package
### Information
- **Qiskit Terra version**: 0.17.0
- **Python version**: 3.8.8
- **Operating system**: linux
### What is the current behavior?
`qiskit.circuit.classicalfunction.BooleanExpression` fails with a message that the `tweedledum` library is required. However, `tweedledum` is not listed as a requirement for the `qiskit-terra` package.
### Steps to reproduce the problem
Use APIs that instantiate the `BooleanExpression` class. E.g.
```
from qiskit.circuit.library import PhaseOracle
oracle = PhaseOracle('x & ~y')
```
Leads to error:
```
Traceback (most recent call last):
File "<ipython-input-97-28069418327a>", line 3, in <module>
oracle = PhaseOracle('x & ~y') # previous API: qiskit.aqua.components.oracles.LogicalExpressionOracle
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/library/phase_oracle.py", line 55, in __init__
expression = BooleanExpression(expression)
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/classicalfunction/boolean_expression.py", line 39, in __init__
raise MissingOptionalLibraryError(
MissingOptionalLibraryError: "The 'tweedledum' library is required to use 'BooleanExpression compiler'. You can install it with 'pip install tweedledum'."
```
### What is the expected behavior?
No error about missing library should happen.
### Suggested solutions
Add `tweedledum` to `./requirements.txt`.
Related to #5853 and #5830 where `tweedledum` is present in `./requirements-dev.txt`, but not added to `./requirements.txt`
| Good catch! That change was not intended (at least not silently). Thanks for reporting this, we're looking into it 👍🏻
Tweedledum is an optional dependency, mostly because it hasn't fully stabilized the python api yet and also because there are some packaging issues for some of our supported platforms. However, it is correctly listed in the setup.py:
https://github.com/Qiskit/qiskit-terra/blob/main/setup.py#L108
which enables you to install terra with tweedledum using:
```
pip install 'qiskit-terra[classical-function-compiler]'
```
This is also documented, albeit not very clearly worded, in the documentation for the class: https://qiskit.org/documentation/stubs/qiskit.circuit.classicalfunction.BooleanExpression.html#qiskit.circuit.classicalfunction.BooleanExpression
@mtreinish I get a similar error message when running tests locally with the command `tox --epy37`. I can install `tweedledum` locally, but that doesn't solve the error message inside the `tox` environment.
> @mtreinish I get a similar error message when running tests locally with the command `tox --epy37`. I can install `tweedledum` locally, but that doesn't solve the error message inside the `tox` environment.
Did you have an existing tox venv created? If you did and it had an old version of tweedledum installed tox may not have pulled in the new requirement on subsequent runs and updated the venv. (this was a longstanding bug in tox with requirements files that I learned recently has been fixed and will be included in the upcoming tox 4.0 release). You can try rebuilding the venv with `tox -epy37 -r` which tells tox to rebuild the virtualenv.
@mtreinish I did rebuild with the `-r` option and the problem remained. But I cannot reproduce this now (tests work locally at the moment), so I am not sure what happened.
| 2021-06-29T12:41:36Z | [] | [] |
Traceback (most recent call last):
File "<ipython-input-97-28069418327a>", line 3, in <module>
oracle = PhaseOracle('x & ~y') # previous API: qiskit.aqua.components.oracles.LogicalExpressionOracle
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/library/phase_oracle.py", line 55, in __init__
expression = BooleanExpression(expression)
File "/opt/conda/lib/python3.8/site-packages/qiskit/circuit/classicalfunction/boolean_expression.py", line 39, in __init__
raise MissingOptionalLibraryError(
MissingOptionalLibraryError: "The 'tweedledum' library is required to use 'BooleanExpression compiler'. You can install it with 'pip install tweedledum'."
| 1,823 |
|||
Qiskit/qiskit | Qiskit__qiskit-6847 | 6c9c906f12dc6b8d9929cd8b1e2e28601ac9d827 | diff --git a/qiskit/circuit/parameterexpression.py b/qiskit/circuit/parameterexpression.py
--- a/qiskit/circuit/parameterexpression.py
+++ b/qiskit/circuit/parameterexpression.py
@@ -265,7 +265,7 @@ def _apply_operation(
return ParameterExpression(parameter_symbols, expr)
- def gradient(self, param) -> Union["ParameterExpression", float]:
+ def gradient(self, param) -> Union["ParameterExpression", complex]:
"""Get the derivative of a parameter expression w.r.t. a specified parameter expression.
Args:
@@ -273,6 +273,7 @@ def gradient(self, param) -> Union["ParameterExpression", float]:
Returns:
ParameterExpression representing the gradient of param_expr w.r.t. param
+ or complex or float number
"""
# Check if the parameter is contained in the parameter expression
if param not in self._parameter_symbols.keys():
@@ -299,8 +300,12 @@ def gradient(self, param) -> Union["ParameterExpression", float]:
# If the gradient corresponds to a parameter expression then return the new expression.
if len(parameter_symbols) > 0:
return ParameterExpression(parameter_symbols, expr=expr_grad)
- # If no free symbols left, return a float corresponding to the gradient.
- return float(expr_grad)
+ # If no free symbols left, return a complex or float gradient
+ expr_grad_cplx = complex(expr_grad)
+ if expr_grad_cplx.imag != 0:
+ return expr_grad_cplx
+ else:
+ return float(expr_grad)
def __add__(self, other):
return self._apply_operation(operator.add, other)
| ParameterExpression throws an error for gradients with complex coefficients.
### Information
- **Qiskit Terra version**: 0.18
- **Python version**: 3.8
- **Operating system**: MacOS
### What is the current behavior?
When a `ParameterExpression` contains the imaginary unit, taking the gradient w.r.t. a parameter in that expression results in an error.
### Steps to reproduce the problem
```
p = Parameter("theta")
p2 = 1j*p
p2
Out[27]: ParameterExpression(1.0*I*theta)
p2.gradient(p)
Traceback (most recent call last):
File "/qiskit-terra/venv/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3437, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-28-058190e5d73e>", line 1, in <module>
p2.gradient(p)
File "/qiskit-terra/qiskit/circuit/parameterexpression.py", line 303, in gradient
return float(expr_grad)
File "symengine_wrapper.pyx", line 1133, in symengine.lib.symengine_wrapper.Basic.__float__
File "symengine_wrapper.pyx", line 958, in symengine.lib.symengine_wrapper.Basic.n
File "symengine_wrapper.pyx", line 4194, in symengine.lib.symengine_wrapper.evalf
RuntimeError: Not Implemented
```
### What is the expected behavior?
Calculating a gradient with a complex coefficient.
### Suggested solutions
Perhaps
```
File "/qiskit-terra/qiskit/circuit/parameterexpression.py", line 303, in gradient
return float(expr_grad)
```
should be changed to
```
File "/qiskit-terra/qiskit/circuit/parameterexpression.py", line 303, in gradient
return complex(expr_grad)
```
?
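For illustration, with a change along those lines (the attached patch returns a complex number only when the imaginary part is non-zero), the reproduction above would behave like this sketch:
```python
from qiskit.circuit import Parameter

theta = Parameter("theta")
expr = 1j * theta

# After the fix, the derivative of 1j*theta w.r.t. theta is the constant 1j
# instead of raising "RuntimeError: Not Implemented".
print(expr.gradient(theta))  # expected: 1j
```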
| 2021-07-30T20:34:57Z | [] | [] |
Traceback (most recent call last):
File "/qiskit-terra/venv/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3437, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-28-058190e5d73e>", line 1, in <module>
p2.gradient(p)
File "/qiskit-terra/qiskit/circuit/parameterexpression.py", line 303, in gradient
return float(expr_grad)
File "symengine_wrapper.pyx", line 1133, in symengine.lib.symengine_wrapper.Basic.__float__
File "symengine_wrapper.pyx", line 958, in symengine.lib.symengine_wrapper.Basic.n
File "symengine_wrapper.pyx", line 4194, in symengine.lib.symengine_wrapper.evalf
RuntimeError: Not Implemented
| 1,846 |
||||
Qiskit/qiskit | Qiskit__qiskit-6930 | 1eb668171722fb60abd90f942ca5b65ac56d9b34 | diff --git a/qiskit/visualization/__init__.py b/qiskit/visualization/__init__.py
--- a/qiskit/visualization/__init__.py
+++ b/qiskit/visualization/__init__.py
@@ -126,7 +126,7 @@
from qiskit.visualization.transition_visualization import visualize_transition
from qiskit.visualization.array import array_to_latex
-from .circuit_visualization import circuit_drawer, HAS_PIL, HAS_PDFLATEX
+from .circuit_visualization import circuit_drawer, HAS_PIL, HAS_PDFLATEX, HAS_PDFTOCAIRO
from .dag_visualization import dag_drawer
from .exceptions import VisualizationError
from .gate_map import plot_gate_map, plot_circuit_layout, plot_error_map
diff --git a/qiskit/visualization/circuit_visualization.py b/qiskit/visualization/circuit_visualization.py
--- a/qiskit/visualization/circuit_visualization.py
+++ b/qiskit/visualization/circuit_visualization.py
@@ -25,7 +25,6 @@
any of the backends.
"""
-import errno
import logging
import os
import subprocess
@@ -46,15 +45,56 @@
from qiskit.visualization import utils
from qiskit.visualization import matplotlib as _matplotlib
-try:
- subprocess.run("pdflatex --version", check=True)
- HAS_PDFLATEX = True
-except OSError as ex:
- HAS_PDFLATEX = False
logger = logging.getLogger(__name__)
+class _HasPdfLatexWrapper:
+ """Wrapper to lazily detect presence of the ``pdflatex`` command."""
+
+ def __init__(self):
+ self.has_pdflatex = None
+
+ def __bool__(self):
+ if self.has_pdflatex is None:
+ try:
+ subprocess.run(
+ ["pdflatex", "-version"],
+ check=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ self.has_pdflatex = True
+ except (OSError, subprocess.SubprocessError):
+ self.has_pdflatex = False
+ return self.has_pdflatex
+
+
+class _HasPdfToCairoWrapper:
+ """Lazily detect the presence of the ``pdftocairo`` command."""
+
+ def __init__(self):
+ self.has_pdftocairo = None
+
+ def __bool__(self):
+ if self.has_pdftocairo is None:
+ try:
+ subprocess.run(
+ ["pdftocairo", "-v"],
+ check=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ self.has_pdftocairo = True
+ except (OSError, subprocess.SubprocessError):
+ self.has_pdftocairo = False
+ return self.has_pdftocairo
+
+
+HAS_PDFLATEX = _HasPdfLatexWrapper()
+HAS_PDFTOCAIRO = _HasPdfToCairoWrapper()
+
+
def circuit_drawer(
circuit,
scale=None,
@@ -395,11 +435,9 @@ def _latex_circuit_drawer(
PIL.Image: an in-memory representation of the circuit diagram
Raises:
- OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is
- missing.
- CalledProcessError: usually points to errors during diagram creation.
- MissingOptionalLibraryError: if pillow is not installed
- VisualizationError: If unsupported image format is given as filename extension.
+ MissingOptionalLibraryError: if pillow, pdflatex, or poppler are not installed
+ VisualizationError: if one of the conversion utilities failed for some internal or
+ file-access reason.
"""
tmpfilename = "circuit"
with tempfile.TemporaryDirectory() as tmpdirname:
@@ -417,8 +455,25 @@ def _latex_circuit_drawer(
initial_state=initial_state,
cregbundle=cregbundle,
)
+ if not HAS_PDFLATEX:
+ raise MissingOptionalLibraryError(
+ libname="pdflatex",
+ name="LaTeX circuit drawing",
+ msg="You will likely need to install a full LaTeX distribution for your system",
+ )
+ if not HAS_PDFTOCAIRO:
+ raise MissingOptionalLibraryError(
+ libname="pdftocairo",
+ name="LaTeX circuit drawing",
+ msg="This is part of the 'poppler' set of PDF utilities",
+ )
+ if not HAS_PIL:
+ raise MissingOptionalLibraryError(
+ libname="pillow",
+ name="LaTeX circuit drawing",
+ pip_install="pip install pillow",
+ )
try:
-
subprocess.run(
[
"pdflatex",
@@ -430,55 +485,42 @@ def _latex_circuit_drawer(
stderr=subprocess.DEVNULL,
check=True,
)
- except OSError as ex:
- if ex.errno == errno.ENOENT:
- logger.warning(
- "WARNING: Unable to compile latex. "
- "Is `pdflatex` installed? "
- "Skipping latex circuit drawing..."
- )
- raise
- except subprocess.CalledProcessError as ex:
+ except OSError as exc:
+ # OSError should generally not occur, because it's usually only triggered if `pdflatex`
+ # doesn't exist as a command, but we've already checked that.
+ raise VisualizationError("`pdflatex` command could not be run.") from exc
+ except subprocess.CalledProcessError as exc:
with open("latex_error.log", "wb") as error_file:
- error_file.write(ex.stdout)
+ error_file.write(exc.stdout)
logger.warning(
- "WARNING Unable to compile latex. "
- "The output from the pdflatex command can "
- "be found in latex_error.log"
+ "Unable to compile LaTeX. Perhaps you are missing the `qcircuit` package."
+ " The output from the `pdflatex` command is in `latex_error.log`."
)
- raise
- else:
- if not HAS_PIL:
- raise MissingOptionalLibraryError(
- libname="pillow",
- name="latex drawer",
- pip_install="pip install pillow",
- )
- try:
- base = os.path.join(tmpdirname, tmpfilename)
- subprocess.run(
- ["pdftocairo", "-singlefile", "-png", "-q", base + ".pdf", base], check=True
- )
- image = Image.open(base + ".png")
- image = utils._trim(image)
- os.remove(base + ".png")
- if filename:
- if filename.endswith(".pdf"):
- os.rename(base + ".pdf", filename)
- else:
- try:
- image.save(filename)
- except VisualizationError as ve:
- raise VisualizationError(
- "ERROR: filename parameter does not use a supported extension."
- ) from ve
- except (OSError, subprocess.CalledProcessError) as ex:
- logger.warning(
- "WARNING: Unable to convert pdf to image. "
- "Is `poppler` installed? "
- "Skipping circuit drawing..."
- )
- raise
+ raise VisualizationError(
+ "`pdflatex` call did not succeed: see `latex_error.log`."
+ ) from exc
+ base = os.path.join(tmpdirname, tmpfilename)
+ try:
+ subprocess.run(
+ ["pdftocairo", "-singlefile", "-png", "-q", base + ".pdf", base],
+ check=True,
+ )
+ except (OSError, subprocess.CalledProcessError) as exc:
+ message = "`pdftocairo` failed to produce an image."
+ logger.warning(message)
+ raise VisualizationError(message) from exc
+ image = Image.open(base + ".png")
+ image = utils._trim(image)
+ if filename:
+ if filename.endswith(".pdf"):
+ os.rename(base + ".pdf", filename)
+ else:
+ try:
+ image.save(filename)
+ except (ValueError, OSError) as exc:
+ raise VisualizationError(
+ f"Pillow could not write the image file '{filename}'."
+ ) from exc
return image
| Transpiler raises an exception "Command 'pdflatex -v' returned non-zero exit status 1" on Windows
### Information
- **Qiskit Terra version**: main branch (commit b419e3968f7a88ff84d7968107c17ee226871513)
- **Python version**: 3.7
- **Operating system**: Windows
### What is the current behavior?
A simple call to the transpiler causes an exception even if `pdflatex` is installed. Likely, this behavior was introduced in #6487.
```
C:\...\envs\dev-terra\python.exe C:/.../qiskit-terra/_sandbox/pdflatex_bug.py
Sorry, but pdflatex did not succeed.
The log file hopefully contains the information to get MiKTeX going again:
C:\...\MiKTeX\2.9\miktex\log\pdflatex.log
pdflatex: major issue: So far, no MiKTeX administrator has checked for updates.
Traceback (most recent call last):
File "C:/.../qiskit-terra/_sandbox/pdflatex_bug.py", line 1, in <module>
from qiskit import QuantumCircuit, transpile
File "C:\...\qiskit-terra\qiskit\__init__.py", line 52, in <module>
from qiskit.execute_function import execute # noqa
File "C:\...\qiskit-terra\qiskit\execute_function.py", line 24, in <module>
from qiskit.compiler import transpile, assemble, schedule
File "C:\...\qiskit-terra\qiskit\compiler\__init__.py", line 34, in <module>
from .transpiler import transpile
File "C:\...\qiskit-terra\qiskit\compiler\transpiler.py", line 30, in <module>
from qiskit.transpiler import Layout, CouplingMap, PropertySet, PassManager
File "C:\...\qiskit-terra\qiskit\transpiler\__init__.py", line 418, in <module>
from .passmanager import PassManager
File "C:\...\qiskit-terra\qiskit\transpiler\passmanager.py", line 19, in <module>
from qiskit.visualization import pass_manager_drawer
File "C:\...\qiskit-terra\qiskit\visualization\__init__.py", line 129, in <module>
from .circuit_visualization import circuit_drawer, HAS_PIL, HAS_PDFLATEX
File "C:\...\qiskit-terra\qiskit\visualization\circuit_visualization.py", line 50, in <module>
subprocess.run("pdflatex -v", check=True)
File "C:\...\envs\dev-terra\lib\subprocess.py", line 512, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command 'pdflatex -v' returned non-zero exit status 1.
```
### Steps to reproduce the problem
Run a script:
```python
from qiskit import QuantumCircuit, transpile
qc = QuantumCircuit(2)
qc.cx(0, 1)
tqc = transpile(qc)
```
### What is the expected behavior?
No exception should be raised.
### Suggested solutions
Additional checks should be added to `circuit_visualization.py`, for example verifying that the `pdflatex` executable exists before invoking it (see the sketch below).
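One way to do that (a minimal sketch, not necessarily the change that was eventually merged) is to probe for the executables with `shutil.which` instead of running `pdflatex -v` with `check=True` at import time, so a broken MiKTeX installation cannot raise during `import qiskit`:
```python
# Sketch only: detect the LaTeX tooling without executing it at import time.
# HAS_PDFLATEX is the flag name seen in the traceback above; HAS_PDFTOCAIRO is
# a hypothetical companion flag for the PDF-to-PNG conversion step.
import shutil

HAS_PDFLATEX = shutil.which("pdflatex") is not None
HAS_PDFTOCAIRO = shutil.which("pdftocairo") is not None
```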
| 2021-08-20T12:39:30Z | [] | [] |
Traceback (most recent call last):
File "C:/.../qiskit-terra/_sandbox/pdflatex_bug.py", line 1, in <module>
from qiskit import QuantumCircuit, transpile
File "C:\...\qiskit-terra\qiskit\__init__.py", line 52, in <module>
from qiskit.execute_function import execute # noqa
File "C:\...\qiskit-terra\qiskit\execute_function.py", line 24, in <module>
from qiskit.compiler import transpile, assemble, schedule
File "C:\...\qiskit-terra\qiskit\compiler\__init__.py", line 34, in <module>
from .transpiler import transpile
File "C:\...\qiskit-terra\qiskit\compiler\transpiler.py", line 30, in <module>
from qiskit.transpiler import Layout, CouplingMap, PropertySet, PassManager
File "C:\...\qiskit-terra\qiskit\transpiler\__init__.py", line 418, in <module>
from .passmanager import PassManager
File "C:\...\qiskit-terra\qiskit\transpiler\passmanager.py", line 19, in <module>
from qiskit.visualization import pass_manager_drawer
File "C:\...\qiskit-terra\qiskit\visualization\__init__.py", line 129, in <module>
from .circuit_visualization import circuit_drawer, HAS_PIL, HAS_PDFLATEX
File "C:\...\qiskit-terra\qiskit\visualization\circuit_visualization.py", line 50, in <module>
subprocess.run("pdflatex -v", check=True)
File "C:\...\envs\dev-terra\lib\subprocess.py", line 512, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command 'pdflatex -v' returned non-zero exit status 1.
| 1,863 |
||||
Qiskit/qiskit | Qiskit__qiskit-710 | bddb8af58ddcb97f656bbcdd6b460edadd3f6508 | diff --git a/qiskit/_compositegate.py b/qiskit/_compositegate.py
--- a/qiskit/_compositegate.py
+++ b/qiskit/_compositegate.py
@@ -101,6 +101,11 @@ def inverse(self):
self.inverse_flag = not self.inverse_flag
return self
+ def reapply(self, circ):
+ """Reapply this gate to corresponding qubits in circ."""
+ for gate in self.data:
+ gate.reapply(circ)
+
def q_if(self, *qregs):
"""Add controls to this gate."""
self.data = [gate.q_if(qregs) for gate in self.data]
| Cannot combine or extend a circuit which is built with CompositeGate
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
Cannot use the `+` operator to combine or extend a circuit.
- **Qiskit (Python SDK) version**: 0.5.7
- **Python version**: 3.6.5
- **Operating system**: macOS 10.13
### What is the current behavior?
The program crashes:
```
Traceback (most recent call last):
File "qiskit_acqua/svm/svm_qkernel.py", line 327, in <module>
a.run()
File "qiskit_acqua/svm/svm_qkernel.py", line 306, in run
self.train(self.training_dataset, self.class_labels)
File "qiskit_acqua/svm/svm_qkernel.py", line 182, in train
kernel_matrix = self.construct_kernel_matrix(training_points)
File "qiskit_acqua/svm/svm_qkernel.py", line 158, in construct_kernel_matrix
circuit = self.inner_product(self.feature_extraction, self.num_qubits, x1, x2)
File "qiskit_acqua/svm/svm_qkernel.py", line 124, in inner_product
trial_circuit += feature_extractor.construct_circuit(x1, q)
File "/Users/rchen/.pythonVirtualEnv/quantum-dev/lib/python3.6/site-packages/qiskit/_quantumcircuit.py", line 173, in __iadd__
return self.extend(rhs)
File "/Users/rchen/.pythonVirtualEnv/quantum-dev/lib/python3.6/site-packages/qiskit/_quantumcircuit.py", line 164, in extend
gate.reapply(self)
AttributeError: 'CompositeGate' object has no attribute 'reapply'
```
### Steps to reproduce the problem
```python
from qiskit import CompositeGate, QuantumCircuit, QuantumRegister
from qiskit.extensions.standard.u1 import U1Gate
q = QuantumRegister(2)
composite_gate = CompositeGate("second_order_expansion", [], [q[i] for i in range(2)])
composite_gate._attach(U1Gate(0, q[0]))
qc1 = QuantumCircuit(q)
qc1._attach(composite_gate)
qc2 = QuantumCircuit(q)
qc2 = qc2 + qc1
```
### What is the expected behavior?
qc2 is a copy of qc1.
### Suggested solutions
Add a `reapply` method to `CompositeGate`, for example:
```python
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
for gate in self.data:
gate.reapply(circ)
```
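With that method in place, the reproduction above should run to completion (a quick sanity check continuing the snippet from the reproduction steps; `q` and `qc1` are as defined there):
```python
# Previously raised: AttributeError: 'CompositeGate' object has no attribute 'reapply'
qc2 = QuantumCircuit(q)
qc2 += qc1  # reapplies the composite's gates onto qc2
```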
| 2018-08-01T17:34:04Z | [] | [] |
Traceback (most recent call last):
File "qiskit_acqua/svm/svm_qkernel.py", line 327, in <module>
a.run()
File "qiskit_acqua/svm/svm_qkernel.py", line 306, in run
self.train(self.training_dataset, self.class_labels)
File "qiskit_acqua/svm/svm_qkernel.py", line 182, in train
kernel_matrix = self.construct_kernel_matrix(training_points)
File "qiskit_acqua/svm/svm_qkernel.py", line 158, in construct_kernel_matrix
circuit = self.inner_product(self.feature_extraction, self.num_qubits, x1, x2)
File "qiskit_acqua/svm/svm_qkernel.py", line 124, in inner_product
trial_circuit += feature_extractor.construct_circuit(x1, q)
File "/Users/rchen/.pythonVirtualEnv/quantum-dev/lib/python3.6/site-packages/qiskit/_quantumcircuit.py", line 173, in __iadd__
return self.extend(rhs)
File "/Users/rchen/.pythonVirtualEnv/quantum-dev/lib/python3.6/site-packages/qiskit/_quantumcircuit.py", line 164, in extend
gate.reapply(self)
AttributeError: 'CompositeGate' object has no attribute 'reapply'
| 1,890 |
||||
Qiskit/qiskit | Qiskit__qiskit-7389 | 5c4cd2bbcfaf32f4db112f76f8f13128872afde5 | diff --git a/qiskit/circuit/controlflow/break_loop.py b/qiskit/circuit/controlflow/break_loop.py
--- a/qiskit/circuit/controlflow/break_loop.py
+++ b/qiskit/circuit/controlflow/break_loop.py
@@ -15,7 +15,7 @@
from typing import Optional
from qiskit.circuit.instruction import Instruction
-from .builder import InstructionPlaceholder
+from .builder import InstructionPlaceholder, InstructionResources
class BreakLoopOp(Instruction):
@@ -49,7 +49,13 @@ def __init__(self, num_qubits: int, num_clbits: int, label: Optional[str] = None
class BreakLoopPlaceholder(InstructionPlaceholder):
"""A placeholder instruction for use in control-flow context managers, when the number of qubits
- and clbits is not yet known."""
+ and clbits is not yet known.
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
+ """
def __init__(self, *, label: Optional[str] = None):
super().__init__("break_loop", 0, 0, [], label=label)
@@ -57,10 +63,8 @@ def __init__(self, *, label: Optional[str] = None):
def concrete_instruction(self, qubits, clbits):
return (
self._copy_mutable_properties(BreakLoopOp(len(qubits), len(clbits), label=self.label)),
- tuple(qubits),
- tuple(clbits),
+ InstructionResources(qubits=tuple(qubits), clbits=tuple(clbits)),
)
def placeholder_resources(self):
- # Is it just me, or does this look like an owl?
- return ((), ())
+ return InstructionResources()
diff --git a/qiskit/circuit/controlflow/builder.py b/qiskit/circuit/controlflow/builder.py
--- a/qiskit/circuit/controlflow/builder.py
+++ b/qiskit/circuit/controlflow/builder.py
@@ -19,18 +19,43 @@
import abc
+import itertools
import typing
-from typing import Callable, Iterable, List, FrozenSet, Tuple, Union
+from typing import Callable, Collection, Iterable, List, FrozenSet, Tuple, Union
-from qiskit.circuit.classicalregister import Clbit
+from qiskit.circuit.classicalregister import Clbit, ClassicalRegister
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.instruction import Instruction
-from qiskit.circuit.quantumregister import Qubit
+from qiskit.circuit.quantumregister import Qubit, QuantumRegister
+from qiskit.circuit.register import Register
+
+from .condition import condition_registers
if typing.TYPE_CHECKING:
import qiskit # pylint: disable=cyclic-import
+class InstructionResources(typing.NamedTuple):
+ """The quantum and classical resources used within a particular instruction.
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
+
+ Attributes:
+ qubits: A collection of qubits that will be used by the instruction.
+ clbits: A collection of clbits that will be used by the instruction.
+ qregs: A collection of quantum registers that are used by the instruction.
+ cregs: A collection of classical registers that are used by the instruction.
+ """
+
+ qubits: Collection[Qubit] = ()
+ clbits: Collection[Clbit] = ()
+ qregs: Collection[QuantumRegister] = ()
+ cregs: Collection[ClassicalRegister] = ()
+
+
class InstructionPlaceholder(Instruction, abc.ABC):
"""A fake instruction that lies about its number of qubits and clbits.
@@ -38,7 +63,7 @@ class InstructionPlaceholder(Instruction, abc.ABC):
process, when their lengths cannot be known until the end of the block. This is necessary to
allow constructs like::
- with qc.for_loop(None, range(5)):
+ with qc.for_loop(range(5)):
qc.h(0)
qc.measure(0, 0)
qc.break_loop().c_if(0, 0)
@@ -51,6 +76,11 @@ class InstructionPlaceholder(Instruction, abc.ABC):
calling :meth:`.InstructionPlaceholder.placeholder_instructions`. This set will be a subset of
the final resources it asks for, but it is used for initialising resources that *must* be
supplied, such as the bits used in the conditions of placeholder ``if`` statements.
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
_directive = True
@@ -58,7 +88,7 @@ class InstructionPlaceholder(Instruction, abc.ABC):
@abc.abstractmethod
def concrete_instruction(
self, qubits: FrozenSet[Qubit], clbits: FrozenSet[Clbit]
- ) -> Tuple[Instruction, Tuple[Qubit, ...], Tuple[Clbit, ...]]:
+ ) -> Tuple[Instruction, InstructionResources]:
"""Get a concrete, complete instruction that is valid to act over all the given resources.
The returned resources may not be the full width of the given resources, but will certainly
@@ -76,14 +106,14 @@ def concrete_instruction(
clbits: The clbits the created instruction should be defined across.
Returns:
- Instruction: a full version of the relevant control-flow instruction. This is a
- "proper" instruction instance, as if it had been defined with the correct number of
- qubits and clbits from the beginning.
+ A full version of the relevant control-flow instruction, and the resources that it uses.
+ This is a "proper" instruction instance, as if it had been defined with the correct
+ number of qubits and clbits from the beginning.
"""
raise NotImplementedError
@abc.abstractmethod
- def placeholder_resources(self) -> Tuple[Tuple[Qubit, ...], Tuple[Clbit, ...]]:
+ def placeholder_resources(self) -> InstructionResources:
"""Get the qubit and clbit resources that this placeholder instruction should be considered
as using before construction.
@@ -93,7 +123,7 @@ def placeholder_resources(self) -> Tuple[Tuple[Qubit, ...], Tuple[Clbit, ...]]:
will be tracked by the scope managers.
Returns:
- A 2-tuple of the quantum and classical resources this placeholder instruction will
+ A collection of the quantum and classical resources this placeholder instruction will
certainly use.
"""
raise NotImplementedError
@@ -156,12 +186,18 @@ class ControlFlowBuilderBlock:
In short, :meth:`.append` adds resources, and :meth:`.build` may use only a subset of the extra
ones passed. This ensures that all instructions know about all the resources they need, even in
the case of ``break``, but do not block any resources that they do *not* need.
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
__slots__ = (
"instructions",
"qubits",
"clbits",
+ "registers",
"_allow_jumps",
"_resource_requester",
"_built",
@@ -172,6 +208,7 @@ def __init__(
qubits: Iterable[Qubit],
clbits: Iterable[Clbit],
*,
+ registers: Iterable[Register] = (),
resource_requester: Callable,
allow_jumps: bool = True,
):
@@ -182,6 +219,9 @@ def __init__(
with ``qubits``, this is useful for things such as ``if`` and ``while`` loop
builders, where the classical condition has associated resources, and is known when
this scope is created.
+ registers: Any registers this scope should consider itself as using from the
+ beginning. This is useful for :obj:`.IfElseOp` and :obj:`.WhileLoopOp` instances
+ which use a classical register as their condition.
allow_jumps: Whether this builder scope should allow ``break`` and ``continue``
statements within it. This is intended to help give sensible error messages when
dangerous behaviour is encountered, such as using ``break`` inside an ``if`` context
@@ -201,6 +241,7 @@ def __init__(
self.instructions: List[Tuple[Instruction, Tuple[Qubit, ...], Tuple[Clbit, ...]]] = []
self.qubits = set(qubits)
self.clbits = set(clbits)
+ self.registers = set(registers)
self._allow_jumps = allow_jumps
self._resource_requester = resource_requester
self._built = False
@@ -267,7 +308,10 @@ def request_classical_resource(self, specifier):
raise CircuitError("Cannot add resources after the scope has been built.")
# Allow the inner resolve to propagate exceptions.
resource = self._resource_requester(specifier)
- self.add_bits((resource,) if isinstance(resource, Clbit) else resource)
+ if isinstance(resource, Clbit):
+ self.add_bits((resource,))
+ else:
+ self.add_register(resource)
return resource
def peek(self) -> Tuple[Instruction, Tuple[Qubit, ...], Tuple[Clbit, ...]]:
@@ -307,10 +351,23 @@ def add_bits(self, bits: Iterable[Union[Qubit, Clbit]]):
else:
raise TypeError(f"Can only add qubits or classical bits, but received '{bit}'.")
+ def add_register(self, register: Register):
+ """Add a :obj:`.Register` to the set of resources used by this block, ensuring that
+ all bits contained within are also accounted for.
+
+ Args:
+ register: the register to add to the block.
+ """
+ if register in self.registers:
+ # Fast return to avoid iterating through the bits.
+ return
+ self.registers.add(register)
+ self.add_bits(register)
+
def build(
self, all_qubits: FrozenSet[Qubit], all_clbits: FrozenSet[Clbit]
) -> "qiskit.circuit.QuantumCircuit":
- """Build this scoped block into a complete :obj:`~QuantumCircuit` instance.
+ """Build this scoped block into a complete :obj:`.QuantumCircuit` instance.
This will build a circuit which contains all of the necessary qubits and clbits and no
others.
@@ -348,11 +405,13 @@ def build(
# We start off by only giving the QuantumCircuit the qubits we _know_ it will need, and add
# more later as needed.
- out = QuantumCircuit(list(self.qubits), list(self.clbits))
+ out = QuantumCircuit(list(self.qubits), list(self.clbits), *self.registers)
for operation, qubits, clbits in self.instructions:
if isinstance(operation, InstructionPlaceholder):
- operation, qubits, clbits = operation.concrete_instruction(all_qubits, all_clbits)
+ operation, resources = operation.concrete_instruction(all_qubits, all_clbits)
+ qubits = tuple(resources.qubits)
+ clbits = tuple(resources.clbits)
# We want to avoid iterating over the tuples unnecessarily if there's no chance
# we'll need to add bits to the circuit.
if potential_qubits and qubits:
@@ -365,12 +424,23 @@ def build(
if add_clbits:
potential_clbits -= add_clbits
out.add_bits(add_clbits)
+ for register in itertools.chain(resources.qregs, resources.cregs):
+ if register not in self.registers:
+ # As of 2021-12-09, QuantumCircuit doesn't have an efficient way to check if
+ # a register is already present, so we use our own tracking.
+ self.add_register(register)
+ out.add_register(register)
+ if operation.condition is not None:
+ for register in condition_registers(operation.condition):
+ if register not in self.registers:
+ self.add_register(register)
+ out.add_register(register)
# We already did the broadcasting and checking when the first call to
# QuantumCircuit.append happened (which the user wrote), and added the instruction into
# this scope. We just need to finish the job now.
#
- # We have to convert the tuples to lists, because some parts of QuantumCircuit still
- # expect exactly this type.
+ # We have to convert to lists, because some parts of QuantumCircuit still expect
+ # exactly this type.
out._append(operation, list(qubits), list(clbits))
return out
@@ -388,5 +458,6 @@ def copy(self) -> "ControlFlowBuilderBlock":
out.instructions = self.instructions.copy()
out.qubits = self.qubits.copy()
out.clbits = self.clbits.copy()
+ out.registers = self.registers.copy()
out._allow_jumps = self._allow_jumps
return out
diff --git a/qiskit/circuit/controlflow/condition.py b/qiskit/circuit/controlflow/condition.py
--- a/qiskit/circuit/controlflow/condition.py
+++ b/qiskit/circuit/controlflow/condition.py
@@ -57,3 +57,20 @@ def condition_bits(condition: Tuple[Union[ClassicalRegister, Clbit], int]) -> Tu
a tuple of all classical bits used in the condition.
"""
return (condition[0],) if isinstance(condition[0], Clbit) else tuple(condition[0])
+
+
+def condition_registers(
+ condition: Tuple[Union[ClassicalRegister, Clbit], int]
+) -> Tuple[ClassicalRegister, ...]:
+ """Return any classical registers used by ``condition`` as a tuple of :obj:`.ClassicalRegister`.
+
+ This is useful as a quick method for extracting the registers from a condition, if any exist.
+ The output might be empty if the condition is on a single bit.
+
+ Args:
+ condition: the valid condition to extract any registers from.
+
+ Returns:
+ a tuple of all classical registers used in the condition.
+ """
+ return (condition[0],) if isinstance(condition[0], ClassicalRegister) else ()
diff --git a/qiskit/circuit/controlflow/continue_loop.py b/qiskit/circuit/controlflow/continue_loop.py
--- a/qiskit/circuit/controlflow/continue_loop.py
+++ b/qiskit/circuit/controlflow/continue_loop.py
@@ -15,7 +15,7 @@
from typing import Optional
from qiskit.circuit.instruction import Instruction
-from .builder import InstructionPlaceholder
+from .builder import InstructionPlaceholder, InstructionResources
class ContinueLoopOp(Instruction):
@@ -49,7 +49,13 @@ def __init__(self, num_qubits: int, num_clbits: int, label: Optional[str] = None
class ContinueLoopPlaceholder(InstructionPlaceholder):
"""A placeholder instruction for use in control-flow context managers, when the number of qubits
- and clbits is not yet known."""
+ and clbits is not yet known.
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
+ """
def __init__(self, *, label: Optional[str] = None):
super().__init__("continue_loop", 0, 0, [], label=label)
@@ -59,9 +65,8 @@ def concrete_instruction(self, qubits, clbits):
self._copy_mutable_properties(
ContinueLoopOp(len(qubits), len(clbits), label=self.label)
),
- tuple(qubits),
- tuple(clbits),
+ InstructionResources(qubits=tuple(qubits), clbits=tuple(clbits)),
)
def placeholder_resources(self):
- return ((), ())
+ return InstructionResources()
diff --git a/qiskit/circuit/controlflow/for_loop.py b/qiskit/circuit/controlflow/for_loop.py
--- a/qiskit/circuit/controlflow/for_loop.py
+++ b/qiskit/circuit/controlflow/for_loop.py
@@ -148,6 +148,11 @@ class ForLoopContext:
the resulting instance is a "friend" of the calling circuit. The context will manipulate the
circuit's defined scopes when it is entered (by pushing a new scope onto the stack) and exited
(by popping its scope, building it, and appending the resulting :obj:`.ForLoopOp`).
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
# Class-level variable keep track of the number of auto-generated loop variables, so we don't
diff --git a/qiskit/circuit/controlflow/if_else.py b/qiskit/circuit/controlflow/if_else.py
--- a/qiskit/circuit/controlflow/if_else.py
+++ b/qiskit/circuit/controlflow/if_else.py
@@ -13,13 +13,15 @@
"Circuit operation representing an ``if/else`` statement."
-from typing import Optional, Tuple, Union
+from typing import Optional, Tuple, Union, Iterable, Set
-from qiskit.circuit import ClassicalRegister, Clbit, QuantumCircuit, Qubit
+from qiskit.circuit import ClassicalRegister, Clbit, QuantumCircuit
from qiskit.circuit.instructionset import InstructionSet
from qiskit.circuit.exceptions import CircuitError
-from .builder import ControlFlowBuilderBlock, InstructionPlaceholder
-from .condition import validate_condition, condition_bits
+from qiskit.circuit.quantumregister import QuantumRegister
+from qiskit.circuit.register import Register
+from .builder import ControlFlowBuilderBlock, InstructionPlaceholder, InstructionResources
+from .condition import validate_condition, condition_bits, condition_registers
from .control_flow import ControlFlowOp
@@ -148,6 +150,11 @@ class IfElsePlaceholder(InstructionPlaceholder):
This generally should not be instantiated manually; only :obj:`.IfContext` and
:obj:`.ElseContext` should do it when they need to defer creation of the concrete instruction.
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
def __init__(
@@ -171,8 +178,9 @@ def __init__(
self.__true_block = true_block
self.__false_block: Optional[ControlFlowBuilderBlock] = false_block
self.__resources = self._placeholder_resources()
- qubits, clbits = self.__resources
- super().__init__("if_else", len(qubits), len(clbits), [], label=label)
+ super().__init__(
+ "if_else", len(self.__resources.qubits), len(self.__resources.clbits), [], label=label
+ )
# Set the condition after super().__init__() has initialised it to None.
self.condition = validate_condition(condition)
@@ -201,7 +209,13 @@ def with_false_block(self, false_block: ControlFlowBuilderBlock) -> "IfElsePlace
false_block.add_bits(true_bits - false_bits)
return type(self)(self.condition, true_block, false_block, label=self.label)
- def _placeholder_resources(self) -> Tuple[Tuple[Qubit, ...], Tuple[Clbit, ...]]:
+ def registers(self):
+ """Get the registers used by the interior blocks."""
+ if self.__false_block is None:
+ return self.__true_block.registers.copy()
+ return self.__true_block.registers | self.__false_block.registers
+
+ def _placeholder_resources(self) -> InstructionResources:
"""Get the placeholder resources (see :meth:`.placeholder_resources`).
This is a separate function because we use the resources during the initialisation to
@@ -209,14 +223,24 @@ def _placeholder_resources(self) -> Tuple[Tuple[Qubit, ...], Tuple[Clbit, ...]]:
public version as a cache access for efficiency.
"""
if self.__false_block is None:
- return tuple(self.__true_block.qubits), tuple(self.__true_block.clbits)
- return (
- tuple(self.__true_block.qubits | self.__false_block.qubits),
- tuple(self.__true_block.clbits | self.__false_block.clbits),
+ qregs, cregs = _partition_registers(self.__true_block.registers)
+ return InstructionResources(
+ qubits=tuple(self.__true_block.qubits),
+ clbits=tuple(self.__true_block.clbits),
+ qregs=tuple(qregs),
+ cregs=tuple(cregs),
+ )
+ true_qregs, true_cregs = _partition_registers(self.__true_block.registers)
+ false_qregs, false_cregs = _partition_registers(self.__false_block.registers)
+ return InstructionResources(
+ qubits=tuple(self.__true_block.qubits | self.__false_block.qubits),
+ clbits=tuple(self.__true_block.clbits | self.__false_block.clbits),
+ qregs=tuple(true_qregs) + tuple(false_qregs),
+ cregs=tuple(true_cregs) + tuple(false_cregs),
)
def placeholder_resources(self):
- # Tuple and Bit are both immutable, so the resource cache is completely immutable.
+ # All the elements of our InstructionResources are immutable (tuple, Bit and Register).
return self.__resources
def concrete_instruction(self, qubits, clbits):
@@ -241,13 +265,17 @@ def concrete_instruction(self, qubits, clbits):
# The bodies are not compelled to use all the resources that the
# ControlFlowBuilderBlock.build calls get passed, but they do need to be as wide as each
# other. Now we ensure that they are.
- true_body, false_body = _unify_circuit_bits(true_body, false_body)
+ true_body, false_body = _unify_circuit_resources(true_body, false_body)
return (
self._copy_mutable_properties(
IfElseOp(self.condition, true_body, false_body, label=self.label)
),
- tuple(true_body.qubits),
- tuple(true_body.clbits),
+ InstructionResources(
+ qubits=tuple(true_body.qubits),
+ clbits=tuple(true_body.clbits),
+ qregs=tuple(true_body.qregs),
+ cregs=tuple(true_body.cregs),
+ ),
)
def c_if(self, classical, val):
@@ -268,6 +296,11 @@ class IfContext:
the resulting instance is a "friend" of the calling circuit. The context will manipulate the
circuit's defined scopes when it is entered (by pushing a new scope onto the stack) and exited
(by popping its scope, building it, and appending the resulting :obj:`.IfElseOp`).
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
__slots__ = ("_appended_instructions", "_circuit", "_condition", "_in_loop", "_label")
@@ -312,7 +345,11 @@ def in_loop(self) -> bool:
return self._in_loop
def __enter__(self):
- self._circuit._push_scope(clbits=condition_bits(self._condition), allow_jumps=self._in_loop)
+ self._circuit._push_scope(
+ clbits=condition_bits(self._condition),
+ registers=condition_registers(self._condition),
+ allow_jumps=self._in_loop,
+ )
return ElseContext(self)
def __exit__(self, exc_type, exc_val, exc_tb):
@@ -328,8 +365,9 @@ def __exit__(self, exc_type, exc_val, exc_tb):
# attached which _does_ gain them. We emit a placeholder to defer defining the
# resources we use until the containing loop concludes, to support ``break``.
operation = IfElsePlaceholder(self._condition, true_block, label=self._label)
+ resources = operation.placeholder_resources()
self._appended_instructions = self._circuit.append(
- operation, *operation.placeholder_resources()
+ operation, resources.qubits, resources.clbits
)
else:
# If we're not in a loop, we don't need to be worried about passing in any outer-scope
@@ -353,9 +391,14 @@ class ElseContext:
The context will manipulate the circuit's defined scopes when it is entered (by popping the old
:obj:`.IfElseOp` if it exists and pushing a new scope onto the stack) and exited (by popping its
scope, building it, and appending the resulting :obj:`.IfElseOp`).
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
- __slots__ = ("_if_block", "_if_clbits", "_if_context", "_if_qubits", "_used")
+ __slots__ = ("_if_block", "_if_clbits", "_if_registers", "_if_context", "_if_qubits", "_used")
def __init__(self, if_context: IfContext):
# We want to avoid doing any processing until we're actually used, because the `if` block
@@ -364,6 +407,7 @@ def __init__(self, if_context: IfContext):
self._if_block = None
self._if_qubits = None
self._if_clbits = None
+ self._if_registers = None
self._if_context = if_context
self._used = False
@@ -390,7 +434,18 @@ def __enter__(self):
self._if_qubits,
self._if_clbits,
) = circuit._pop_previous_instruction_in_scope()
- circuit._push_scope(self._if_qubits, self._if_clbits, allow_jumps=self._if_context.in_loop)
+ if isinstance(self._if_block, IfElseOp):
+ self._if_registers = set(self._if_block.blocks[0].cregs).union(
+ self._if_block.blocks[0].qregs
+ )
+ else:
+ self._if_registers = self._if_block.registers()
+ circuit._push_scope(
+ self._if_qubits,
+ self._if_clbits,
+ registers=self._if_registers,
+ allow_jumps=self._if_context.in_loop,
+ )
def __exit__(self, exc_type, exc_val, exc_tb):
circuit = self._if_context.circuit
@@ -409,7 +464,8 @@ def __exit__(self, exc_type, exc_val, exc_tb):
# is not.
if isinstance(self._if_block, IfElsePlaceholder):
if_block = self._if_block.with_false_block(false_block)
- circuit.append(if_block, *if_block.placeholder_resources())
+ resources = if_block.placeholder_resources()
+ circuit.append(if_block, resources.qubits, resources.clbits)
else:
# In this case, we need to update both true_body and false_body to have exactly the same
# widths. Passing extra resources to `ControlFlowBuilderBlock.build` doesn't _compel_
@@ -418,7 +474,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
# bits onto the circuits at the end.
true_body = self._if_block.blocks[0]
false_body = false_block.build(false_block.qubits, false_block.clbits)
- true_body, false_body = _unify_circuit_bits(true_body, false_body)
+ true_body, false_body = _unify_circuit_resources(true_body, false_body)
circuit.append(
IfElseOp(
self._if_context.condition,
@@ -432,20 +488,37 @@ def __exit__(self, exc_type, exc_val, exc_tb):
return False
-def _unify_circuit_bits(
+def _partition_registers(
+ registers: Iterable[Register],
+) -> Tuple[Set[QuantumRegister], Set[ClassicalRegister]]:
+ """Partition a sequence of registers into its quantum and classical registers."""
+ qregs = set()
+ cregs = set()
+ for register in registers:
+ if isinstance(register, QuantumRegister):
+ qregs.add(register)
+ elif isinstance(register, ClassicalRegister):
+ cregs.add(register)
+ else:
+ # Purely defensive against Terra expansion.
+ raise CircuitError(f"Unknown register: {register}.")
+ return qregs, cregs
+
+
+def _unify_circuit_resources(
true_body: QuantumCircuit, false_body: Optional[QuantumCircuit]
) -> Tuple[QuantumCircuit, Union[QuantumCircuit, None]]:
"""
- Ensure that ``true_body`` and ``false_body`` have all the same qubits and clbits, and that they
- are defined in the same order. The order is important for binding when the bodies are used in
- the 3-tuple :obj:`.Instruction` context.
+ Ensure that ``true_body`` and ``false_body`` have all the same qubits, clbits and registers, and
+ that they are defined in the same order. The order is important for binding when the bodies are
+ used in the 3-tuple :obj:`.Instruction` context.
This function will preferentially try to mutate ``true_body`` and ``false_body`` if they share
an ordering, but if not, it will rebuild two new circuits. This is to avoid coupling too
tightly to the inner class; there is no real support for deleting or re-ordering bits within a
:obj:`.QuantumCircuit` context, and we don't want to rely on the *current* behaviour of the
private APIs, since they are very liable to change. No matter the method used, two circuits
- with unified bits are returned.
+ with unified bits and registers are returned.
"""
if false_body is None:
return true_body, false_body
@@ -461,17 +534,17 @@ def _unify_circuit_bits(
elif n_false_qubits < n_true_qubits and false_qubits == true_qubits[:n_false_qubits]:
false_body.add_bits(true_qubits[n_false_qubits:])
else:
- return _unify_circuit_bits_rebuild(true_body, false_body)
+ return _unify_circuit_resources_rebuild(true_body, false_body)
if n_true_clbits <= n_false_clbits and true_clbits == false_clbits[:n_true_clbits]:
true_body.add_bits(false_clbits[n_true_clbits:])
elif n_false_clbits < n_true_clbits and false_clbits == true_clbits[:n_false_clbits]:
false_body.add_bits(true_clbits[n_false_clbits:])
else:
- return _unify_circuit_bits_rebuild(true_body, false_body)
- return true_body, false_body
+ return _unify_circuit_resources_rebuild(true_body, false_body)
+ return _unify_circuit_registers(true_body, false_body)
-def _unify_circuit_bits_rebuild(
+def _unify_circuit_resources_rebuild( # pylint: disable=invalid-name # (it's too long?!)
true_body: QuantumCircuit, false_body: QuantumCircuit
) -> Tuple[QuantumCircuit, QuantumCircuit]:
"""
@@ -484,10 +557,27 @@ def _unify_circuit_bits_rebuild(
qubits = list(set(true_body.qubits).union(false_body.qubits))
clbits = list(set(true_body.clbits).union(false_body.clbits))
# We use the inner `_append` method because everything is already resolved.
- true_out = QuantumCircuit(qubits, clbits)
+ true_out = QuantumCircuit(qubits, clbits, *true_body.qregs, *true_body.cregs)
for data in true_body.data:
true_out._append(*data)
- false_out = QuantumCircuit(qubits, clbits)
+ false_out = QuantumCircuit(qubits, clbits, *false_body.qregs, *false_body.cregs)
for data in false_body.data:
false_out._append(*data)
- return true_out, false_out
+ return _unify_circuit_registers(true_out, false_out)
+
+
+def _unify_circuit_registers(
+ true_body: QuantumCircuit, false_body: QuantumCircuit
+) -> Tuple[QuantumCircuit, QuantumCircuit]:
+ """
+ Ensure that ``true_body`` and ``false_body`` have the same registers defined within them. These
+ do not need to be in the same order between circuits. The two input circuits are returned,
+ mutated to have the same registers.
+ """
+ true_registers = set(true_body.qregs) | set(true_body.cregs)
+ false_registers = set(false_body.qregs) | set(false_body.cregs)
+ for register in false_registers - true_registers:
+ true_body.add_register(register)
+ for register in true_registers - false_registers:
+ false_body.add_register(register)
+ return true_body, false_body
diff --git a/qiskit/circuit/controlflow/while_loop.py b/qiskit/circuit/controlflow/while_loop.py
--- a/qiskit/circuit/controlflow/while_loop.py
+++ b/qiskit/circuit/controlflow/while_loop.py
@@ -16,7 +16,7 @@
from qiskit.circuit import Clbit, ClassicalRegister, QuantumCircuit
from qiskit.circuit.exceptions import CircuitError
-from .condition import validate_condition, condition_bits
+from .condition import validate_condition, condition_bits, condition_registers
from .control_flow import ControlFlowOp
@@ -125,6 +125,11 @@ class WhileLoopContext:
qc.h(0)
qc.cx(0, 1)
qc.measure(0, 0)
+
+ .. warning::
+
+ This is an internal interface and no part of it should be relied upon outside of Qiskit
+ Terra.
"""
__slots__ = ("_circuit", "_condition", "_label")
@@ -146,7 +151,9 @@ def __init__(
self._label = label
def __enter__(self):
- self._circuit._push_scope(clbits=condition_bits(self._condition))
+ self._circuit._push_scope(
+ clbits=condition_bits(self._condition), registers=condition_registers(self._condition)
+ )
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py
--- a/qiskit/circuit/quantumcircuit.py
+++ b/qiskit/circuit/quantumcircuit.py
@@ -4052,7 +4052,11 @@ def pauli(
return self.append(PauliGate(pauli_string), qubits, [])
def _push_scope(
- self, qubits: Iterable[Qubit] = (), clbits: Iterable[Clbit] = (), allow_jumps: bool = True
+ self,
+ qubits: Iterable[Qubit] = (),
+ clbits: Iterable[Clbit] = (),
+ registers: Iterable[Register] = (),
+ allow_jumps: bool = True,
):
"""Add a scope for collecting instructions into this circuit.
@@ -4067,11 +4071,19 @@ def _push_scope(
# pylint: disable=cyclic-import
from qiskit.circuit.controlflow.builder import ControlFlowBuilderBlock
+ # Chain resource requests so things like registers added to inner scopes via conditions are
+ # requested in the outer scope as well.
+ if self._control_flow_scopes:
+ resource_requester = self._control_flow_scopes[-1].request_classical_resource
+ else:
+ resource_requester = self._resolve_classical_resource
+
self._control_flow_scopes.append(
ControlFlowBuilderBlock(
qubits,
clbits,
- resource_requester=self._resolve_classical_resource,
+ resource_requester=resource_requester,
+ registers=registers,
allow_jumps=allow_jumps,
)
)
@@ -4516,7 +4528,8 @@ def break_loop(self) -> InstructionSet:
if self._control_flow_scopes:
operation = BreakLoopPlaceholder()
- return self.append(operation, *operation.placeholder_resources())
+ resources = operation.placeholder_resources()
+ return self.append(operation, resources.qubits, resources.clbits)
return self.append(BreakLoopOp(self.num_qubits, self.num_clbits), self.qubits, self.clbits)
def continue_loop(self) -> InstructionSet:
@@ -4545,7 +4558,8 @@ def continue_loop(self) -> InstructionSet:
if self._control_flow_scopes:
operation = ContinueLoopPlaceholder()
- return self.append(operation, *operation.placeholder_resources())
+ resources = operation.placeholder_resources()
+ return self.append(operation, resources.qubits, resources.clbits)
return self.append(
ContinueLoopOp(self.num_qubits, self.num_clbits), self.qubits, self.clbits
)
| `DAGCircuitError` when `if_test` is called in `for_loop` scope.
### Environment
- **Qiskit Terra version**: 0.19.0
- **Python version**: any
- **Operating system**: any
### What is happening?
A `DAGCircuitError` is raised when `if_test` is called inside a `for_loop` scope.
### How can we reproduce the issue?
```
from qiskit import QuantumCircuit
from qiskit.circuit import QuantumRegister, ClassicalRegister
qreg = QuantumRegister(4)
creg = ClassicalRegister(1)
circ = QuantumCircuit(qreg, creg)
with circ.for_loop(range(10)) as a:
circ.ry(a, 0)
with circ.if_test((creg, 1)):
circ.break_loop()
print(circ)
print(circ.data[0][0].params[2]) ## qiskit.dagcircuit.exceptions.DAGCircuitError: 'invalid creg in condition for if_else'
```
```
┌───────────┐
q0_0: ┤0 ├
│ │
q0_1: ┤ ├
│ │
q0_2: ┤ For_loop ├
│ │
q0_3: ┤ ├
│ │
c0: ╡0 ╞
└───────────┘
Traceback (most recent call last):
File "nest_error.py", line 14, in <module>
print(circ.data[0][0].params[2])
File "site-packages/qiskit/circuit/quantumcircuit.py", line 364, in __str__
return str(self.draw(output="text"))
File "site-packages/qiskit/circuit/quantumcircuit.py", line 1865, in draw
return circuit_drawer(
File "site-packages/qiskit/visualization/circuit_visualization.py", line 242, in circuit_drawer
return _text_circuit_drawer(
File "site-packages/qiskit/visualization/circuit_visualization.py", line 360, in _text_circuit_drawer
qubits, clbits, nodes = utils._get_layered_instructions(
File "site-packages/qiskit/visualization/utils.py", line 343, in _get_layered_instructions
dag = circuit_to_dag(circuit)
File "site-packages/qiskit/converters/circuit_to_dag.py", line 62, in circuit_to_dag
dagcircuit.apply_operation_back(instruction.copy(), qargs, cargs)
File "site-packages/qiskit/dagcircuit/dagcircuit.py", line 541, in apply_operation_back
self._check_condition(op.name, op.condition)
File "site-packages/qiskit/dagcircuit/dagcircuit.py", line 431, in _check_condition
raise DAGCircuitError("invalid creg in condition for %s" % name)
qiskit.dagcircuit.exceptions.DAGCircuitError: 'invalid creg in condition for if_else'
```
### What should happen?
No error.
### Any suggestions?
None
| The issue here is that the control-flow builder blocks don't track the registers that are used, only the bits, so they don't define any necessary registers in their body blocks. We need to add tracking of these to `ControlFlowBuilderBlock`, and add them when the circuits are created to fix this.
It's a `DAGCircuitError` because the drawer tries to convert the circuit into a DAG first to get the orderings, and that's when the issue gets detected. | 2021-12-09T16:45:30Z | [] | [] |
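The shape of that fix, boiled down to a few lines (a heavily simplified sketch, not the real `ControlFlowBuilderBlock`): the scope tracks registers alongside its bits and defines them on the body circuit it builds, which is what keeps `circuit_to_dag` from rejecting the condition.
```python
# Heavily simplified sketch of the register-tracking idea -- not Terra's class.
from qiskit import QuantumCircuit


class Scope:
    def __init__(self, qubits=(), clbits=(), registers=()):
        self.qubits = set(qubits)
        self.clbits = set(clbits)
        self.registers = set(registers)  # the piece that was missing

    def add_register(self, creg):
        # Assumes a classical register; the real code handles both kinds.
        self.registers.add(creg)
        self.clbits.update(creg)

    def build(self):
        # Passing the registers here is what makes the condition's creg valid
        # when the body is later converted to a DAG.
        return QuantumCircuit(list(self.qubits), list(self.clbits), *self.registers)
```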
Traceback (most recent call last):
File "nest_error.py", line 14, in <module>
print(circ.data[0][0].params[2])
File "site-packages/qiskit/circuit/quantumcircuit.py", line 364, in __str__
return str(self.draw(output="text"))
File "site-packages/qiskit/circuit/quantumcircuit.py", line 1865, in draw
return circuit_drawer(
File "site-packages/qiskit/visualization/circuit_visualization.py", line 242, in circuit_drawer
return _text_circuit_drawer(
File "site-packages/qiskit/visualization/circuit_visualization.py", line 360, in _text_circuit_drawer
qubits, clbits, nodes = utils._get_layered_instructions(
File "site-packages/qiskit/visualization/utils.py", line 343, in _get_layered_instructions
dag = circuit_to_dag(circuit)
File "site-packages/qiskit/converters/circuit_to_dag.py", line 62, in circuit_to_dag
dagcircuit.apply_operation_back(instruction.copy(), qargs, cargs)
File "site-packages/qiskit/dagcircuit/dagcircuit.py", line 541, in apply_operation_back
self._check_condition(op.name, op.condition)
File "site-packages/qiskit/dagcircuit/dagcircuit.py", line 431, in _check_condition
raise DAGCircuitError("invalid creg in condition for %s" % name)
qiskit.dagcircuit.exceptions.DAGCircuitError: 'invalid creg in condition for if_else'
| 1,934 |
|||
Qiskit/qiskit | Qiskit__qiskit-7407 | 93a8172c3ca8fda510393087a861ef12d661906f | diff --git a/qiskit/circuit/library/standard_gates/equivalence_library.py b/qiskit/circuit/library/standard_gates/equivalence_library.py
--- a/qiskit/circuit/library/standard_gates/equivalence_library.py
+++ b/qiskit/circuit/library/standard_gates/equivalence_library.py
@@ -354,6 +354,13 @@
def_ry.append(RGate(theta, pi / 2), [q[0]], [])
_sel.add_equivalence(RYGate(theta), def_ry)
+q = QuantumRegister(1, "q")
+ry_to_rx = QuantumCircuit(q)
+ry_to_rx.sdg(0)
+ry_to_rx.rx(theta, 0)
+ry_to_rx.s(0)
+_sel.add_equivalence(RYGate(theta), ry_to_rx)
+
# CRYGate
#
# q_0: ────■──── q_0: ─────────────■────────────────■──
@@ -451,6 +458,20 @@
ryy_to_rzz.append(inst, qargs, cargs)
_sel.add_equivalence(RYYGate(theta), ryy_to_rzz)
+# RYY to RXX
+q = QuantumRegister(2, "q")
+theta = Parameter("theta")
+ryy_to_rxx = QuantumCircuit(q)
+for inst, qargs, cargs in [
+ (SdgGate(), [q[0]], []),
+ (SdgGate(), [q[1]], []),
+ (RXXGate(theta), [q[0], q[1]], []),
+ (SGate(), [q[0]], []),
+ (SGate(), [q[1]], []),
+]:
+ ryy_to_rxx.append(inst, qargs, cargs)
+_sel.add_equivalence(RYYGate(theta), ryy_to_rxx)
+
# RZGate
# global phase: -ϴ/2
# ┌───────┐ ┌───────┐
@@ -474,6 +495,13 @@
rz_to_sxry.sxdg(0)
_sel.add_equivalence(RZGate(theta), rz_to_sxry)
+q = QuantumRegister(1, "q")
+rz_to_rx = QuantumCircuit(q)
+rz_to_rx.h(0)
+rz_to_rx.rx(theta, 0)
+rz_to_rx.h(0)
+_sel.add_equivalence(RZGate(theta), rz_to_rx)
+
# CRZGate
#
# q_0: ────■──── q_0: ─────────────■────────────────■──
@@ -588,7 +616,6 @@
rzz_to_ryy.append(inst, qargs, cargs)
_sel.add_equivalence(RZZGate(theta), rzz_to_ryy)
-
# RZXGate
#
# ┌─────────┐
| Add translation to RX(X) basis to equivalence library
### What is the expected enhancement?
Allow using `RX` and `RXX` as basis gates by adding appropriate entries to the equivalence library:
* `RZ = H RX H` (and `RZZ = HH RXX HH`)
* `RY = Sdg RX S` (and equivalently for 2 qubits)
Currently this translation is not supported:
```python
>>> from qiskit import QuantumCircuit, transpile
>>> qc = QuantumCircuit(1)
>>> qc.ry(0.1, 0);
>>> transpile(qc, basis_gates=["rx", "h", "s", "sdg"])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jul/Qiskit/qiskit-terra/qiskit/compiler/transpiler.py", line 319, in transpile
circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args)))
File "/Users/jul/Qiskit/qiskit-terra/qiskit/tools/parallel.py", line 132, in parallel_map
return [task(values[0], *task_args, **task_kwargs)]
File "/Users/jul/Qiskit/qiskit-terra/qiskit/compiler/transpiler.py", line 407, in _transpile_circuit
circuit, callback=transpile_config["callback"], output_name=transpile_config["output_name"]
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/passmanager.py", line 216, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/passmanager.py", line 272, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 123, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 154, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 166, in _run_this_pass
new_dag = pass_.run(dag)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/passes/basis/basis_translator.py", line 114, in run
"over library {}.".format(source_basis, target_basis, self._equiv_lib)
qiskit.transpiler.exceptions.TranspilerError: "Unable to map source basis {('ry', 1)} to target basis {'reset', 'snapshot', 's', 'delay', 'barrier', 'h', 'measure', 'sdg', 'rx'} over library <qiskit.circuit.equivalence.EquivalenceLibrary object at 0x7f9058c20a90>."
```
| `basis_gates=["rx", "rz"]` works.
So I agree that it should be enough to add the equivalence relations you have highlighted. | 2021-12-13T23:10:33Z | [] | [] |
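Before those entries landed, a user-level workaround along the same lines would have been to register the rule with the session equivalence library (a sketch; it assumes the default `BasisTranslator` consults `SessionEquivalenceLibrary`, and the gate order mirrors the patch above):
```python
# Sketch: register RY(theta) = S . RX(theta) . Sdg so the translator can reach rx.
from qiskit import QuantumCircuit, transpile
from qiskit.circuit import Parameter
from qiskit.circuit.library import RYGate
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary

theta = Parameter("theta")
ry_to_rx = QuantumCircuit(1)
ry_to_rx.sdg(0)
ry_to_rx.rx(theta, 0)
ry_to_rx.s(0)
SessionEquivalenceLibrary.add_equivalence(RYGate(theta), ry_to_rx)

qc = QuantumCircuit(1)
qc.ry(0.1, 0)
transpile(qc, basis_gates=["rx", "h", "s", "sdg"])  # no longer raises TranspilerError
```
The identity itself can be double-checked numerically with `qiskit.quantum_info.Operator` if in doubt.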
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/jul/Qiskit/qiskit-terra/qiskit/compiler/transpiler.py", line 319, in transpile
circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_args)))
File "/Users/jul/Qiskit/qiskit-terra/qiskit/tools/parallel.py", line 132, in parallel_map
return [task(values[0], *task_args, **task_kwargs)]
File "/Users/jul/Qiskit/qiskit-terra/qiskit/compiler/transpiler.py", line 407, in _transpile_circuit
circuit, callback=transpile_config["callback"], output_name=transpile_config["output_name"]
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/passmanager.py", line 216, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/passmanager.py", line 272, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 123, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 154, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/runningpassmanager.py", line 166, in _run_this_pass
new_dag = pass_.run(dag)
File "/Users/jul/Qiskit/qiskit-terra/qiskit/transpiler/passes/basis/basis_translator.py", line 114, in run
"over library {}.".format(source_basis, target_basis, self._equiv_lib)
qiskit.transpiler.exceptions.TranspilerError: "Unable to map source basis {('ry', 1)} to target basis {'reset', 'snapshot', 's', 'delay', 'barrier', 'h', 'measure', 'sdg', 'rx'} over library <qiskit.circuit.equivalence.EquivalenceLibrary object at 0x7f9058c20a90>."
| 1,937 |
|||
Qiskit/qiskit | Qiskit__qiskit-8321 | e9f2a2b1975bce588a33a4675e8dcce3920b3493 | diff --git a/qiskit/visualization/latex.py b/qiskit/visualization/latex.py
--- a/qiskit/visualization/latex.py
+++ b/qiskit/visualization/latex.py
@@ -420,7 +420,7 @@ def _build_latex_array(self):
for node in layer:
op = node.op
num_cols_op = 1
- wire_list = [self._wire_map[qarg] for qarg in node.qargs]
+ wire_list = [self._wire_map[qarg] for qarg in node.qargs if qarg in self._qubits]
if op.condition:
self._add_condition(op, wire_list, column)
@@ -435,7 +435,9 @@ def _build_latex_array(self):
gate_text += get_param_str(op, "latex", ndigits=4)
gate_text = generate_latex_label(gate_text)
if node.cargs:
- cwire_list = [self._wire_map[carg] for carg in node.cargs]
+ cwire_list = [
+ self._wire_map[carg] for carg in node.cargs if carg in self._clbits
+ ]
else:
cwire_list = []
@@ -582,7 +584,7 @@ def _build_measure(self, node, col):
def _build_barrier(self, node, col):
"""Build a partial or full barrier if plot_barriers set"""
if self._plot_barriers:
- indexes = [self._wire_map[qarg] for qarg in node.qargs]
+ indexes = [self._wire_map[qarg] for qarg in node.qargs if qarg in self._qubits]
indexes.sort()
first = last = indexes[0]
for index in indexes[1:]:
diff --git a/qiskit/visualization/matplotlib.py b/qiskit/visualization/matplotlib.py
--- a/qiskit/visualization/matplotlib.py
+++ b/qiskit/visualization/matplotlib.py
@@ -574,15 +574,17 @@ def _get_coords(self, n_lines):
# get qubit index
q_indxs = []
for qarg in node.qargs:
- q_indxs.append(self._wire_map[qarg])
+ if qarg in self._qubits:
+ q_indxs.append(self._wire_map[qarg])
c_indxs = []
for carg in node.cargs:
- register = get_bit_register(self._circuit, carg)
- if register is not None and self._cregbundle:
- c_indxs.append(self._wire_map[register])
- else:
- c_indxs.append(self._wire_map[carg])
+ if carg in self._clbits:
+ register = get_bit_register(self._circuit, carg)
+ if register is not None and self._cregbundle:
+ c_indxs.append(self._wire_map[register])
+ else:
+ c_indxs.append(self._wire_map[carg])
# qubit coordinate
self._data[node]["q_xy"] = [
| mpl circ drawer fails with `idle_wires=False`
### Environment
- **Qiskit Terra version**: latest
- **Python version**:
- **Operating system**:
### What is happening?
```python
qc = QuantumCircuit(6, 5)
qc.x(5)
qc.h(range(6))
qc.cx(range(5),5)
qc.h(range(5))
qc.measure(range(5), range(5))
backend = provider.get_backend('ibm_geneva')
trans_qc = transpile(qc, backend, optimization_level=3, approximation_degree=0)
trans_qc.draw('mpl', idle_wires=False)
```
gives:
```
Traceback (most recent call last):
Input In [9] in <cell line: 2>
trans_qc.draw('mpl', idle_wires=False)
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/circuit/quantumcircuit.py:1907 in draw
return circuit_drawer(
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/circuit_visualization.py:231 in circuit_drawer
image = _matplotlib_circuit_drawer(
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/circuit_visualization.py:601 in _matplotlib_circuit_drawer
return qcd.draw(filename)
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/matplotlib.py:323 in draw
max_anc = self._get_coords(n_lines)
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/matplotlib.py:579 in _get_coords
q_indxs.append(self._wire_map[qarg])
KeyError: Qubit(QuantumRegister(27, 'q'), 0)
```
### How can we reproduce the issue?
Do above
### What should happen?
The drawer should not fail.
### Any suggestions?
_No response_
| A self-contained reproducer to assist debugging:
```python
from qiskit import QuantumCircuit, transpile
from qiskit.providers.fake_provider import FakeKolkata
qc = QuantumCircuit(6, 5)
qc.x(5)
qc.h(range(6))
qc.cx(range(5),5)
qc.h(range(5))
qc.measure(range(5), range(5))
trans_qc = transpile(qc, FakeKolkata(), optimization_level=3, approximation_degree=0)
trans_qc.draw('mpl', idle_wires=False)
``` | 2022-07-11T16:27:45Z | [] | [] |
Traceback (most recent call last):
Input In [9] in <cell line: 2>
trans_qc.draw('mpl', idle_wires=False)
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/circuit/quantumcircuit.py:1907 in draw
return circuit_drawer(
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/circuit_visualization.py:231 in circuit_drawer
image = _matplotlib_circuit_drawer(
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/circuit_visualization.py:601 in _matplotlib_circuit_drawer
return qcd.draw(filename)
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/matplotlib.py:323 in draw
max_anc = self._get_coords(n_lines)
File /opt/miniconda3/envs/qiskit/lib/python3.10/site-packages/qiskit/visualization/matplotlib.py:579 in _get_coords
q_indxs.append(self._wire_map[qarg])
KeyError: Qubit(QuantumRegister(27, 'q'), 0)
| 2,047 |
|||
Qiskit/qiskit | Qiskit__qiskit-885 | 17fda19325960d1c17912bb34744f24f73f7469f | diff --git a/qiskit/_compositegate.py b/qiskit/_compositegate.py
--- a/qiskit/_compositegate.py
+++ b/qiskit/_compositegate.py
@@ -15,7 +15,7 @@
class CompositeGate(Gate):
"""Composite gate, a sequence of unitary gates."""
- def __init__(self, name, param, args, circuit=None):
+ def __init__(self, name, param, args, circuit=None, inverse_name=None):
"""Create a new composite gate.
name = instruction name string
@@ -26,6 +26,7 @@ def __init__(self, name, param, args, circuit=None):
super().__init__(name, param, args, circuit)
self.data = [] # gate sequence defining the composite unitary
self.inverse_flag = False
+ self.inverse_name = inverse_name or (name + 'dg')
def instruction_list(self):
"""Return a list of instructions for this CompositeGate.
diff --git a/qiskit/dagcircuit/_dagcircuit.py b/qiskit/dagcircuit/_dagcircuit.py
--- a/qiskit/dagcircuit/_dagcircuit.py
+++ b/qiskit/dagcircuit/_dagcircuit.py
@@ -1330,11 +1330,17 @@ def property_summary(self):
return summary
@staticmethod
- def fromQuantumCircuit(circuit):
- """Returns a DAGCircuit object from a QuantumCircuit
+ def fromQuantumCircuit(circuit, expand_gates=True):
+ """Build a ``DAGCircuit`` object from a ``QuantumCircuit``.
- None of the gates are expanded, i.e. the gates that are defined in the
- circuit are included in the gate basis.
+ Args:
+ circuit (QuantumCircuit): the input circuit.
+ expand_gates (bool): if ``False``, none of the gates are expanded,
+ i.e. the gates that are defined in the circuit are included in
+ the DAG basis.
+
+ Return:
+ DAGCircuit: the DAG representing the input circuit.
"""
dagcircuit = DAGCircuit()
dagcircuit.name = circuit.name
@@ -1367,10 +1373,12 @@ def fromQuantumCircuit(circuit):
# TODO: generate definitions and nodes for CompositeGates,
# for now simply drop their instructions into the DAG
instruction_list = []
- if isinstance(main_instruction, CompositeGate):
+ is_composite = isinstance(main_instruction, CompositeGate)
+ if is_composite and expand_gates:
instruction_list = main_instruction.instruction_list()
else:
instruction_list.append(main_instruction)
+
for instruction in instruction_list:
# Add OpenQASM built-in gates on demand
if instruction.name in builtins:
@@ -1390,7 +1398,15 @@ def fromQuantumCircuit(circuit):
control = None
else:
control = (instruction.control[0].name, instruction.control[1])
- dagcircuit.apply_operation_back(instruction.name, qargs, cargs,
+
+ if is_composite and not expand_gates:
+ is_inverse = instruction.inverse_flag
+ name = instruction.name if not is_inverse else instruction.inverse_name
+
+ else:
+ name = instruction.name
+
+ dagcircuit.apply_operation_back(name, qargs, cargs,
instruction.param,
control)
return dagcircuit
diff --git a/qiskit/tools/visualization/_circuit_visualization.py b/qiskit/tools/visualization/_circuit_visualization.py
--- a/qiskit/tools/visualization/_circuit_visualization.py
+++ b/qiskit/tools/visualization/_circuit_visualization.py
@@ -31,11 +31,10 @@
from matplotlib import get_backend as get_matplotlib_backend, \
patches as patches, pyplot as plt
-from qiskit._quantumcircuit import QuantumCircuit
-from qiskit._qiskiterror import QISKitError
-from qiskit.wrapper import load_qasm_file
-from qiskit.qasm import Qasm
-from qiskit.unroll import Unroller, JsonBackend
+from qiskit import QuantumCircuit, QISKitError, load_qasm_file
+
+from qiskit.dagcircuit import DAGCircuit
+from qiskit.transpiler import transpile
logger = logging.getLogger(__name__)
@@ -348,13 +347,8 @@ def generate_latex_source(circuit, filename=None,
Returns:
str: Latex string appropriate for writing to file.
"""
- ast = Qasm(data=circuit.qasm()).parse()
- if basis:
- # Split basis only if it is not the empty string.
- basis = basis.split(',')
- u = Unroller(ast, JsonBackend(basis))
- u.execute()
- json_circuit = u.backend.circuit
+ dag_circuit = DAGCircuit.fromQuantumCircuit(circuit, expand_gates=False)
+ json_circuit = transpile(dag_circuit, basis_gates=basis, format='json')
qcimg = QCircuitImage(json_circuit, scale, style=style)
latex = qcimg.latex()
if filename:
@@ -1391,7 +1385,7 @@ def __init__(self,
scale=1.0, style=None):
self._ast = None
- self._basis = basis.split(',')
+ self._basis = basis
self._scale = DEFAULT_SCALE * scale
self._creg = []
self._qreg = []
@@ -1421,14 +1415,12 @@ def __init__(self,
self.ax.tick_params(labelbottom='off', labeltop='off', labelleft='off', labelright='off')
def load_qasm_file(self, filename):
- circuit = load_qasm_file(filename, name='draw', basis_gates=','.join(self._basis))
+ circuit = load_qasm_file(filename, name='draw', basis_gates=self._basis)
self.parse_circuit(circuit)
def parse_circuit(self, circuit: QuantumCircuit):
- ast = Qasm(data=circuit.qasm()).parse()
- u = Unroller(ast, JsonBackend(self._basis))
- u.execute()
- self._ast = u.backend.circuit
+ dag_circuit = DAGCircuit.fromQuantumCircuit(circuit, expand_gates=False)
+ self._ast = transpile(dag_circuit, basis_gates=self._basis, format='json')
self._registers()
self._ops = self._ast['instructions']
| Using simulator instructions breaks latex circuit drawer
### What is the current behavior?
Using circuit simulator instructions (such as [`snapshot`](https://qiskit.org/documentation/_autodoc/qiskit._quantumcircuit.html?highlight=snapshot#qiskit._quantumcircuit.QuantumCircuit.snapshot)) causes those instructions to be added to the basis of the DAG, which in turn causes the circuit drawer to raise:
```sh
circuit_drawer(qc).show()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/tools/visualization/_circuit_visualization.py", line 77, in circuit_drawer
return latex_circuit_drawer(circuit, basis, scale, filename)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/tools/visualization/_circuit_visualization.py", line 105, in latex_circuit_drawer
generate_latex_source(circuit, filename=tmppath, basis=basis, scale=scale)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/tools/visualization/_circuit_visualization.py", line 177, in generate_latex_source
ast = Qasm(data=circuit.qasm()).parse()
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasm.py", line 49, in parse
return qasm_p.parse(self._data)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasmparser.py", line 1065, in parse
self.parser.parse(data, lexer=self.lexer, debug=self.parse_deb)
File "/Users/salva/workspace/qiskit-sdk-py/.venv/lib/python3.6/site-packages/ply/yacc.py", line 333, in parse
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
File "/Users/salva/workspace/qiskit-sdk-py/.venv/lib/python3.6/site-packages/ply/yacc.py", line 1120, in parseopt_notrack
p.callable(pslice)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasmparser.py", line 651, in p_unitary_op_4
self.verify_as_gate(program[1], program[5], arglist=program[3])
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasmparser.py", line 126, in verify_as_gate
+ "', line", str(obj.line), 'file', obj.file)
qiskit.qasm._qasmerror.QasmError: "Cannot find gate definition for 'snapshot', line 6 file "
```
### Steps to reproduce the problem
```python
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import circuit_drawer
from qiskit.extensions import simulator
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.snapshot('0')
qc.measure(q, c)
circuit_drawer(qc).show()
```
### Expected behavior
| Also for the experiment.
@ajavadia can you provide the steps to reproduce and the expected behavior, please?
Since it is my bug, I will. But it is pretty clear from the description.
```python
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import circuit_drawer
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.snapshot('0')
qc.measure(q, c)
circuit_drawer(qc).show()
```
@jaygambetta this is the output I'm getting. Since I and other contributors may not be used to the quantum circuit representation, what's the expected behaviour?
![captura de pantalla 2018-08-07 a las 16 09 28](https://user-images.githubusercontent.com/757942/43781098-6bf81c1c-9a5c-11e8-9891-a5ea3ed569fe.png)
Hmm no, it crashes, but I think I know why. In your corrections to my code, could you add
```python
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import circuit_drawer
from qiskit.extensions import simulator
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
qc.h(q)
qc.snapshot('0')
qc.measure(q, c)
circuit_drawer(qc).show()
```
and it should give you the error. I think your code is just ignoring the snapshot.
QasmError: "Cannot find gate definition for 'snapshot', line 6 file "
Thank you, Jay, for fixing the steps to reproduce. With all the information in place, I'm completing the summary and assigning it to myself.
@ajavadia what's the expected behaviour? Is it "not crashing and displaying [the circuit](https://github.com/Qiskit/qiskit-terra/issues/732#issuecomment-411070422)", or should the snapshot be displayed in some way?
@delapuente the expected behavior is that it should not crash. The reason for the crash seems to be that these simulator instructions get added to the basis of DAGCircuit, but are not recognized by the drawer or the device experiment.
Displaying the snapshot/barrier/etc. is tracked in a separate PR #731.
What happens is that the QASM DAG parser cannot verify that `snapshot` is a gate because it is not in the global symbol table. The QASM the parser is trying to parse is:
```
OPENQASM 2.0;
include "qelib1.inc";
qreg q0[1];
creg c0[1];
snapshot(0) q0[0];
```
Which is, indeed, incorrect since `snapshot` is not defined. The correct one would be:
```
OPENQASM 2.0;
include "qelib1.inc";
opaque snapshot(n) q;
qreg q0[1];
creg c0[1];
snapshot(0) q0[0];
```
Notice the `opaque` definition.
A quick workaround is to make the `qasm()` method for `snapshot` emit a comment like `#snapshot(0) q0[0];`, but this would cause the DAG not to include the `snapshot` node.
A more elaborate solution is to add support for extensions to provide their own definitions, so the simulator extension can include the corresponding `opaque` declaration.
Another solution could be to hardcode the opaque definition of `snapshot` in the `qelib1.inc` and in the basis.
For some reason, even when adding the opaque definition, the JSON unroller complains if the opaque is not in the basis.
@ajavadia @jaygambetta any preference?
@delapuente let's not change `qelib1.inc`.
I don't quite understand how the first solution would work. Why is this a qasm problem? There should be no qasm text generated, just the circuit and the dag.
Any gate not in the standard `qelib1.inc` should be dynamically added to the dag basis when the dag is created. The source of error here is that it is not found in the dag basis. Then I think we should teach the visualizer to plot snapshot the same way as it does barrier.
> The source of error here is that it is not found in the dag basis.
I think there is a misunderstanding about how circuit visualization runs right now. DAGs are never used in the current implementation of the visualizations. They use the JSON representation of the circuits but, instead of getting there through the DAG, they use the QASM AST:
https://github.com/Qiskit/qiskit-terra/blob/9fb88b035a47eee2ad8e57ddf184049c35e31933/qiskit/tools/visualization/_circuit_visualization.py#L349-L355
Anyway @ajavadia, you gave me a clue!
> Any gate not in the standard qelib1.inc should be dynamically added to the dag basis when the dag is created.
And I'm going to modify the JSON generation, making it use the `DagUnroller` instead of the QASM one.
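For reference, this is the route the diff above takes inside `generate_latex_source(circuit, ..., basis=...)`; the two statements below are lifted from the patch (with comments added), and `circuit`/`basis` are that function's parameters:
```python
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler import transpile

# Build the DAG directly from the circuit; expand_gates=False keeps the gates
# defined in the circuit in the DAG basis instead of expanding composites.
dag_circuit = DAGCircuit.fromQuantumCircuit(circuit, expand_gates=False)

# Unroll to the JSON form the drawers consume, replacing the old QASM-text
# dump/re-parse step that could not resolve `snapshot`.
json_circuit = transpile(dag_circuit, basis_gates=basis, format='json')
```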
Yeah, that's bad code: it's dumping qasm text, then reparsing it. It should use the DagUnroller. | 2018-09-10T18:53:12Z | [] | [] |
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/tools/visualization/_circuit_visualization.py", line 77, in circuit_drawer
return latex_circuit_drawer(circuit, basis, scale, filename)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/tools/visualization/_circuit_visualization.py", line 105, in latex_circuit_drawer
generate_latex_source(circuit, filename=tmppath, basis=basis, scale=scale)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/tools/visualization/_circuit_visualization.py", line 177, in generate_latex_source
ast = Qasm(data=circuit.qasm()).parse()
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasm.py", line 49, in parse
return qasm_p.parse(self._data)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasmparser.py", line 1065, in parse
self.parser.parse(data, lexer=self.lexer, debug=self.parse_deb)
File "/Users/salva/workspace/qiskit-sdk-py/.venv/lib/python3.6/site-packages/ply/yacc.py", line 333, in parse
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
File "/Users/salva/workspace/qiskit-sdk-py/.venv/lib/python3.6/site-packages/ply/yacc.py", line 1120, in parseopt_notrack
p.callable(pslice)
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasmparser.py", line 651, in p_unitary_op_4
self.verify_as_gate(program[1], program[5], arglist=program[3])
File "/Users/salva/workspace/qiskit-sdk-py/qiskit/qasm/_qasmparser.py", line 126, in verify_as_gate
+ "', line", str(obj.line), 'file', obj.file)
qiskit.qasm._qasmerror.QasmError: "Cannot find gate definition for 'snapshot', line 6 file "
| 2,112 |
|||
Qiskit/qiskit | Qiskit__qiskit-8978 | de8e4dd83627a63fb00f3c8f65bd1dd8bb7e2eac | diff --git a/qiskit/transpiler/passes/layout/vf2_layout.py b/qiskit/transpiler/passes/layout/vf2_layout.py
--- a/qiskit/transpiler/passes/layout/vf2_layout.py
+++ b/qiskit/transpiler/passes/layout/vf2_layout.py
@@ -154,6 +154,11 @@ def run(self, dag):
if len(cm_graph) == len(im_graph):
chosen_layout = layout
break
+ # If there is no error map avilable we can just skip the scoring stage as there
+ # is nothing to score with, so any match is the best we can find.
+ if not self.avg_error_map:
+ chosen_layout = layout
+ break
layout_score = vf2_utils.score_layout(
self.avg_error_map,
layout,
@@ -162,6 +167,12 @@ def run(self, dag):
im_graph,
self.strict_direction,
)
+ # If the layout score is 0 we can't do any better and we'll just
+ # waste time finding additional mappings that will at best match
+ # the performance, so exit early in this case
+ if layout_score == 0.0:
+ chosen_layout = layout
+ break
logger.debug("Trial %s has score %s", trials, layout_score)
if chosen_layout is None:
chosen_layout = layout
diff --git a/qiskit/transpiler/passes/layout/vf2_post_layout.py b/qiskit/transpiler/passes/layout/vf2_post_layout.py
--- a/qiskit/transpiler/passes/layout/vf2_post_layout.py
+++ b/qiskit/transpiler/passes/layout/vf2_post_layout.py
@@ -13,6 +13,7 @@
"""VF2PostLayout pass to find a layout after transpile using subgraph isomorphism"""
from enum import Enum
import logging
+import inspect
import time
from retworkx import PyDiGraph, vf2_mapping, PyGraph
@@ -131,6 +132,7 @@ def run(self, dag):
self.avg_error_map = vf2_utils.build_average_error_map(
self.target, self.properties, self.coupling_map
)
+
result = vf2_utils.build_interaction_graph(dag, self.strict_direction)
if result is None:
self.property_set["VF2PostLayout_stop_reason"] = VF2PostLayoutStopReason.MORE_THAN_2Q
@@ -138,21 +140,53 @@ def run(self, dag):
im_graph, im_graph_node_map, reverse_im_graph_node_map = result
if self.target is not None:
+ # If qargs is None then target is global and ideal so no
+ # scoring is needed
+ if self.target.qargs is None:
+ return
if self.strict_direction:
cm_graph = PyDiGraph(multigraph=False)
else:
cm_graph = PyGraph(multigraph=False)
- cm_graph.add_nodes_from(
- [self.target.operation_names_for_qargs((i,)) for i in range(self.target.num_qubits)]
- )
+ # If None is present in qargs there are globally defined ideal operations
+ # we should add these to all entries based on the number of qubits so we
+ # treat that as a valid operation even if there is no scoring for the
+ # strict direction case
+ global_ops = None
+ if None in self.target.qargs:
+ global_ops = {1: [], 2: []}
+ for op in self.target.operation_names_for_qargs(None):
+ operation = self.target.operation_for_name(op)
+ # If operation is a class this is a variable width ideal instruction
+ # so we treat it as available on both 1 and 2 qubits
+ if inspect.isclass(operation):
+ global_ops[1].append(op)
+ global_ops[2].append(op)
+ else:
+ num_qubits = operation.num_qubits
+ if num_qubits in global_ops:
+ global_ops[num_qubits].append(op)
+ op_names = []
+ for i in range(self.target.num_qubits):
+ entry = set()
+ try:
+ entry = set(self.target.operation_names_for_qargs((i,)))
+ except KeyError:
+ pass
+ if global_ops is not None:
+ entry.update(global_ops[1])
+ op_names.append(entry)
+
+ cm_graph.add_nodes_from(op_names)
for qargs in self.target.qargs:
len_args = len(qargs)
# If qargs == 1 we already populated it and if qargs > 2 there are no instructions
# using those in the circuit because we'd have already returned by this point
if len_args == 2:
- cm_graph.add_edge(
- qargs[0], qargs[1], self.target.operation_names_for_qargs(qargs)
- )
+ ops = set(self.target.operation_names_for_qargs(qargs))
+ if global_ops is not None:
+ ops.update(global_ops[2])
+ cm_graph.add_edge(qargs[0], qargs[1], ops)
cm_nodes = list(cm_graph.node_indexes())
else:
cm_graph, cm_nodes = vf2_utils.shuffle_coupling_graph(
diff --git a/qiskit/transpiler/passes/layout/vf2_utils.py b/qiskit/transpiler/passes/layout/vf2_utils.py
--- a/qiskit/transpiler/passes/layout/vf2_utils.py
+++ b/qiskit/transpiler/passes/layout/vf2_utils.py
@@ -84,13 +84,16 @@ def score_layout(avg_error_map, layout, bit_map, reverse_bit_map, im_graph, stri
fidelity = 1
for bit, node_index in bit_map.items():
gate_count = sum(im_graph[node_index].values())
- fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
+ error_rate = avg_error_map.get((bits[bit],))
+ if error_rate is not None:
+ fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
for edge in im_graph.edge_index_map().values():
gate_count = sum(edge[2].values())
qargs = (bits[reverse_bit_map[edge[0]]], bits[reverse_bit_map[edge[1]]])
if not strict_direction and qargs not in avg_error_map:
qargs = (qargs[1], qargs[0])
- fidelity *= (1 - avg_error_map[qargs]) ** gate_count
+ if qargs in avg_error_map:
+ fidelity *= (1 - avg_error_map[qargs]) ** gate_count
return 1 - fidelity
| VF2 layout fails with cryptic error message when instruction properties are not available
### Environment
- **Qiskit Terra version**: 0.22.0
- **Python version**: 3.9.12
- **Operating system**: Windows 10
### What is happening?
```
Traceback (most recent call last):
File ".\src\playground.py", line 87, in <module>
transpile(qc, target=target, optimization_level=2)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 382, in transpile
_serial_transpile_circuit(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 475, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 172, in _do_pass
dag = self._run_this_pass(pass_, dag)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 226, in _run_this_pass
pass_.run(FencedDAGCircuit(dag))
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_layout.py", line 157, in run
layout_score = vf2_utils.score_layout(
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_utils.py", line 87, in score_layout
fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
KeyError: (5,)
```
### How can we reproduce the issue?
The code below reproduces the error pasted above:
```
from qiskit import QuantumCircuit
from qiskit.circuit.library import CXGate
from qiskit.compiler import transpile
from qiskit.transpiler import Target
n_qubits = 15
target = Target()
target.add_instruction(CXGate(), {(i, i+1): None for i in range(n_qubits - 1)})
qc = QuantumCircuit(2)
qc.cx(0, 1)
transpile(qc, target=target, optimization_level=2)
```
My understanding is that the VF2 layout method tries to score the layout based on average error rates, but fails because the instructions do not have properties, hence no average error rate is available.
Note that the properties for `CXGate` instructions are specified as `None`, which is a valid value and indicates that no properties are available. The documentation of `qiskit.transpiler.Target.add_instruction` says `Properties are optional for any instruction implementation, if there are no InstructionProperties available for the backend the value can be None.`.
Note that `optimization_level=2` is important here. The error does not happen with lower optimization levels.
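For context, this is what supplying explicit properties for the same target would look like; the error/duration numbers below are invented for illustration, and this is shown only to clarify what the optional `InstructionProperties` carry, not claimed as a workaround for this bug:
```python
from qiskit.circuit.library import CXGate
from qiskit.transpiler import InstructionProperties, Target

n_qubits = 15
target = Target(num_qubits=n_qubits)
target.add_instruction(
    CXGate(),
    {
        # Hypothetical calibration data: 1% error and 300 ns duration per edge.
        (i, i + 1): InstructionProperties(error=1e-2, duration=3e-7)
        for i in range(n_qubits - 1)
    },
)
```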
### What should happen?
I am not fully aware of the philosophy behind the design of the VF2 layout method, but I think at least one of the following two options should happen instead of the error pasted above:
- If layout scoring based on average error rates is a mandatory component of VF2 layout method, then it should raise a better explanatory error saying something like "Hey, you are trying to use a layout method that mandatorily requires average errors, but you do not have any instruction properties in your target, so please use other layout methods, e.g. 'dense', 'sabre', etc.".
- Else, the transpilation should not fail with any error and should produce a properly transpiled result.
### Any suggestions?
_No response_
 | Yeah, this is a bug: `VF2Layout` should operate even if there are no error rates present in the target. In those cases it just shouldn't do the heuristic scoring and should just pick the first isomorphic subgraph it finds.
Just for posterity, the `VF2PostLayout` pass is different and does require scores, but in that case it still shouldn't raise; it should just exit if no error rates are present. | 2022-10-20T21:07:23Z | [] | [] |
Traceback (most recent call last):
File ".\src\playground.py", line 87, in <module>
transpile(qc, target=target, optimization_level=2)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 382, in transpile
_serial_transpile_circuit(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 475, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 172, in _do_pass
dag = self._run_this_pass(pass_, dag)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 226, in _run_this_pass
pass_.run(FencedDAGCircuit(dag))
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_layout.py", line 157, in run
layout_score = vf2_utils.score_layout(
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_utils.py", line 87, in score_layout
fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
KeyError: (5,)
| 2,132 |
|||
Qiskit/qiskit | Qiskit__qiskit-8989 | de35b90ddb2f753066070c75a512291087a10968 | diff --git a/qiskit/algorithms/eigensolvers/vqd.py b/qiskit/algorithms/eigensolvers/vqd.py
--- a/qiskit/algorithms/eigensolvers/vqd.py
+++ b/qiskit/algorithms/eigensolvers/vqd.py
@@ -91,13 +91,16 @@ class VQD(VariationalAlgorithm, Eigensolver):
optimizer(Optimizer): A classical optimizer. Can either be a Qiskit optimizer or a callable
that takes an array as input and returns a Qiskit or SciPy optimization result.
k (int): the number of eigenvalues to return. Returns the lowest k eigenvalues.
- betas (list[float]): beta parameters in the VQD paper.
+ betas (list[float]): Beta parameters in the VQD paper.
Should have length k - 1, with k the number of excited states.
These hyper-parameters balance the contribution of each overlap term to the cost
function and have a default value computed as the mean square sum of the
coefficients of the observable.
+ initial point (list[float]): An optional initial point (i.e. initial parameter values)
+ for the optimizer. If ``None`` then VQD will look to the ansatz for a preferred
+ point and if not will simply compute a random one.
callback (Callable[[int, np.ndarray, float, dict[str, Any]], None] | None):
- a callback that can access the intermediate data
+ A callback that can access the intermediate data
during the optimization. Four parameter values are passed to the callback as
follows during each evaluation by the optimizer: the evaluation count,
the optimizer parameters for the ansatz, the estimated value, the estimation
@@ -124,16 +127,16 @@ def __init__(
ansatz: A parameterized circuit used as ansatz for the wave function.
optimizer: A classical optimizer. Can either be a Qiskit optimizer or a callable
that takes an array as input and returns a Qiskit or SciPy optimization result.
- k: the number of eigenvalues to return. Returns the lowest k eigenvalues.
- betas: beta parameters in the VQD paper.
+ k: The number of eigenvalues to return. Returns the lowest k eigenvalues.
+ betas: Beta parameters in the VQD paper.
Should have length k - 1, with k the number of excited states.
These hyperparameters balance the contribution of each overlap term to the cost
function and have a default value computed as the mean square sum of the
coefficients of the observable.
- initial_point: an optional initial point (i.e. initial parameter values)
+ initial_point: An optional initial point (i.e. initial parameter values)
for the optimizer. If ``None`` then VQD will look to the ansatz for a preferred
point and if not will simply compute a random one.
- callback: a callback that can access the intermediate data
+ callback: A callback that can access the intermediate data
during the optimization. Four parameter values are passed to the callback as
follows during each evaluation by the optimizer: the evaluation count,
the optimizer parameters for the ansatz, the estimated value,
@@ -238,11 +241,19 @@ def compute_eigenvalues(
if aux_operators is not None:
aux_values = []
+ # We keep a list of the bound circuits with optimal parameters, to avoid re-binding
+ # the same parameters to the ansatz if we do multiple steps
+ prev_states = []
+
for step in range(1, self.k + 1):
+ # update list of optimal circuits
+ if step > 1:
+ prev_states.append(self.ansatz.bind_parameters(result.optimal_points[-1]))
+
self._eval_count = 0
energy_evaluation = self._get_evaluate_energy(
- step, operator, betas, prev_states=result.optimal_parameters
+ step, operator, betas, prev_states=prev_states
)
start_time = time()
@@ -304,7 +315,7 @@ def _get_evaluate_energy(
step: int,
operator: BaseOperator | PauliSumOp,
betas: Sequence[float],
- prev_states: list[np.ndarray] | None = None,
+ prev_states: list[QuantumCircuit] | None = None,
) -> Callable[[np.ndarray], float | list[float]]:
"""Returns a function handle to evaluate the ansatz's energy for any given parameters.
This is the objective function to be passed to the optimizer that is used for evaluation.
@@ -312,7 +323,7 @@ def _get_evaluate_energy(
Args:
step: level of energy being calculated. 0 for ground, 1 for first excited state...
operator: The operator whose energy to evaluate.
- prev_states: List of parameters from previous rounds of optimization.
+ prev_states: List of optimal circuits from previous rounds of optimization.
Returns:
A callable that computes and returns the energy of the hamiltonian
@@ -336,10 +347,6 @@ def _get_evaluate_energy(
self._check_operator_ansatz(operator)
- prev_circs = []
- for state in range(step - 1):
- prev_circs.append(self.ansatz.bind_parameters(prev_states[state]))
-
def evaluate_energy(parameters: np.ndarray) -> np.ndarray | float:
try:
@@ -355,10 +362,9 @@ def evaluate_energy(parameters: np.ndarray) -> np.ndarray | float:
if step > 1:
# Compute overlap cost
fidelity_job = self.fidelity.run(
- [self.ansatz] * len(prev_circs),
- prev_circs,
- [parameters] * len(prev_circs),
- [prev_states[:-1]],
+ [self.ansatz] * (step - 1),
+ prev_states,
+ [parameters] * (step - 1),
)
costs = fidelity_job.result().fidelities
| VQD with primitives for k > 2 fails to validate parameter values
### Environment
- **Qiskit Terra version**: 0.22.0
- **Python version**: 3.9.6
- **Operating system**: Fedora 36
### What is happening?
When running `VQD` with primitives, the program fails to validate parameter values when calculating fidelities for k > 2. The ground and first excited states are found successfully, but when the algorithm gets to the second excited state, the sampler used for the fidelity fails to validate the parameters when `VQD` passes the parameters to the `run()` method of `ComputeUncompute`. This seems to have slipped past unit tests because those for `VQD` only test for k=1 and k=2.
### How can we reproduce the issue?
The following code reproduces the issue:
```
from qiskit.algorithms.eigensolvers import VQD
from qiskit.algorithms.state_fidelities import ComputeUncompute
from qiskit.primitives import Estimator, Sampler
from qiskit.algorithms.optimizers import COBYLA
from qiskit.circuit.library import RealAmplitudes
from qiskit.quantum_info import Pauli
from qiskit_aer import AerSimulator
backend = AerSimulator(method='statevector')
qubit_op = Pauli('ZZ')
ansatz = RealAmplitudes(2)
cobyla = COBYLA()
estimator = Estimator()
sampler = Sampler()
fidelity = ComputeUncompute(sampler=sampler)
betas = [2, 2]
def print_intermediate_result(eval_count, params, estimated_value, estimation_metadata, current_step):
print(f'eval count: {eval_count}, energy: {estimated_value}, step: {current_step}')
vqd_instance = VQD(estimator=estimator,
fidelity=fidelity,
ansatz=ansatz,
optimizer=cobyla,
k=3,
betas=betas,
initial_point=None,
callback=print_intermediate_result)
result = vqd_instance.compute_eigenvalues(operator=qubit_op)
print(result.eigenvalues)
```
This produces the following traceback:
```
capi_return is NULL
Call-back cb_calcfc_in__cobyla__user__routines failed.
Traceback (most recent call last):
File "/home/joel/Desktop/electronic-structure-methods/test/VQD/VQD_ bugtest.py", line 32, in <module>
result = vqd_instance.compute_eigenvalues(operator=qubit_op)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/eigensolvers/vqd.py", line 256, in compute_eigenvalues
opt_result = self.optimizer.minimize(
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/optimizers/scipy_optimizer.py", line 148, in minimize
raw_result = minimize(
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_minimize.py", line 705, in minimize
res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_cobyla_py.py", line 34, in wrapper
return func(*args, **kwargs)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_cobyla_py.py", line 273, in _minimize_cobyla
xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_cobyla_py.py", line 261, in calcfc
f = fun(np.copy(x), *args)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/eigensolvers/vqd.py", line 363, in evaluate_energy
costs = fidelity_job.result().fidelities
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/primitives/primitive_job.py", line 50, in result
return self._future.result()
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/concurrent/futures/_base.py", line 438, in result
return self.__get_result()
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/concurrent/futures/_base.py", line 390, in __get_result
raise self._exception
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/concurrent/futures/thread.py", line 52, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/state_fidelities/compute_uncompute.py", line 141, in _run
job = self._sampler.run(circuits=circuits, parameter_values=values, **opts.__dict__)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/primitives/base/base_sampler.py", line 172, in run
parameter_values = self._validate_parameter_values(
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/primitives/base/base_primitive.py", line 108, in _validate_parameter_values
raise TypeError("Invalid parameter values, expected Sequence[Sequence[float]].")
TypeError: Invalid parameter values, expected Sequence[Sequence[float]].
```
### What should happen?
Algorithm completes successfully.
### Any suggestions?
I am still looking into why this occurs, but since the fidelity for the first excited state is computed successfully, my guess is that there is some error in the arguments that `VQD` passes to the `run()` method of `ComputeUncompute` on line 357 of `vqd.py`, which only becomes problematic when `len(prev_circs)` > 1.
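That is indeed what the diff above changes: the fixed call passes one copy of the ansatz, one bound circuit and one parameter sequence per previous state, and no `values_2` at all. A minimal sketch of the argument shapes `ComputeUncompute.run` accepts (the random parameter values here are purely illustrative):
```python
import numpy as np
from qiskit.algorithms.state_fidelities import ComputeUncompute
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Sampler

ansatz = RealAmplitudes(2)
fidelity = ComputeUncompute(sampler=Sampler())

params = np.random.default_rng(7).uniform(0, np.pi, ansatz.num_parameters)
prev_states = [ansatz.bind_parameters(params)]  # already-bound circuits need no values_2

job = fidelity.run(
    [ansatz] * len(prev_states),  # circuits_1: one copy of the ansatz per comparison
    prev_states,                  # circuits_2: the previously optimised, bound circuits
    [params] * len(prev_states),  # values_1: Sequence[Sequence[float]], one entry per circuit
)
print(job.result().fidelities)
```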
| @ElePT | 2022-10-25T12:34:02Z | [] | [] |
Traceback (most recent call last):
File "/home/joel/Desktop/electronic-structure-methods/test/VQD/VQD_ bugtest.py", line 32, in <module>
result = vqd_instance.compute_eigenvalues(operator=qubit_op)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/eigensolvers/vqd.py", line 256, in compute_eigenvalues
opt_result = self.optimizer.minimize(
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/optimizers/scipy_optimizer.py", line 148, in minimize
raw_result = minimize(
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_minimize.py", line 705, in minimize
res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_cobyla_py.py", line 34, in wrapper
return func(*args, **kwargs)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_cobyla_py.py", line 273, in _minimize_cobyla
xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/scipy/optimize/_cobyla_py.py", line 261, in calcfc
f = fun(np.copy(x), *args)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/eigensolvers/vqd.py", line 363, in evaluate_energy
costs = fidelity_job.result().fidelities
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/primitives/primitive_job.py", line 50, in result
return self._future.result()
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/concurrent/futures/_base.py", line 438, in result
return self.__get_result()
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/concurrent/futures/_base.py", line 390, in __get_result
raise self._exception
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/concurrent/futures/thread.py", line 52, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/algorithms/state_fidelities/compute_uncompute.py", line 141, in _run
job = self._sampler.run(circuits=circuits, parameter_values=values, **opts.__dict__)
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/primitives/base/base_sampler.py", line 172, in run
parameter_values = self._validate_parameter_values(
File "/home/joel/miniconda3/envs/Qiskit-0390/lib/python3.9/site-packages/qiskit/primitives/base/base_primitive.py", line 108, in _validate_parameter_values
raise TypeError("Invalid parameter values, expected Sequence[Sequence[float]].")
TypeError: Invalid parameter values, expected Sequence[Sequence[float]].
| 2,134 |
|||
Qiskit/qiskit | Qiskit__qiskit-8995 | 7db16a5a7f87666dee9e32164a4bbf42bf6a13ea | diff --git a/qiskit/transpiler/target.py b/qiskit/transpiler/target.py
--- a/qiskit/transpiler/target.py
+++ b/qiskit/transpiler/target.py
@@ -1043,24 +1043,25 @@ def target_to_backend_properties(target: Target):
continue
qubit = qargs[0]
props_list = []
- if props.error is not None:
- props_list.append(
- {
- "date": datetime.datetime.utcnow(),
- "name": "readout_error",
- "unit": "",
- "value": props.error,
- }
- )
- if props.duration is not None:
- props_list.append(
- {
- "date": datetime.datetime.utcnow(),
- "name": "readout_length",
- "unit": "s",
- "value": props.duration,
- }
- )
+ if props is not None:
+ if props.error is not None:
+ props_list.append(
+ {
+ "date": datetime.datetime.utcnow(),
+ "name": "readout_error",
+ "unit": "",
+ "value": props.error,
+ }
+ )
+ if props.duration is not None:
+ props_list.append(
+ {
+ "date": datetime.datetime.utcnow(),
+ "name": "readout_length",
+ "unit": "s",
+ "value": props.duration,
+ }
+ )
if not props_list:
qubit_props = {}
break
| Transpiling fails if the target does not specify properties for measurement instructions
### Environment
- **Qiskit Terra version**: 0.22.0
- **Python version**: 3.9.12
- **Operating system**: Windows 10
### What is happening?
```
Traceback (most recent call last):
File ".\src\playground.py", line 86, in <module>
qc_transpiled = transpile(qc, target=target)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 327, in transpile
unique_transpile_args, shared_args = _parse_transpile_args(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 646, in _parse_transpile_args
backend_properties = target_to_backend_properties(target)
File ".\lib\site-packages\qiskit\transpiler\target.py", line 1021, in target_to_backend_properties
if props.error is not None:
AttributeError: 'NoneType' object has no attribute 'error'
```
### How can we reproduce the issue?
When building a transpiler target, the properties for added instructions are optional. The documentation of `qiskit.transpiler.Target.add_instruction` says `Properties are optional for any instruction implementation, if there are no InstructionProperties available for the backend the value can be None.`. However, it seems this claim is true for all instructions except measurement.
The code snippet below shows how to construct a very simple target with one qubit and a measurement instruction associated with it. The measurement instruction does not have any properties (i.e. they are specified as `None`). Then, this target is used to transpile a circuit, which results in the error pasted above:
```
from qiskit import QuantumCircuit
from qiskit.circuit.library import Measure
from qiskit.compiler import transpile
from qiskit.transpiler import Target
target = Target()
target.add_instruction(Measure(), {(0,): None})
qc = QuantumCircuit(1, 1)
qc.measure(0, 0)
transpile(qc, target=target)
```
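For comparison, this is what the same measurement entry looks like when properties are available (illustrative numbers only); the `error`/`duration` fields are what the patched `target_to_backend_properties` above reads out as `readout_error`/`readout_length`:
```python
from qiskit.circuit.library import Measure
from qiskit.transpiler import InstructionProperties, Target

target = Target()
# Hypothetical readout data: 1% readout error and a 1 microsecond measurement.
target.add_instruction(
    Measure(), {(0,): InstructionProperties(error=0.01, duration=1e-6)}
)
```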
### What should happen?
The transpilation should not fail
### Any suggestions?
_No response_
| 2022-10-25T19:23:19Z | [] | [] |
Traceback (most recent call last):
File ".\src\playground.py", line 86, in <module>
qc_transpiled = transpile(qc, target=target)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 327, in transpile
unique_transpile_args, shared_args = _parse_transpile_args(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 646, in _parse_transpile_args
backend_properties = target_to_backend_properties(target)
File ".\lib\site-packages\qiskit\transpiler\target.py", line 1021, in target_to_backend_properties
if props.error is not None:
AttributeError: 'NoneType' object has no attribute 'error'
| 2,135 |
||||
Qiskit/qiskit | Qiskit__qiskit-8997 | 04fd2f2878f0cec162c65da7de3c50abc7d0bc00 | diff --git a/qiskit/transpiler/passes/layout/vf2_layout.py b/qiskit/transpiler/passes/layout/vf2_layout.py
--- a/qiskit/transpiler/passes/layout/vf2_layout.py
+++ b/qiskit/transpiler/passes/layout/vf2_layout.py
@@ -154,6 +154,11 @@ def run(self, dag):
if len(cm_graph) == len(im_graph):
chosen_layout = layout
break
+ # If there is no error map avilable we can just skip the scoring stage as there
+ # is nothing to score with, so any match is the best we can find.
+ if not self.avg_error_map:
+ chosen_layout = layout
+ break
layout_score = vf2_utils.score_layout(
self.avg_error_map,
layout,
@@ -162,6 +167,12 @@ def run(self, dag):
im_graph,
self.strict_direction,
)
+ # If the layout score is 0 we can't do any better and we'll just
+ # waste time finding additional mappings that will at best match
+ # the performance, so exit early in this case
+ if layout_score == 0.0:
+ chosen_layout = layout
+ break
logger.debug("Trial %s has score %s", trials, layout_score)
if chosen_layout is None:
chosen_layout = layout
diff --git a/qiskit/transpiler/passes/layout/vf2_post_layout.py b/qiskit/transpiler/passes/layout/vf2_post_layout.py
--- a/qiskit/transpiler/passes/layout/vf2_post_layout.py
+++ b/qiskit/transpiler/passes/layout/vf2_post_layout.py
@@ -13,6 +13,7 @@
"""VF2PostLayout pass to find a layout after transpile using subgraph isomorphism"""
from enum import Enum
import logging
+import inspect
import time
from retworkx import PyDiGraph, vf2_mapping, PyGraph
@@ -131,6 +132,7 @@ def run(self, dag):
self.avg_error_map = vf2_utils.build_average_error_map(
self.target, self.properties, self.coupling_map
)
+
result = vf2_utils.build_interaction_graph(dag, self.strict_direction)
if result is None:
self.property_set["VF2PostLayout_stop_reason"] = VF2PostLayoutStopReason.MORE_THAN_2Q
@@ -138,21 +140,53 @@ def run(self, dag):
im_graph, im_graph_node_map, reverse_im_graph_node_map = result
if self.target is not None:
+ # If qargs is None then target is global and ideal so no
+ # scoring is needed
+ if self.target.qargs is None:
+ return
if self.strict_direction:
cm_graph = PyDiGraph(multigraph=False)
else:
cm_graph = PyGraph(multigraph=False)
- cm_graph.add_nodes_from(
- [self.target.operation_names_for_qargs((i,)) for i in range(self.target.num_qubits)]
- )
+ # If None is present in qargs there are globally defined ideal operations
+ # we should add these to all entries based on the number of qubits so we
+ # treat that as a valid operation even if there is no scoring for the
+ # strict direction case
+ global_ops = None
+ if None in self.target.qargs:
+ global_ops = {1: [], 2: []}
+ for op in self.target.operation_names_for_qargs(None):
+ operation = self.target.operation_for_name(op)
+ # If operation is a class this is a variable width ideal instruction
+ # so we treat it as available on both 1 and 2 qubits
+ if inspect.isclass(operation):
+ global_ops[1].append(op)
+ global_ops[2].append(op)
+ else:
+ num_qubits = operation.num_qubits
+ if num_qubits in global_ops:
+ global_ops[num_qubits].append(op)
+ op_names = []
+ for i in range(self.target.num_qubits):
+ entry = set()
+ try:
+ entry = set(self.target.operation_names_for_qargs((i,)))
+ except KeyError:
+ pass
+ if global_ops is not None:
+ entry.update(global_ops[1])
+ op_names.append(entry)
+
+ cm_graph.add_nodes_from(op_names)
for qargs in self.target.qargs:
len_args = len(qargs)
# If qargs == 1 we already populated it and if qargs > 2 there are no instructions
# using those in the circuit because we'd have already returned by this point
if len_args == 2:
- cm_graph.add_edge(
- qargs[0], qargs[1], self.target.operation_names_for_qargs(qargs)
- )
+ ops = set(self.target.operation_names_for_qargs(qargs))
+ if global_ops is not None:
+ ops.update(global_ops[2])
+ cm_graph.add_edge(qargs[0], qargs[1], ops)
cm_nodes = list(cm_graph.node_indexes())
else:
cm_graph, cm_nodes = vf2_utils.shuffle_coupling_graph(
diff --git a/qiskit/transpiler/passes/layout/vf2_utils.py b/qiskit/transpiler/passes/layout/vf2_utils.py
--- a/qiskit/transpiler/passes/layout/vf2_utils.py
+++ b/qiskit/transpiler/passes/layout/vf2_utils.py
@@ -84,13 +84,16 @@ def score_layout(avg_error_map, layout, bit_map, reverse_bit_map, im_graph, stri
fidelity = 1
for bit, node_index in bit_map.items():
gate_count = sum(im_graph[node_index].values())
- fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
+ error_rate = avg_error_map.get((bits[bit],))
+ if error_rate is not None:
+ fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
for edge in im_graph.edge_index_map().values():
gate_count = sum(edge[2].values())
qargs = (bits[reverse_bit_map[edge[0]]], bits[reverse_bit_map[edge[1]]])
if not strict_direction and qargs not in avg_error_map:
qargs = (qargs[1], qargs[0])
- fidelity *= (1 - avg_error_map[qargs]) ** gate_count
+ if qargs in avg_error_map:
+ fidelity *= (1 - avg_error_map[qargs]) ** gate_count
return 1 - fidelity
| VF2 layout fails with cryptic error message when instruction properties are not available
### Environment
- **Qiskit Terra version**: 0.22.0
- **Python version**: 3.9.12
- **Operating system**: Windows 10
### What is happening?
```
Traceback (most recent call last):
File ".\src\playground.py", line 87, in <module>
transpile(qc, target=target, optimization_level=2)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 382, in transpile
_serial_transpile_circuit(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 475, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 172, in _do_pass
dag = self._run_this_pass(pass_, dag)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 226, in _run_this_pass
pass_.run(FencedDAGCircuit(dag))
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_layout.py", line 157, in run
layout_score = vf2_utils.score_layout(
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_utils.py", line 87, in score_layout
fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
KeyError: (5,)
```
### How can we reproduce the issue?
The code below reproduces the error pasted above:
```
from qiskit import QuantumCircuit
from qiskit.circuit.library import CXGate
from qiskit.compiler import transpile
from qiskit.transpiler import Target
n_qubits = 15
target = Target()
target.add_instruction(CXGate(), {(i, i+1): None for i in range(n_qubits - 1)})
qc = QuantumCircuit(2)
qc.cx(0, 1)
transpile(qc, target=target, optimization_level=2)
```
My understanding is that the VF2 layout method tries to score the layout based on average error rates, but fails because the instructions do not have properties, hence no average error rate is available.
Note that the properties for `CXGate` instructions are specified as `None`, which is a valid value and indicates that no properties are available. The documentation of `qiskit.transpiler.Target.add_instruction` says `Properties are optional for any instruction implementation, if there are no InstructionProperties available for the backend the value can be None.`.
Note that `optimization_level=2` is important here. The error does not happen with lower optimization levels.
### What should happen?
I am not fully aware of the philosophy behind the design of the VF2 layout method, but I think at least one of the following two options should happen instead of the error pasted above:
- If layout scoring based on average error rates is a mandatory component of VF2 layout method, then it should raise a better explanatory error saying something like "Hey, you are trying to use a layout method that mandatorily requires average errors, but you do not have any instruction properties in your target, so please use other layout methods, e.g. 'dense', 'sabre', etc.".
- Else, the transpilation should not fail with any error and should produce a properly transpiled result.
### Any suggestions?
_No response_
 | Yeah, this is a bug: `VF2Layout` should operate even if there are no error rates present in the target. In those cases it just shouldn't do the heuristic scoring and should just pick the first isomorphic subgraph it finds.
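For what it's worth, a toy sketch of that selection logic after the fix (all names here are mine, for illustration only; the real change is in the diff above):
```python
def pick_layout(candidate_layouts, avg_error_map, score):
    """Toy version of the post-fix selection loop: with no error map there is
    nothing to score, so the first complete candidate wins; otherwise the
    lowest-scoring (best) candidate is kept."""
    chosen, best = None, None
    for layout in candidate_layouts:
        if not avg_error_map:
            return layout  # nothing to score with: the first match is as good as any
        current = score(layout)
        if best is None or current < best:
            chosen, best = layout, current
    return chosen
```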
Just for posterity, the `VF2PostLayout` pass is different and does require scores, but in that case it still shouldn't raise; it should just exit if no error rates are present. | 2022-10-25T21:51:38Z | [] | [] |
Traceback (most recent call last):
File ".\src\playground.py", line 87, in <module>
transpile(qc, target=target, optimization_level=2)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 382, in transpile
_serial_transpile_circuit(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 475, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File ".\lib\site-packages\qiskit\transpiler\passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 172, in _do_pass
dag = self._run_this_pass(pass_, dag)
File ".\lib\site-packages\qiskit\transpiler\runningpassmanager.py", line 226, in _run_this_pass
pass_.run(FencedDAGCircuit(dag))
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_layout.py", line 157, in run
layout_score = vf2_utils.score_layout(
File ".\lib\site-packages\qiskit\transpiler\passes\layout\vf2_utils.py", line 87, in score_layout
fidelity *= (1 - avg_error_map[(bits[bit],)]) ** gate_count
KeyError: (5,)
| 2,136 |
|||
Qiskit/qiskit | Qiskit__qiskit-9020 | 8eed4fa4a63fe36b7299364c42ffe1dfb144e146 | diff --git a/qiskit/transpiler/target.py b/qiskit/transpiler/target.py
--- a/qiskit/transpiler/target.py
+++ b/qiskit/transpiler/target.py
@@ -1043,24 +1043,25 @@ def target_to_backend_properties(target: Target):
continue
qubit = qargs[0]
props_list = []
- if props.error is not None:
- props_list.append(
- {
- "date": datetime.datetime.utcnow(),
- "name": "readout_error",
- "unit": "",
- "value": props.error,
- }
- )
- if props.duration is not None:
- props_list.append(
- {
- "date": datetime.datetime.utcnow(),
- "name": "readout_length",
- "unit": "s",
- "value": props.duration,
- }
- )
+ if props is not None:
+ if props.error is not None:
+ props_list.append(
+ {
+ "date": datetime.datetime.utcnow(),
+ "name": "readout_error",
+ "unit": "",
+ "value": props.error,
+ }
+ )
+ if props.duration is not None:
+ props_list.append(
+ {
+ "date": datetime.datetime.utcnow(),
+ "name": "readout_length",
+ "unit": "s",
+ "value": props.duration,
+ }
+ )
if not props_list:
qubit_props = {}
break
| Transpiling fails if the target does not specify properties for measurement instructions
### Environment
- **Qiskit Terra version**: 0.22.0
- **Python version**: 3.9.12
- **Operating system**: Windows 10
### What is happening?
```
Traceback (most recent call last):
File ".\src\playground.py", line 86, in <module>
qc_transpiled = transpile(qc, target=target)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 327, in transpile
unique_transpile_args, shared_args = _parse_transpile_args(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 646, in _parse_transpile_args
backend_properties = target_to_backend_properties(target)
File ".\lib\site-packages\qiskit\transpiler\target.py", line 1021, in target_to_backend_properties
if props.error is not None:
AttributeError: 'NoneType' object has no attribute 'error'
```
### How can we reproduce the issue?
When building a transpiler target, the properties for added instructions are optional. The documentation of `qiskit.transpiler.Target.add_instruction` says `Properties are optional for any instruction implementation, if there are no InstructionProperties available for the backend the value can be None.`. However, it seems this claim is true for all instructions except measurement.
The code snippet below shows how to construct a very simple target with one qubit and a measurement instruction associated with it. The measurement instruction does not have any properties (i.e. they are specified as `None`). Then, this target is used to transpile a circuit, which results in the error pasted above:
```
from qiskit import QuantumCircuit
from qiskit.circuit.library import Measure
from qiskit.compiler import transpile
from qiskit.transpiler import Target
target = Target()
target.add_instruction(Measure(), {(0,): None})
qc = QuantumCircuit(1, 1)
qc.measure(0, 0)
transpile(qc, target=target)
```
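The diff above handles this case by checking `props` before reading its fields; a small runnable sketch of that guard (the helper name is mine, for illustration only):
```python
import datetime


def readout_entries(props):
    """Sketch of the guarded conversion from the diff above: build the
    BackendProperties-style readout entries for one qubit, returning an empty
    list when the target has no data (props is None)."""
    entries = []
    if props is not None:
        if props.error is not None:
            entries.append(
                {"date": datetime.datetime.utcnow(), "name": "readout_error", "unit": "", "value": props.error}
            )
        if props.duration is not None:
            entries.append(
                {"date": datetime.datetime.utcnow(), "name": "readout_length", "unit": "s", "value": props.duration}
            )
    return entries


print(readout_entries(None))  # -> [] instead of raising AttributeError
```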
### What should happen?
The transpilation should not fail
### Any suggestions?
_No response_
| 2022-10-28T05:28:33Z | [] | [] |
Traceback (most recent call last):
File ".\src\playground.py", line 86, in <module>
qc_transpiled = transpile(qc, target=target)
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 327, in transpile
unique_transpile_args, shared_args = _parse_transpile_args(
File ".\lib\site-packages\qiskit\compiler\transpiler.py", line 646, in _parse_transpile_args
backend_properties = target_to_backend_properties(target)
File ".\lib\site-packages\qiskit\transpiler\target.py", line 1021, in target_to_backend_properties
if props.error is not None:
AttributeError: 'NoneType' object has no attribute 'error'
| 2,143 |
||||
Qiskit/qiskit | Qiskit__qiskit-9076 | 599b663e694f01f514aebbc16c1f13ebd6dade78 | diff --git a/qiskit/primitives/backend_estimator.py b/qiskit/primitives/backend_estimator.py
--- a/qiskit/primitives/backend_estimator.py
+++ b/qiskit/primitives/backend_estimator.py
@@ -147,6 +147,9 @@ def __new__( # pylint: disable=signature-differs
self = super().__new__(cls)
return self
+ def __getnewargs__(self):
+ return (self._backend,)
+
@property
def transpile_options(self) -> Options:
"""Return the transpiler options for transpiling the circuits."""
diff --git a/qiskit/primitives/backend_sampler.py b/qiskit/primitives/backend_sampler.py
--- a/qiskit/primitives/backend_sampler.py
+++ b/qiskit/primitives/backend_sampler.py
@@ -23,10 +23,10 @@
from qiskit.result import QuasiDistribution, Result
from qiskit.transpiler.passmanager import PassManager
+from .backend_estimator import _prepare_counts, _run_circuits
from .base import BaseSampler, SamplerResult
from .primitive_job import PrimitiveJob
from .utils import _circuit_key
-from .backend_estimator import _run_circuits, _prepare_counts
class BackendSampler(BaseSampler):
@@ -83,6 +83,9 @@ def __new__( # pylint: disable=signature-differs
self = super().__new__(cls)
return self
+ def __getnewargs__(self):
+ return (self._backend,)
+
@property
def preprocessed_circuits(self) -> list[QuantumCircuit]:
"""
| Backend based primitives are not serializable via dill
### Environment
- **Qiskit Terra version**: 0.22
- **Python version**: 3.7
- **Operating system**: Any
### What is happening?
Backend-based primitives can't be saved via dill. Reference primitives can be loaded/saved. `QuantumInstance` is also serializable. This issue comes from Qiskit Machine Learning, where quantum models can be saved to a file, then loaded to continue training or for inference on real hardware.
### How can we reproduce the issue?
Run a script
```python
import dill
from qiskit import Aer
from qiskit.primitives import BackendSampler
sampler = BackendSampler(Aer.get_backend("aer_simulator"))
with open("sampler.dill", "wb") as f:
dill.dump(sampler, f)
with open("sampler.dill", "rb") as f:
sampler = dill.load(f)
```
### What should happen?
An exception is raised:
```
python __dill_primitives.py
Traceback (most recent call last):
File "__dill_primitives.py", line 10, in <module>
sampler = dill.load(f)
File ".../envs/dev-qml/lib/site-packages/dill/_dill.py", line 313, in load
return Unpickler(file, ignore=ignore, **kwds).load()
File ".../envs/dev-qml/lib/site-packages/dill/_dill.py", line 525, in load
obj = StockUnpickler.load(self)
TypeError: __new__() missing 1 required positional argument: 'backend'
```
### Any suggestions?
No exception should be raised.
| Adding the following method to BackendSampler should fix this issue.
```python
def __getnewargs__(self):
return self._backend,
```
This should be fixed in Terra, but Terra 0.22.1 has just been released...
Workaround
```python
def __getnewargs__(self):
return self._backend,
BackendSampler.__getnewargs__ = __getnewargs__
```
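For context on why `__getnewargs__` is the right hook: with pickle protocol 2+ the arguments it returns are recorded at dump time and passed to `cls.__new__(cls, *args)` when the object is rebuilt (dill goes through the same machinery, as the `StockUnpickler.load` frame in the traceback shows). A toy sketch, independent of Qiskit:
```python
import pickle


class Toy:
    def __new__(cls, backend):
        # __new__ takes a required positional argument, like the primitives'.
        return super().__new__(cls)

    def __init__(self, backend):
        self._backend = backend

    def __getnewargs__(self):
        # Tells pickle which arguments to hand to __new__ on reconstruction.
        return (self._backend,)


toy = pickle.loads(pickle.dumps(Toy("fake-backend")))
print(toy._backend)  # -> fake-backend
```
Without the `__getnewargs__` method, loading fails with exactly the `TypeError: __new__() missing 1 required positional argument: 'backend'` reported above.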
How come `BaseSampler` and `BaseEstimator` are overriding `__new__`? They don't need to influence the object-creation semantics - it looks to me like everything going on in that method is just instance initialisation, which should be done in `__init__`. The `__new__` override is the underlying cause of the failure to de-pickle here - derived classes from the primitives shouldn't be needing to override the `__new__` method to allow themselves to change the `__init__` signature.
@jakelishman It is legacy and will be removed after the deprecation period, but the original reason for introducing it was a pre-init hook.
That still sounds odd, unless the primitives API is asserting that the `__init__` methods of subclasses have to have a particular signature, which would be very unusual (and make these `BackendSampler` etc classes invalid). I'm not sure I understand why that couldn't just have happened at the start of `BaseSampler.__init__` - it's the subclass's responsibility to ensure that that base class is initialised correctly (almost invariably by calling `super().__init__(...)` at some point, for sensible subclasses, and it looks like the primitives would fit this bill).
Independent of the mechanics of getting the class usable with pickle, as a general rule `Backend` objects are not guaranteed to be serializable. This is part of the reason (but not the only one) why things like the transpiler do not pass backends around: they have to work in a multiprocessing context, which would require the backends to be serializable. This is because most backends contain inherently unserializable objects like handles to async remote execution, authorized sessions, compiled modules, etc.
Once I realized that the backend primitives are not serializable, that was a call to reconsider model serialization in QML. I suspected that, even though I can serialize a backend instance now, it is unnatural and a backend might not be serializable in general. You just confirmed that. Thanks. | 2022-11-04T13:46:52Z | [] | [] |
Traceback (most recent call last):
File "__dill_primitives.py", line 10, in <module>
sampler = dill.load(f)
File ".../envs/dev-qml/lib/site-packages/dill/_dill.py", line 313, in load
return Unpickler(file, ignore=ignore, **kwds).load()
File ".../envs/dev-qml/lib/site-packages/dill/_dill.py", line 525, in load
obj = StockUnpickler.load(self)
TypeError: __new__() missing 1 required positional argument: 'backend'
| 2,150 |
|||
Qiskit/qiskit | Qiskit__qiskit-9101 | 27da80d03f112b6225c80038e37169577bb8acd2 | diff --git a/qiskit/algorithms/eigensolvers/numpy_eigensolver.py b/qiskit/algorithms/eigensolvers/numpy_eigensolver.py
--- a/qiskit/algorithms/eigensolvers/numpy_eigensolver.py
+++ b/qiskit/algorithms/eigensolvers/numpy_eigensolver.py
@@ -20,11 +20,12 @@
from scipy import sparse as scisparse
from qiskit.opflow import PauliSumOp
+from qiskit.quantum_info import SparsePauliOp, Statevector
from qiskit.quantum_info.operators.base_operator import BaseOperator
-from qiskit.quantum_info import Statevector
from qiskit.utils.validation import validate_min
from .eigensolver import Eigensolver, EigensolverResult
+from ..exceptions import AlgorithmError
from ..list_or_dict import ListOrDict
logger = logging.getLogger(__name__)
@@ -53,12 +54,12 @@ def __init__(
) -> None:
"""
Args:
- k: number of eigenvalues are to be computed, with a min. value of 1.
- filter_criterion: callable that allows to filter eigenvalues/eigenstates. Only feasible
+ k: Number of eigenvalues are to be computed, with a minimum value of 1.
+ filter_criterion: Callable that allows to filter eigenvalues/eigenstates. Only feasible
eigenstates are returned in the results. The callable has the signature
``filter(eigenstate, eigenvalue, aux_values)`` and must return a boolean to indicate
whether to keep this value in the final returned result or not. If the number of
- elements that satisfies the criterion is smaller than `k`, then the returned list will
+ elements that satisfies the criterion is smaller than ``k``, then the returned list will
have fewer elements and can even be empty.
"""
validate_min("k", k, 1)
@@ -69,8 +70,6 @@ def __init__(
self._filter_criterion = filter_criterion
- self._ret = NumPyEigensolverResult()
-
@property
def k(self) -> int:
"""Return k (number of eigenvalues requested)."""
@@ -109,15 +108,30 @@ def _check_set_k(self, operator: BaseOperator | PauliSumOp) -> None:
else:
self._k = self._in_k
- def _solve(self, operator: BaseOperator | PauliSumOp) -> None:
+ def _solve(self, operator: BaseOperator | PauliSumOp) -> tuple[np.ndarray, np.ndarray]:
if isinstance(operator, PauliSumOp):
- sp_mat = operator.to_spmatrix()
+ op_matrix = operator.to_spmatrix()
+ else:
+ try:
+ op_matrix = operator.to_matrix(sparse=True)
+ except TypeError:
+ logger.debug(
+ "WARNING: operator of type `%s` does not support sparse matrices. "
+ "Trying dense computation",
+ type(operator),
+ )
+ try:
+ op_matrix = operator.to_matrix()
+ except AttributeError as ex:
+ raise AlgorithmError(f"Unsupported operator type `{type(operator)}`.") from ex
+
+ if isinstance(op_matrix, scisparse.csr_matrix):
# If matrix is diagonal, the elements on the diagonal are the eigenvalues. Solve by sorting.
- if scisparse.csr_matrix(sp_mat.diagonal()).nnz == sp_mat.nnz:
- diag = sp_mat.diagonal()
+ if scisparse.csr_matrix(op_matrix.diagonal()).nnz == op_matrix.nnz:
+ diag = op_matrix.diagonal()
indices = np.argsort(diag)[: self._k]
eigval = diag[indices]
- eigvec = np.zeros((sp_mat.shape[0], self._k))
+ eigvec = np.zeros((op_matrix.shape[0], self._k))
for i, idx in enumerate(indices):
eigvec[idx, i] = 1.0
else:
@@ -125,53 +139,33 @@ def _solve(self, operator: BaseOperator | PauliSumOp) -> None:
logger.debug(
"SciPy doesn't support to get all eigenvalues, using NumPy instead."
)
- if operator.is_hermitian():
- eigval, eigvec = np.linalg.eigh(operator.to_matrix())
- else:
- eigval, eigvec = np.linalg.eig(operator.to_matrix())
+ eigval, eigvec = self._solve_dense(operator.to_matrix())
else:
- if operator.is_hermitian():
- eigval, eigvec = scisparse.linalg.eigsh(sp_mat, k=self._k, which="SA")
- else:
- eigval, eigvec = scisparse.linalg.eigs(sp_mat, k=self._k, which="SR")
-
- indices = np.argsort(eigval)[: self._k]
- eigval = eigval[indices]
- eigvec = eigvec[:, indices]
+ eigval, eigvec = self._solve_sparse(op_matrix, self._k)
else:
- logger.debug("SciPy not supported, using NumPy instead.")
-
- if operator.data.all() == operator.data.conj().T.all():
- eigval, eigvec = np.linalg.eigh(operator.data)
- else:
- eigval, eigvec = np.linalg.eig(operator.data)
-
- indices = np.argsort(eigval)[: self._k]
- eigval = eigval[indices]
- eigvec = eigvec[:, indices]
-
- self._ret.eigenvalues = eigval
- self._ret.eigenstates = eigvec.T
+ # Sparse SciPy matrix not supported, use dense NumPy computation.
+ eigval, eigvec = self._solve_dense(operator.to_matrix())
- def _get_ground_state_energy(self, operator: BaseOperator | PauliSumOp) -> None:
- if self._ret.eigenvalues is None or self._ret.eigenstates is None:
- self._solve(operator)
+ indices = np.argsort(eigval)[: self._k]
+ eigval = eigval[indices]
+ eigvec = eigvec[:, indices]
+ return eigval, eigvec.T
- def _get_energies(
- self,
- operator: BaseOperator | PauliSumOp,
- aux_operators: ListOrDict[BaseOperator | PauliSumOp] | None,
- ) -> None:
- if self._ret.eigenvalues is None or self._ret.eigenstates is None:
- self._solve(operator)
+ @staticmethod
+ def _solve_sparse(op_matrix: scisparse.csr_matrix, k: int) -> tuple[np.ndarray, np.ndarray]:
+ if (op_matrix != op_matrix.H).nnz == 0:
+ # Operator is Hermitian
+ return scisparse.linalg.eigsh(op_matrix, k=k, which="SA")
+ else:
+ return scisparse.linalg.eigs(op_matrix, k=k, which="SR")
- if aux_operators is not None:
- aux_op_vals = []
- for i in range(self._k):
- aux_op_vals.append(
- self._eval_aux_operators(aux_operators, self._ret.eigenstates[i])
- )
- self._ret.aux_operators_evaluated = aux_op_vals
+ @staticmethod
+ def _solve_dense(op_matrix: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+ if op_matrix.all() == op_matrix.conj().T.all():
+ # Operator is Hermitian
+ return np.linalg.eigh(op_matrix)
+ else:
+ return np.linalg.eig(op_matrix)
@staticmethod
def _eval_aux_operators(
@@ -190,25 +184,42 @@ def _eval_aux_operators(
else:
values = {}
key_op_iterator = aux_operators.items()
+
for key, operator in key_op_iterator:
if operator is None:
continue
- value = 0.0
+
+ if operator.num_qubits is None or operator.num_qubits < 1:
+ logger.info(
+ "The number of qubits of the %s operator must be greater than zero.", key
+ )
+ continue
+
+ op_matrix = None
if isinstance(operator, PauliSumOp):
if operator.coeff != 0:
- mat = operator.to_spmatrix()
- # Terra doesn't support sparse yet, so do the matmul directly if so
- # This is necessary for the particle_hole and other chemistry tests because the
- # pauli conversions are 2^12th large and will OOM error if not sparse.
- if isinstance(mat, scisparse.spmatrix):
- value = mat.dot(wavefn).dot(np.conj(wavefn))
- else:
- value = (
- Statevector(wavefn).expectation_value(operator.primitive)
- * operator.coeff
- )
+ op_matrix = operator.to_spmatrix()
else:
+ try:
+ op_matrix = operator.to_matrix(sparse=True)
+ except TypeError:
+ logger.debug(
+ "WARNING: operator of type `%s` does not support sparse matrices. "
+ "Trying dense computation",
+ type(operator),
+ )
+ try:
+ op_matrix = operator.to_matrix()
+ except AttributeError as ex:
+ raise AlgorithmError(f"Unsupported operator type {type(operator)}.") from ex
+
+ if isinstance(op_matrix, scisparse.csr_matrix):
+ value = op_matrix.dot(wavefn).dot(np.conj(wavefn))
+ elif isinstance(op_matrix, np.ndarray):
value = Statevector(wavefn).expectation_value(operator)
+ else:
+ value = 0.0
+
value = value if np.abs(value) > threshold else 0.0
# The value gets wrapped into a tuple: (mean, metadata).
# The metadata includes variance (and, for other eigensolvers, shots).
@@ -225,8 +236,12 @@ def compute_eigenvalues(
super().compute_eigenvalues(operator, aux_operators)
+ if operator.num_qubits is None or operator.num_qubits < 1:
+ raise AlgorithmError("The number of qubits of the operator must be greater than zero.")
+
self._check_set_k(operator)
- zero_op = PauliSumOp.from_list([("I", 1)]).tensorpower(operator.num_qubits) * 0.0
+
+ zero_op = SparsePauliOp(["I" * operator.num_qubits], coeffs=[0.0])
if isinstance(aux_operators, list) and len(aux_operators) > 0:
# For some reason Chemistry passes aux_ops with 0 qubits and paulis sometimes.
aux_operators = [zero_op if op == 0 else op for op in aux_operators]
@@ -244,49 +259,51 @@ def compute_eigenvalues(
# need to consider all elements if a filter is set
self._k = 2**operator.num_qubits
- self._ret = NumPyEigensolverResult()
- self._solve(operator)
+ eigvals, eigvecs = self._solve(operator)
# compute energies before filtering, as this also evaluates the aux operators
- self._get_energies(operator, aux_operators)
+ if aux_operators is not None:
+ aux_op_vals = [
+ self._eval_aux_operators(aux_operators, eigvecs[i]) for i in range(self._k)
+ ]
+ else:
+ aux_op_vals = None
# if a filter is set, loop over the given values and only keep
if self._filter_criterion:
-
- eigvecs = []
- eigvals = []
- aux_ops = []
- cnt = 0
- for i in range(len(self._ret.eigenvalues)):
- eigvec = self._ret.eigenstates[i]
- eigval = self._ret.eigenvalues[i]
- if self._ret.aux_operators_evaluated is not None:
- aux_op = self._ret.aux_operators_evaluated[i]
+ filt_eigvals = []
+ filt_eigvecs = []
+ filt_aux_op_vals = []
+ count = 0
+ for i, (eigval, eigvec) in enumerate(zip(eigvals, eigvecs)):
+ if aux_op_vals is not None:
+ aux_op_val = aux_op_vals[i]
else:
- aux_op = None
- if self._filter_criterion(eigvec, eigval, aux_op):
- cnt += 1
- eigvecs += [eigvec]
- eigvals += [eigval]
- if self._ret.aux_operators_evaluated is not None:
- aux_ops += [aux_op]
- if cnt == k_orig:
+ aux_op_val = None
+
+ if self._filter_criterion(eigvec, eigval, aux_op_val):
+ count += 1
+ filt_eigvecs.append(eigvec)
+ filt_eigvals.append(eigval)
+ if aux_op_vals is not None:
+ filt_aux_op_vals.append(aux_op_val)
+
+ if count == k_orig:
break
- self._ret.eigenstates = np.array(eigvecs)
- self._ret.eigenvalues = np.array(eigvals)
- # conversion to np.array breaks in case of aux_ops
- self._ret.aux_operators_evaluated = aux_ops
+ eigvals = np.array(filt_eigvals)
+ eigvecs = np.array(filt_eigvecs)
+ aux_op_vals = filt_aux_op_vals
self._k = k_orig
- # evaluate ground state after filtering (in case a filter is set)
- self._get_ground_state_energy(operator)
- if self._ret.eigenstates is not None:
- self._ret.eigenstates = [Statevector(vec) for vec in self._ret.eigenstates]
+ result = NumPyEigensolverResult()
+ result.eigenvalues = eigvals
+ result.eigenstates = [Statevector(vec) for vec in eigvecs]
+ result.aux_operators_evaluated = aux_op_vals
- logger.debug("NumpyEigensolverResult:\n%s", self._ret)
- return self._ret
+ logger.debug("NumpyEigensolverResult:\n%s", result)
+ return result
class NumPyEigensolverResult(EigensolverResult):
diff --git a/qiskit/algorithms/minimum_eigensolvers/numpy_minimum_eigensolver.py b/qiskit/algorithms/minimum_eigensolvers/numpy_minimum_eigensolver.py
--- a/qiskit/algorithms/minimum_eigensolvers/numpy_minimum_eigensolver.py
+++ b/qiskit/algorithms/minimum_eigensolvers/numpy_minimum_eigensolver.py
@@ -43,7 +43,7 @@ def __init__(
) -> None:
"""
Args:
- filter_criterion: callable that allows to filter eigenvalues/eigenstates. The minimum
+ filter_criterion: Callable that allows to filter eigenvalues/eigenstates. The minimum
eigensolver is only searching over feasible states and returns an eigenstate that
has the smallest eigenvalue among feasible states. The callable has the signature
``filter(eigenstate, eigenvalue, aux_values)`` and must return a boolean to indicate
diff --git a/qiskit/quantum_info/operators/operator.py b/qiskit/quantum_info/operators/operator.py
--- a/qiskit/quantum_info/operators/operator.py
+++ b/qiskit/quantum_info/operators/operator.py
@@ -470,6 +470,10 @@ def reverse_qargs(self):
ret._op_shape = self._op_shape.reverse()
return ret
+ def to_matrix(self):
+ """Convert operator to NumPy matrix."""
+ return self.data
+
@classmethod
def _einsum_matmul(cls, tensor, mat, indices, shift=0, right_mul=False):
"""Perform a contraction using Numpy.einsum
| `NumPyEigensolver` does not support all `BaseOperator` instances.
### Environment
- **Qiskit Terra version**: 3ce1737b
- **Python version**: 3.10.6
- **Operating system**: macOS 13.0
### What is happening?
`NumPyEigensolver` and, by extension, `NumPyMinimumEigensolver` do not support all `BaseOperator` instances, in particular `SparsePauliOp`. This is because the implementation requires the `data` attribute, which `Operator` exposes but, e.g., `SparsePauliOp` does not.
### How can we reproduce the issue?
```python
from qiskit.algorithms.eigensolvers import NumPyEigensolver
from qiskit.quantum_info import SparsePauliOp
op = SparsePauliOp("Z")
npe = NumPyEigensolver()
result = npe.compute_eigenvalues(op)
```
Output:
```
Traceback (most recent call last):
File "/Users/declanmillar/Projects/qiskit/qiskit-terra/test.py", line 10, in <module>
result = npe.compute_eigenvalues(op)
File "/Users/declanmillar/Projects/qiskit/qiskit-terra/qiskit/algorithms/eigensolvers/numpy_eigensolver.py", line 248, in compute_eigenvalues
self._solve(operator)
File "/Users/declanmillar/Projects/qiskit/qiskit-terra/qiskit/algorithms/eigensolvers/numpy_eigensolver.py", line 144, in _solve
if operator.data.all() == operator.data.conj().T.all():
AttributeError: 'SparsePauliOp' object has no attribute 'data'
```
### What should happen?
The code should compute the eigenvalues without error, similarly to:
```python
from qiskit.algorithms.eigensolvers import NumPyEigensolver
from qiskit.quantum_info import SparsePauliOp
from qiskit.opflow import PauliSumOp
op = SparsePauliOp("Z")
op = PauliSumOp(op)
npe = NumPyEigensolver()
result = npe.compute_eigenvalues(op)
```
or:
```python
from qiskit.algorithms.eigensolvers import NumPyEigensolver
from qiskit.quantum_info import SparsePauliOp, Operator
op = SparsePauliOp("Z")
op = Operator(op.to_matrix())
npe = NumPyEigensolver()
result = npe.compute_eigenvalues(op)
```
### Any suggestions?
Generalize the computation inside `NumPyEigensolver` to avoid the `data` attribute, perhaps via `to_matrix()` or, if possible, a sparse implementation that works for `BaseOperator` instances.
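A sketch of one possible generalization, mirroring the try-sparse-then-dense conversion used in the patch above (the helper name `operator_to_matrix` is illustrative, not an existing API):
```python
import numpy as np
from qiskit.quantum_info import SparsePauliOp

def operator_to_matrix(operator):
    """Best-effort conversion of a quantum_info operator to a matrix."""
    try:
        # SparsePauliOp and similar classes accept a ``sparse`` flag.
        return operator.to_matrix(sparse=True)
    except TypeError:
        # Fall back to a dense matrix for operators without sparse support.
        return operator.to_matrix()

mat = operator_to_matrix(SparsePauliOp("Z"))           # SciPy CSR matrix here
dense = mat.toarray() if hasattr(mat, "toarray") else mat
print(np.linalg.eigvalsh(dense))                       # [-1.  1.]
```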
| I'm happy to work on this issue myself. | 2022-11-08T16:48:24Z | [] | [] |
Traceback (most recent call last):
File "/Users/declanmillar/Projects/qiskit/qiskit-terra/test.py", line 10, in <module>
result = npe.compute_eigenvalues(op)
File "/Users/declanmillar/Projects/qiskit/qiskit-terra/qiskit/algorithms/eigensolvers/numpy_eigensolver.py", line 248, in compute_eigenvalues
self._solve(operator)
File "/Users/declanmillar/Projects/qiskit/qiskit-terra/qiskit/algorithms/eigensolvers/numpy_eigensolver.py", line 144, in _solve
if operator.data.all() == operator.data.conj().T.all():
AttributeError: 'SparsePauliOp' object has no attribute 'data'
| 2,159 |
|||
Qiskit/qiskit | Qiskit__qiskit-9149 | fbf5284510b59909e2ddb14ad0121be9777892e0 | diff --git a/qiskit/primitives/utils.py b/qiskit/primitives/utils.py
--- a/qiskit/primitives/utils.py
+++ b/qiskit/primitives/utils.py
@@ -12,9 +12,10 @@
"""
Utility functions for primitives
"""
-
from __future__ import annotations
+import numpy as np
+
from qiskit.circuit import Instruction, ParameterExpression, QuantumCircuit
from qiskit.circuit.bit import Bit
from qiskit.extensions.quantum_initializer.initializer import Initialize
@@ -146,7 +147,10 @@ def _circuit_key(circuit: QuantumCircuit, functional: bool = True) -> tuple:
_bits_key(data.qubits, circuit), # qubits
_bits_key(data.clbits, circuit), # clbits
data.operation.name, # operation.name
- tuple(data.operation.params), # operation.params
+ tuple(
+ param.data.tobytes() if isinstance(param, np.ndarray) else param
+ for param in data.operation.params
+ ), # operation.params
)
for data in circuit.data
),
| Sampler fails on gates with unhashable parameters
### Environment
- **Qiskit Terra version**: fbff44bfe9ebc9d97203929b1bff5483fe06028a
- **Python version**: Python 3.10.8
- **Operating system**: Arch Linux
### What is happening?
See title.
### How can we reproduce the issue?
```python
import numpy as np
from qiskit.circuit import QuantumCircuit
from qiskit.extensions.unitary import UnitaryGate
from qiskit.primitives import Sampler
gate = UnitaryGate(np.eye(2))
circuit = QuantumCircuit(1)
circuit.append(gate, [0])
circuit.measure_all()
sampler = Sampler()
sampler_result = sampler.run([circuit]).result()
quasi_dists = sampler_result.quasi_dists
```
```
Traceback (most recent call last):
File "/home/kjs/projects/qiskit-terra/scratch/sampler_hash_bug.py", line 14, in <module>
sampler_result = sampler.run([circuit]).result()
File "/home/kjs/projects/qiskit-terra/qiskit/primitives/base/base_sampler.py", line 184, in run
return self._run(
File "/home/kjs/projects/qiskit-terra/qiskit/primitives/sampler.py", line 146, in _run
index = self._circuit_ids.get(key)
TypeError: unhashable type: 'numpy.ndarray'
```
### What should happen?
It should work.
### Any suggestions?
It appears the primitives rely on associating a hashable key with each circuit https://github.com/Qiskit/qiskit-terra/blob/514e38394657802e6cf4aefbf65fe3be3b23b783/qiskit/primitives/utils.py#L127. This breaks when an operation's parameters include a NumPy array, which is unhashable.
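One way to make the key robust, essentially what the patch above does, is to replace NumPy-array parameters with an immutable bytes representation before building the key tuple (a sketch; the helper name is made up):
```python
import numpy as np

def hashable_params(params):
    """Map unhashable ndarray parameters to their raw bytes."""
    return tuple(
        p.data.tobytes() if isinstance(p, np.ndarray) else p for p in params
    )

key_fragment = hashable_params([np.eye(2)])  # e.g. params of UnitaryGate(np.eye(2))
print(hash(key_fragment))                    # now hashable
```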
| 2022-11-17T03:25:38Z | [] | [] |
Traceback (most recent call last):
File "/home/kjs/projects/qiskit-terra/scratch/sampler_hash_bug.py", line 14, in <module>
sampler_result = sampler.run([circuit]).result()
File "/home/kjs/projects/qiskit-terra/qiskit/primitives/base/base_sampler.py", line 184, in run
return self._run(
File "/home/kjs/projects/qiskit-terra/qiskit/primitives/sampler.py", line 146, in _run
index = self._circuit_ids.get(key)
TypeError: unhashable type: 'numpy.ndarray'
| 2,165 |
||||
Qiskit/qiskit | Qiskit__qiskit-9310 | 244400ad19278125e39a4e35806a79fd9b824655 | diff --git a/qiskit/utils/classtools.py b/qiskit/utils/classtools.py
--- a/qiskit/utils/classtools.py
+++ b/qiskit/utils/classtools.py
@@ -25,11 +25,6 @@
_MAGIC_STATICMETHODS = {"__new__"}
_MAGIC_CLASSMETHODS = {"__init_subclass__", "__prepare__"}
-# `type` itself has several methods (mostly dunders). When we are wrapping those names, we need to
-# make sure that we don't interfere with `type.__getattribute__`'s handling that circumvents the
-# normal inheritance rules when appropriate.
-_TYPE_METHODS = set(dir(type))
-
class _lift_to_method: # pylint: disable=invalid-name
"""A decorator that ensures that an input callable object implements ``__get__``. It is
@@ -146,16 +141,6 @@ def wrap_method(cls: Type, name: str, *, before: Callable = None, after: Callabl
# The best time to apply decorators to methods is before they are bound (e.g. by using function
# decorators during the class definition), but if we're making a class decorator, we can't do
# that. We need the actual definition of the method, so we have to dodge the normal output of
- # `type.__getattribute__`, which evalutes descriptors if it finds them, unless the name we're
- # looking for is defined on `type` itself. In that case, we need the attribute getter to
- # correctly return the underlying object, not the one that `type` defines for its own purposes.
- attribute_getter = type.__getattribute__ if name in _TYPE_METHODS else object.__getattribute__
- for cls_ in inspect.getmro(cls):
- try:
- method = attribute_getter(cls_, name)
- break
- except AttributeError:
- pass
- else:
- raise ValueError(f"Method '{name}' is not defined for class '{cls.__name__}'")
+ # `type.__getattribute__`, which evalutes descriptors if it finds them.
+ method = inspect.getattr_static(cls, name)
setattr(cls, name, _WrappedMethod(method, before, after))
| `qiskit.test.decorators.enforce_subclasses_call` machinery is broken in 3.11.1
### Environment
- **Qiskit Terra version**:
HEAD of main, currently `dd7f9390cf07`
- **Python version**:
3.11.1
- **Operating system**:
Both macOS and ubuntu, so far.
### What is happening?
`import test.python` raises an error in 3.11.1, so the tests do not run. @wshanks has indicated that it works fine for him in 3.11.0 but not in 3.11.1 (I don't have 3.11.0 installed).
```
Python 3.11.1 (v3.11.1:a7a450f84a, Dec 6 2022, 15:24:06) [Clang 13.0.0 (clang-1300.0.29.30)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import test.python
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/ihincks/ibm/qiskit-terra/test/__init__.py", line 16, in <module>
from qiskit.test.utils import generate_cases
File "/Users/ihincks/ibm/qiskit-terra/qiskit/test/__init__.py", line 15, in <module>
from .base import QiskitTestCase
File "/Users/ihincks/ibm/qiskit-terra/qiskit/test/base.py", line 167, in <module>
class QiskitTestCase(BaseQiskitTestCase):
File "/Users/ihincks/ibm/qiskit-terra/qiskit/utils/classtools.py", line 117, in out
retval = method(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^
TypeError: TestCase.__init_subclass__() missing 1 required positional argument: 'cls'
```
Or, see an example of this happening in CI: https://github.com/Qiskit/qiskit-experiments/actions/runs/3704009805/jobs/6281449337
### How can we reproduce the issue?
Run the tests or simply `import test.python` in Python 3.11.1.
### What should happen?
This should not error.
### Any suggestions?
_No response_
| I'm having trouble reproducing this using only `wrap_method`. For example, the following works as expected:
```python
import qiskit.utils
class Foo:
    pass

def print_blerg(*_):
    print("blerg")

qiskit.utils.classtools.wrap_method(Foo, "__init_subclass__", after=print_blerg)

class Goo(Foo):
    pass
```
If it works on 3.11.0, maybe https://github.com/python/cpython/pull/99646, which touches `TestCase.__init_subclass__`, is related? Though it's weird that the failure isn't happening consistently: in some of the CI runs only Mac failed and in some only Linux.
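For reference, the patch above swaps the manual MRO walk for `inspect.getattr_static`, which fetches the raw attribute without running the descriptor protocol. A small, self-contained illustration of the difference (toy class, not Terra code):
```python
import inspect

class Base:
    def __init_subclass__(cls, **kwargs):   # implicitly a classmethod
        super().__init_subclass__(**kwargs)

# Ordinary attribute access binds the classmethod to Base, so the `cls`
# argument is already filled in:
print(Base.__init_subclass__)                             # <bound method ...>

# getattr_static returns the underlying classmethod object unbound, which is
# what a wrapper needs to store and later re-install with setattr():
print(inspect.getattr_static(Base, "__init_subclass__"))  # <classmethod ...>
```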
Please revert https://github.com/Qiskit/qiskit-terra/pull/9296 when this is fixed. | 2022-12-20T16:34:01Z | [] | [] |
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/ihincks/ibm/qiskit-terra/test/__init__.py", line 16, in <module>
from qiskit.test.utils import generate_cases
File "/Users/ihincks/ibm/qiskit-terra/qiskit/test/__init__.py", line 15, in <module>
from .base import QiskitTestCase
File "/Users/ihincks/ibm/qiskit-terra/qiskit/test/base.py", line 167, in <module>
class QiskitTestCase(BaseQiskitTestCase):
File "/Users/ihincks/ibm/qiskit-terra/qiskit/utils/classtools.py", line 117, in out
retval = method(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^
TypeError: TestCase.__init_subclass__() missing 1 required positional argument: 'cls'
| 2,181 |
|||
Qiskit/qiskit | Qiskit__qiskit-9726 | 3284ea088bbdc3b914dc2be98d150a31e04322dc | diff --git a/qiskit/quantum_info/operators/symplectic/pauli.py b/qiskit/quantum_info/operators/symplectic/pauli.py
--- a/qiskit/quantum_info/operators/symplectic/pauli.py
+++ b/qiskit/quantum_info/operators/symplectic/pauli.py
@@ -147,7 +147,8 @@ class initialization (``Pauli('-iXYZ')``). A ``Pauli`` object can be
# Set the max Pauli string size before truncation
__truncate__ = 50
- _VALID_LABEL_PATTERN = re.compile(r"^[+-]?1?[ij]?[IXYZ]+$")
+ _VALID_LABEL_PATTERN = re.compile(r"(?P<coeff>[+-]?1?[ij]?)(?P<pauli>[IXYZ]*)")
+ _CANONICAL_PHASE_LABEL = {"": 0, "-i": 1, "-": 2, "i": 3}
def __init__(self, data=None, x=None, *, z=None, label=None):
"""Initialize the Pauli.
@@ -613,17 +614,15 @@ def _from_label(label):
Raises:
QiskitError: if Pauli string is not valid.
"""
- if Pauli._VALID_LABEL_PATTERN.match(label) is None:
+ match_ = Pauli._VALID_LABEL_PATTERN.fullmatch(label)
+ if match_ is None:
raise QiskitError(f'Pauli string label "{label}" is not valid.')
-
- # Split string into coefficient and Pauli
- pauli, coeff = _split_pauli_label(label)
-
- # Convert coefficient to phase
- phase = 0 if not coeff else _phase_from_label(coeff)
+ phase = Pauli._CANONICAL_PHASE_LABEL[
+ (match_["coeff"] or "").replace("1", "").replace("+", "").replace("j", "i")
+ ]
# Convert to Symplectic representation
- pauli_bytes = np.frombuffer(pauli.encode("ascii"), dtype=np.uint8)[::-1]
+ pauli_bytes = np.frombuffer(match_["pauli"].encode("ascii"), dtype=np.uint8)[::-1]
ys = pauli_bytes == ord("Y")
base_x = np.logical_or(pauli_bytes == ord("X"), ys).reshape(1, -1)
base_z = np.logical_or(pauli_bytes == ord("Z"), ys).reshape(1, -1)
@@ -698,33 +697,5 @@ def _from_circuit(cls, instr):
return ret._z, ret._x, ret._phase
-# ---------------------------------------------------------------------
-# Label parsing helper functions
-# ---------------------------------------------------------------------
-
-
-def _split_pauli_label(label):
- """Split Pauli label into unsigned group label and coefficient label"""
- span = re.search(r"[IXYZ]+", label).span()
- pauli = label[span[0] :]
- coeff = label[: span[0]]
- if span[1] != len(label):
- invalid = set(re.sub(r"[IXYZ]+", "", label[span[0] :]))
- raise QiskitError(
- f"Pauli string contains invalid characters {invalid} ∉ ['I', 'X', 'Y', 'Z']"
- )
- return pauli, coeff
-
-
-def _phase_from_label(label):
- """Return the phase from a label"""
- # Returns None if label is invalid
- label = label.replace("+", "", 1).replace("1", "", 1).replace("j", "i", 1)
- phases = {"": 0, "-i": 1, "-": 2, "i": 3}
- if label not in phases:
- raise QiskitError(f"Invalid Pauli phase label '{label}'")
- return phases[label]
-
-
# Update docstrings for API docs
generate_apidocs(Pauli)
| `Pauli('')` confusion
### Environment
- **Qiskit Terra version**: 0.23.2
- **Python version**: 3.9.7
- **Operating system**: Linux
### What is happening?
I find the following code example puzzling:
```python
>>> from qiskit.quantum_info import Pauli
>>> Pauli("X")[[]]
Pauli('')
>>> Pauli('')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/garrison/serverless/.direnv/python-3.9.7/lib/python3.9/site-packages/qiskit/quantum_info/operators/symplectic/pauli.py", line 182, in __init__
base_z, base_x, base_phase = self._from_label(data)
File "/home/garrison/serverless/.direnv/python-3.9.7/lib/python3.9/site-packages/qiskit/quantum_info/operators/symplectic/pauli.py", line 619, in _from_label
raise QiskitError(f'Pauli string label "{label}" is not valid.')
qiskit.exceptions.QiskitError: 'Pauli string label "" is not valid.'
```
The first REPL line allows me to construct an object whose `__repr__` is `Pauli('')`. However, if I try to instantiate `Pauli('')` directly, it fails.
### How can we reproduce the issue?
Code snippet above.
### What should happen?
Either both statements should fail, or both should be allowed.
### Any suggestions?
I currently see no reason not to simply support `Pauli('')`, such that it works without raising an exception.
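A sketch of a relaxed label parser that accepts the empty string as a zero-qubit Pauli, along the lines of the patch above (which switches to `re.fullmatch` with an `[IXYZ]*` group); the function name is illustrative:
```python
import re

_LABEL = re.compile(r"(?P<coeff>[+-]?1?[ij]?)(?P<pauli>[IXYZ]*)")

def parse_pauli_label(label: str):
    match = _LABEL.fullmatch(label)
    if match is None:
        raise ValueError(f'Pauli string label "{label}" is not valid.')
    # An empty "pauli" group simply denotes a zero-qubit Pauli.
    return match["coeff"], match["pauli"]

print(parse_pauli_label(""))       # ('', '')
print(parse_pauli_label("-iXYZ"))  # ('-i', 'XYZ')
```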
| 2023-03-04T01:37:29Z | [] | [] |
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/garrison/serverless/.direnv/python-3.9.7/lib/python3.9/site-packages/qiskit/quantum_info/operators/symplectic/pauli.py", line 182, in __init__
base_z, base_x, base_phase = self._from_label(data)
File "/home/garrison/serverless/.direnv/python-3.9.7/lib/python3.9/site-packages/qiskit/quantum_info/operators/symplectic/pauli.py", line 619, in _from_label
raise QiskitError(f'Pauli string label "{label}" is not valid.')
qiskit.exceptions.QiskitError: 'Pauli string label "" is not valid.'
| 2,226 |
||||
Qiskit/qiskit | Qiskit__qiskit-9777 | 5ce80ab45bfdec0f300d4f2095d4fc8dfe3eaae6 | diff --git a/qiskit/circuit/quantumcircuit.py b/qiskit/circuit/quantumcircuit.py
--- a/qiskit/circuit/quantumcircuit.py
+++ b/qiskit/circuit/quantumcircuit.py
@@ -1623,7 +1623,6 @@ def qasm(
"sx",
"sxdg",
"cz",
- "ccz",
"cy",
"swap",
"ch",
@@ -1636,8 +1635,6 @@ def qasm(
"cp",
"cu3",
"csx",
- "cs",
- "csdg",
"cu",
"rxx",
"rzz",
| ccz roundtrip in OpenQASM 2 broken
@ryahill1 noticed that the OpenQASM 2 round trip is broken for several gates (originally posted in https://github.com/Qiskit/qiskit-terra/issues/9559#issuecomment-1424824806)
One of those cases is [ccz](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/z.py#L203):
```python
import qiskit
qc = qiskit.QuantumCircuit(3)
gate = qiskit.circuit.library.CCZGate()
qc.append(gate, qargs=qc.qubits)
qasm = qc.qasm()
print(qasm)
qc_from_qasm = qiskit.QuantumCircuit.from_qasm_str(qasm)
print(qc_from_qasm)
```
```
qiskit.qasm.exceptions.QasmError: "Cannot find gate definition for 'ccz', line 4 file "
```
csdg roundtrip in OpenQASM 2 broken
### Environment
- **Qiskit Terra version**: 0.23.1
- **Python version**: 3.9.15
- **Operating system**: macOS Ventura
### What is happening?
Qiskit supports the [`CSdgGate`](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/s.py#L228), but raises an error when creating a circuit from a QASM file that includes a `csdg` gate. The issue may exist for other gates as well, but so far this is the only one I've identified.
### How can we reproduce the issue?
```python
import os
from qiskit import QuantumCircuit
workdir = os.path.dirname(__file__)
qasm_file = os.path.join(workdir, "test.qasm")
circuit = QuantumCircuit(2)
circuit.csdg(0, 1)
qasm_str = circuit.qasm()
with open(qasm_file, "w") as f:
    f.write(qasm_str)
new_circuit = QuantumCircuit().from_qasm_file(qasm_file)
print(new_circuit)
```
```
Traceback (most recent call last):
...
...
File ".../lib/python3.9/site-packages/qiskit/qasm/qasmparser.py", line 138, in verify_as_gate
raise QasmError(
qiskit.qasm.exceptions.QasmError: "Cannot find gate definition for 'csdg', line 4 file line 4 file test.qasm"
```
### What should happen?
The program should print out the new circuit without error
<img width="177" alt="Screenshot 2023-02-08 at 11 09 51 PM" src="https://user-images.githubusercontent.com/46977852/217723450-2eb6bac6-4364-4ac0-ba41-01907d765944.png">
### Any suggestions?
_No response_
| (Copied over): this doesn't need a separate issue. I commented the fix for #9559 at the bottom, and it's trivially the same for the three concerned gates.
The bug here is actually in the OpenQASM 2 output rather than the input - Qiskit should have emitted a file that includes a definition for `csdg`, because it's not defined in the standard-library `qelib1.inc` file. The parser is correctly complaining that the OpenQASM input doesn't define that gate.
Ok, gotcha. I did some more testing and the same problem seems to occur for 4 other (a total of 5) gates:
- [ ] [`rzx`](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/rzx.py#L21)
- [ ] [`ccz`](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/z.py#L203) https://github.com/Qiskit/qiskit-terra/issues/9721
- [x] [`c3sx`](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/x.py#L487) (#9183)
- [ ] [`cs`](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/s.py#L154) https://github.com/Qiskit/qiskit-terra/issues/9722
- [ ] [`csdg`](https://github.com/Qiskit/qiskit-terra/blob/d4e7144efa9c661817161f84553313bf39406fac/qiskit/circuit/library/standard_gates/s.py#L228) https://github.com/Qiskit/qiskit-terra/issues/9723
Let's break this one into 4 other issues!
rzx seems to work, I think:
```python
import qiskit
qc = qiskit.QuantumCircuit(2)
gate = qiskit.circuit.library.RZXGate(0)
qc.append(gate, qargs=qc.qubits)
qasm = qc.qasm()
print(qasm)
qc_from_qasm = qiskit.QuantumCircuit.from_qasm_str(qasm)
print(qc_from_qasm)
```
```
OPENQASM 2.0;
include "qelib1.inc";
gate rzx(param0) q0,q1 { h q1; cx q0,q1; rz(0) q1; cx q0,q1; h q1; }
qreg q[2];
rzx(0) q[0],q[1];
┌─────────┐
q_0: ┤0 ├
│ rzx(0) │
q_1: ┤1 ├
└─────────┘
```
Did we need separate issues for all of these? They're all the exact same thing - the OQ2 exporter considers them "predefined" gates, but they're not. The solution is just to delete one corresponding line each from `QuantumCircuit.qasm.<locals>.existing_gate_names`.
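A quick way to see which gates the exporter wrongly treats as predefined is to check whether the emitted OpenQASM 2 contains a `gate` definition for them; this is only a diagnostic sketch using the gates named in this thread:
```python
from qiskit import QuantumCircuit
from qiskit.circuit.library import CCZGate, CSGate, CSdgGate

for gate in [CCZGate(), CSGate(), CSdgGate()]:
    qc = QuantumCircuit(gate.num_qubits)
    qc.append(gate, qc.qubits)
    qasm = qc.qasm()
    # Without a "gate ccz ..." (etc.) definition line, the output cannot be
    # re-imported with QuantumCircuit.from_qasm_str, since qelib1.inc does
    # not define these gates.
    print(gate.name, f"gate {gate.name}" in qasm)
```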
For completeness, the full list of those that won't work right because of the reason I just gave is "ccz", "cs" and "csdg". "rzx" will reimport in the expected manner; it won't be mapped to `RZXGate` fully because it's not in (Terra's mutated) `qelib1.inc`, but it will get pulled back in semi-correctly, up to the failures of Terra to handle parametrised gates properly during export. | 2023-03-10T20:17:09Z | [] | [] |
Traceback (most recent call last):
...
...
File ".../lib/python3.9/site-packages/qiskit/qasm/qasmparser.py", line 138, in verify_as_gate
raise QasmError(
qiskit.qasm.exceptions.QasmError: "Cannot find gate definition for 'csdg', line 4 file line 4 file test.qasm"
| 2,230 |
|||
Qiskit/qiskit | Qiskit__qiskit-9786 | 2ce129a14279a746d309f00e311b930ddbfe633c | diff --git a/qiskit/transpiler/passes/utils/gate_direction.py b/qiskit/transpiler/passes/utils/gate_direction.py
--- a/qiskit/transpiler/passes/utils/gate_direction.py
+++ b/qiskit/transpiler/passes/utils/gate_direction.py
@@ -166,6 +166,8 @@ def _run_coupling_map(self, dag, wire_map, edges=None):
continue
if len(node.qargs) != 2:
continue
+ if dag.has_calibration_for(node):
+ continue
qargs = (wire_map[node.qargs[0]], wire_map[node.qargs[1]])
if qargs not in edges and (qargs[1], qargs[0]) not in edges:
raise TranspilerError(
@@ -209,6 +211,8 @@ def _run_target(self, dag, wire_map):
continue
if len(node.qargs) != 2:
continue
+ if dag.has_calibration_for(node):
+ continue
qargs = (wire_map[node.qargs[0]], wire_map[node.qargs[1]])
swapped = (qargs[1], qargs[0])
if node.name in self._static_replacements:
| Routing pass does not account for calibrations when assessing gate direction
### Environment
- **Qiskit Terra version**: 0.23.2
- **Python version**: 3.10.9
- **Operating system**: Fedora Linux 37
### What is happening?
When transpiling a circuit with a custom gate that has a calibration, the gate direction pass does not check the circuit's calibrations for the gate. It only checks the target and a special-cased list of gates.
### How can we reproduce the issue?
```python
from qiskit import QuantumCircuit, pulse, transpile
from qiskit.circuit.gate import Gate
from qiskit.circuit.library import CXGate
from qiskit.transpiler import Target
# Placeholder schedule because the schedule content does not matter
sched = pulse.ScheduleBlock()
# Custom target with one two qubit gate added so that the target coupling map is connected
target = Target(num_qubits=2)
target.add_instruction(CXGate(), properties={(0, 1): None})
gate = Gate("my_2q_gate", 2, [])
circ = QuantumCircuit(2)
circ.append(gate, (0, 1))
circ.add_calibration(gate, (0, 1), sched)
transpile(circ, target=target, optimization_level=0)
```
Running this code produces:
```
Traceback (most recent call last):
File "/reverse.py", line 16, in <module>
transpile(circ, target=target, optimization_level=1)
File "/lib/python3.10/site-packages/qiskit/compiler/transpiler.py", line 381, in transpile
_serial_transpile_circuit(
File "/lib/python3.10/site-packages/qiskit/compiler/transpiler.py", line 474, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File "/lib/python3.10/site-packages/qiskit/transpiler/passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File "/lib/python3.10/site-packages/qiskit/transpiler/passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/lib/python3.10/site-packages/qiskit/transpiler/passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/lib/python3.10/site-packages/qiskit/transpiler/runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/lib/python3.10/site-packages/qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/lib/python3.10/site-packages/qiskit/transpiler/runningpassmanager.py", line 202, in _run_this_pass
new_dag = pass_.run(dag)
File "/lib/python3.10/site-packages/qiskit/transpiler/passes/utils/gate_direction.py", line 300, in run
return self._run_target(dag, layout_map)
File "/lib/python3.10/site-packages/qiskit/transpiler/passes/utils/gate_direction.py", line 270, in _run_target
raise TranspilerError(
qiskit.transpiler.exceptions.TranspilerError: "Flipping of gate direction is only supported for ['cx', 'cz', 'ecr'] at this time, not 'my_2q_gate'."
```
### What should happen?
Transpilation should run without error. It should leave the circuit unmodified.
### Any suggestions?
In `gate_direction.py`, right before/after the target is checked in `_run_target`, the circuit's calibrations should be checked for the gate and no exception should be raised if the calibration is there.
Additionally, there may be a second bug here: the error message is confusingly about flipping the gate direction even though the calibration has the correct direction present. A "gate not found" type of message would be more appropriate. I wonder if there are other ways to trigger the "flipping the gate" message where the real issue is that the gate was not found at all.
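Concretely, the suggested guard boils down to checking `DAGCircuit.has_calibration_for` before complaining about direction, which is what the patch above adds. A small check of that API against the reproduction circuit (diagnostic sketch only):
```python
from qiskit import QuantumCircuit, pulse
from qiskit.circuit import Gate
from qiskit.converters import circuit_to_dag

gate = Gate("my_2q_gate", 2, [])
circ = QuantumCircuit(2)
circ.append(gate, (0, 1))
circ.add_calibration(gate, (0, 1), pulse.ScheduleBlock())

dag = circuit_to_dag(circ)
for node in dag.two_qubit_ops():
    # A node with an attached calibration can be skipped by the pass instead
    # of raising "Flipping of gate direction is only supported ...".
    print(node.op.name, dag.has_calibration_for(node))  # my_2q_gate True
```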
This issue was first noticed in the [qiskit-dynamics PR](https://github.com/Qiskit/qiskit-dynamics/pull/193#discussion_r1126753960) for the DynamicsBackend tutorial. In most cases, we are working only with gates already in the Target so this issue does not come up.
| 2023-03-13T14:52:14Z | [] | [] |
Traceback (most recent call last):
File "/reverse.py", line 16, in <module>
transpile(circ, target=target, optimization_level=1)
File "/lib/python3.10/site-packages/qiskit/compiler/transpiler.py", line 381, in transpile
_serial_transpile_circuit(
File "/lib/python3.10/site-packages/qiskit/compiler/transpiler.py", line 474, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File "/lib/python3.10/site-packages/qiskit/transpiler/passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File "/lib/python3.10/site-packages/qiskit/transpiler/passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/lib/python3.10/site-packages/qiskit/transpiler/passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/lib/python3.10/site-packages/qiskit/transpiler/runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/lib/python3.10/site-packages/qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/lib/python3.10/site-packages/qiskit/transpiler/runningpassmanager.py", line 202, in _run_this_pass
new_dag = pass_.run(dag)
File "/lib/python3.10/site-packages/qiskit/transpiler/passes/utils/gate_direction.py", line 300, in run
return self._run_target(dag, layout_map)
File "/lib/python3.10/site-packages/qiskit/transpiler/passes/utils/gate_direction.py", line 270, in _run_target
raise TranspilerError(
qiskit.transpiler.exceptions.TranspilerError: "Flipping of gate direction is only supported for ['cx', 'cz', 'ecr'] at this time, not 'my_2q_gate'."
| 2,231 |
||||
Qiskit/qiskit | Qiskit__qiskit-9789 | 648da26c6a0dc6fa9710c639e9f37d96ce426ea0 | diff --git a/qiskit/compiler/transpiler.py b/qiskit/compiler/transpiler.py
--- a/qiskit/compiler/transpiler.py
+++ b/qiskit/compiler/transpiler.py
@@ -645,6 +645,11 @@ def _parse_transpile_args(
timing_constraints = target.timing_constraints()
if backend_properties is None:
backend_properties = target_to_backend_properties(target)
+ # If target is not specified and any hardware constraint object is
+ # manually specified then do not use the target from the backend as
+ # it is invalidated by a custom basis gate list or a custom coupling map
+ elif basis_gates is None and coupling_map is None:
+ target = _parse_target(backend, target)
basis_gates = _parse_basis_gates(basis_gates, backend)
initial_layout = _parse_initial_layout(initial_layout, circuits)
@@ -658,7 +663,6 @@ def _parse_transpile_args(
callback = _parse_callback(callback, num_circuits)
durations = _parse_instruction_durations(backend, instruction_durations, dt, circuits)
timing_constraints = _parse_timing_constraints(backend, timing_constraints, num_circuits)
- target = _parse_target(backend, target)
if scheduling_method and any(d is None for d in durations):
raise TranspilerError(
"Transpiling a circuit with a scheduling method"
| Gates with custom pulses fail to transpile with the new provider
### Environment
- **Qiskit Terra version**: 0.41.1
- **Python version**: 3.9.12
- **Operating system**: Linux
### What is happening?
Defining a new gate, and attaching pulses to it, triggers an error during transpilation. The same code works well with the old provider.
### How can we reproduce the issue?
```python
from qiskit import QuantumCircuit, pulse, transpile, IBMQ
from qiskit.circuit import Gate
from qiskit.pulse import InstructionScheduleMap
from qiskit_ibm_provider import IBMProvider
provider = IBMProvider()
# provider = IBMQ.load_account()
backend = provider.backend.ibmq_lima
amp = 0.1
sigma = 10
num_samples = 128
gauss = pulse.library.Gaussian(num_samples, amp, sigma, name="Parametric Gauss")
with pulse.build(backend, default_alignment="sequential") as gatepulse:
    with pulse.align_left():
        pulse.play(gauss, pulse.control_channels(0, 1)[0])
inst_map = InstructionScheduleMap()
inst_map.add("newgate", [0, 1], gatepulse)
newgate = Gate("newgate", 2, [])
circ = QuantumCircuit(2)
circ.append(newgate, [0, 1])
circ = transpile(circ, backend, inst_map=inst_map, basis_gates=["newgate"])
```
Output:
```
Traceback (most recent call last):
File "/mnt/c/Users/143721756/wsl/balagan/szxbug.py", line 31, in <module>
circ = transpile(circ, backend, inst_map=inst_map, basis_gates=["newgate"])
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 381, in transpile
_serial_transpile_circuit(
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 474, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 202, in _run_this_pass
new_dag = pass_.run(dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passes/basis/unroll_custom_definitions.py", line 102, in run
raise QiskitError(
qiskit.exceptions.QiskitError: "Cannot unroll the circuit to the given basis, ['newgate']. Instruction newgate not found in equivalence library and no rule found to expand."
```
### What should happen?
Should work.
### Any suggestions?
@haggaila says:
An exception happens in file `qiskit/transpiler/passes/basis/unroll_custom_definitions.py`, in the method `UnrollCustomDefinitions.run`. With the old provider, `self._target` is `None` upon entrance to the function, and everything gets properly initialized (in particular `device_insts` contains the added `newgate`). With the new IBM provider, `self._target` is initialized to something, resulting in an exception.
@nkanazawa1989 please share your observations.
| I didn't check the main branch.
It's a known issue: https://github.com/Qiskit/qiskit-terra/issues/9489. This will be fixed in the next release.
Sorry, this is a separate issue from #9489. One must fix
https://github.com/Qiskit/qiskit-terra/blob/6829bb18cf791960896fe72b9be9611aac44155a/qiskit/transpiler/passes/basis/unroll_custom_definitions.py#L60-L62
this logic. With V2, the target is always provided from the backend and `basis_gates` is ignored. With #9489 this will probably also be fixed (the target and the `operation_names` property are updated with the inst_map); however, if you remove the `inst_map` (i.e. you may want to manually add a calibration after transpile), it doesn't work.
> you may want to manually add calibration after transpile
Maybe I misunderstood, but if the calibrations aren't attached during the transpilation, I would think it's correct behaviour for us to error out saying "we can't translate the circuit for this backend".
(to be clear: I agree that the rest of the issue stands regardless of that point)
I'm fine with raising an error (to be on the strict side). However, this workflow is currently supported with backend V1 (because `basis_gates` is context-less).
```python
from qiskit.circuit import Gate, QuantumCircuit
from qiskit.pulse import Schedule
from qiskit import transpile
g = Gate("custom", 1, [])
qc = QuantumCircuit(1)
qc.append(g, [0])
qc_t = transpile(qc, basis_gates=["custom"])
sched1 = Schedule(name="sched1")
sched2 = Schedule(name="sched2")
qc_t1 = qc_t.copy()
qc_t1.add_calibration(g, (0,), sched1)
qc_t2 = qc_t.copy()
qc_t2.add_calibration(g, (0,), sched2)
```
Yeah, the equivalent in `BackendV2` would be to add `custom` as an instruction supported on all single qubits to the given `Target` (_potentially_ we can do this automatically if given `basis_gates`, but I wouldn't like to speak to Matthew's design plans). If that hasn't happened, then we're right to error out.
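In code, that `BackendV2`-style equivalent would look roughly like the following sketch (a real target would normally also carry error and duration properties; the gate name and qubit count come from the example above):
```python
from qiskit.circuit import Gate
from qiskit.transpiler import Target

target = Target(num_qubits=2)
# Declare "custom" as a one-qubit operation available on every qubit; this is
# the Target-based analogue of passing basis_gates=["custom"].
custom = Gate("custom", 1, [])
target.add_instruction(custom, {(q,): None for q in range(target.num_qubits)})
print(target.operation_names)  # includes 'custom'
```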
This is something that is coming from the mix of `Target` and `basis_gates` as an argument in `transpile()` the individual passes tend to favor `Target` as higher priority than `basis_gates` if they're given both because in general target contains a stronger set of constraints and is what should be used if the constructor was given both. The individual pass cannot know that the `basis_gates` argument was manually passed in and overrides the `Target`. In general `transpile()` should invalidate the target if both a backend and any hardware constraint arguments are provided. The quick fix here (for backport) would be to update `transpile()` to set `target` to `None` if `coupling_map` or `basis_gates` (or arguably `backend_properties`, ` instruction_durations`, or `inst_map` although these typically don't override the supported operations or connectivity and are used opportunistically by most passes for extra information) are set.
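The quick fix described here amounts to only taking the `Target` from the backend when no loose constraint overrides it, which is what the patch at the top of this entry does inside `_parse_transpile_args`; a standalone sketch of that decision logic (the function name is illustrative):
```python
def resolve_target(backend, target, basis_gates, coupling_map):
    """Pick the Target the pass manager should see, if any."""
    if target is not None:
        return target                      # an explicit target always wins
    if basis_gates is None and coupling_map is None:
        return getattr(backend, "target", None)
    # A custom basis-gate list or coupling map invalidates the backend's
    # Target, so fall back to the loose constraints instead.
    return None
```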
The longer term fix, hopefully for 0.24, is to implement: https://github.com/Qiskit/qiskit-terra/issues/9256 and use a target for everything from `transpile()`. That path would mean that we always generate a target and in the case of `transpile(qc, backend, basis_gates=[..])` we'd build a new `Target` from `basis_gates` instead of from the backend. But right now I'm blocked on review for https://github.com/Qiskit/qiskit-terra/pull/9263 and optionally https://github.com/Qiskit/qiskit-terra/pull/9255 (although it looks like there is a merge conflict issue there) so I haven't started implementing it yet.
In my ideal world we'd honestly just raise an error if a user specifies both a backend and basis gates or a coupling map, because the intent there is kind of ambiguous from my PoV. But that's kind of hard behavior to walk back.
Dropping all arguments from transpile seems much cleaner, but this requires the user to learn the API of `Target`, which is really different from the conventional transpiler args. Generating a Target on the fly from the transpiler args sounds reasonable to me. My PR https://github.com/Qiskit/qiskit-terra/pull/9587 allows one to build a target from the inst_map.
Well, I wasn't proposing we drop the arguments; it was more that it's not clear what a user's intent is if they specify `transpile(qc, backend, basis_gates=['r', 'custom', 'ccx'])` (especially if `backend` is a `BackendV2`), since a backend and a basis gate list specify overlapping information, but the backend's version is much more nuanced. My preference would be to raise an exception if someone does that. But that wasn't a serious proposal either, because of the backwards compatibility issues with doing that. | 2023-03-13T19:46:21Z | [] | [] |
Traceback (most recent call last):
File "/mnt/c/Users/143721756/wsl/balagan/szxbug.py", line 31, in <module>
circ = transpile(circ, backend, inst_map=inst_map, basis_gates=["newgate"])
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 381, in transpile
_serial_transpile_circuit(
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 474, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 202, in _run_this_pass
new_dag = pass_.run(dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passes/basis/unroll_custom_definitions.py", line 102, in run
raise QiskitError(
qiskit.exceptions.QiskitError: "Cannot unroll the circuit to the given basis, ['newgate']. Instruction newgate not found in equivalence library and no rule found to expand."
| 2,232 |
|||
Qiskit/qiskit | Qiskit__qiskit-9792 | 44cda51974e29fc72fa7e428a14b00af48b32562 | diff --git a/qiskit/compiler/transpiler.py b/qiskit/compiler/transpiler.py
--- a/qiskit/compiler/transpiler.py
+++ b/qiskit/compiler/transpiler.py
@@ -645,6 +645,11 @@ def _parse_transpile_args(
timing_constraints = target.timing_constraints()
if backend_properties is None:
backend_properties = target_to_backend_properties(target)
+ # If target is not specified and any hardware constraint object is
+ # manually specified then do not use the target from the backend as
+ # it is invalidated by a custom basis gate list or a custom coupling map
+ elif basis_gates is None and coupling_map is None:
+ target = _parse_target(backend, target)
basis_gates = _parse_basis_gates(basis_gates, backend)
initial_layout = _parse_initial_layout(initial_layout, circuits)
@@ -658,7 +663,6 @@ def _parse_transpile_args(
callback = _parse_callback(callback, num_circuits)
durations = _parse_instruction_durations(backend, instruction_durations, dt, circuits)
timing_constraints = _parse_timing_constraints(backend, timing_constraints, num_circuits)
- target = _parse_target(backend, target)
if scheduling_method and any(d is None for d in durations):
raise TranspilerError(
"Transpiling a circuit with a scheduling method"
| Gates with custom pulses fail to transpile with the new provider
### Environment
- **Qiskit Terra version**: 0.41.1
- **Python version**: 3.9.12
- **Operating system**: Linux
### What is happening?
Defining a new gate, and attaching pulses to it, triggers an error during transpilation. The same code works well with the old provider.
### How can we reproduce the issue?
```python
from qiskit import QuantumCircuit, pulse, transpile, IBMQ
from qiskit.circuit import Gate
from qiskit.pulse import InstructionScheduleMap
from qiskit_ibm_provider import IBMProvider
provider = IBMProvider()
# provider = IBMQ.load_account()
backend = provider.backend.ibmq_lima
amp = 0.1
sigma = 10
num_samples = 128
gauss = pulse.library.Gaussian(num_samples, amp, sigma, name="Parametric Gauss")
with pulse.build(backend, default_alignment="sequential") as gatepulse:
    with pulse.align_left():
        pulse.play(gauss, pulse.control_channels(0, 1)[0])
inst_map = InstructionScheduleMap()
inst_map.add("newgate", [0, 1], gatepulse)
newgate = Gate("newgate", 2, [])
circ = QuantumCircuit(2)
circ.append(newgate, [0, 1])
circ = transpile(circ, backend, inst_map=inst_map, basis_gates=["newgate"])
```
Output:
```
Traceback (most recent call last):
File "/mnt/c/Users/143721756/wsl/balagan/szxbug.py", line 31, in <module>
circ = transpile(circ, backend, inst_map=inst_map, basis_gates=["newgate"])
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 381, in transpile
_serial_transpile_circuit(
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 474, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 202, in _run_this_pass
new_dag = pass_.run(dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passes/basis/unroll_custom_definitions.py", line 102, in run
raise QiskitError(
qiskit.exceptions.QiskitError: "Cannot unroll the circuit to the given basis, ['newgate']. Instruction newgate not found in equivalence library and no rule found to expand."
```
### What should happen?
Should work.
### Any suggestions?
@haggaila says:
An exception happens in file `qiskit/transpiler/passes/basis/unroll_custom_definitions.py`, in the method `UnrollCustomDefinitions.run`. With the old provider, `self._target` is `None` upon entrance to the function, and everything gets properly initialized (in particular `device_insts` contains the added `newgate`). With the new IBM provider, `self._target` is initialized to something, resulting in an exception.
@nkanazawa1989 please share your observations.
| I didn't check the main branch.
It's a known issue: https://github.com/Qiskit/qiskit-terra/issues/9489. This will be fixed in the next release.
Sorry, this is a separate issue from #9489. One must fix
https://github.com/Qiskit/qiskit-terra/blob/6829bb18cf791960896fe72b9be9611aac44155a/qiskit/transpiler/passes/basis/unroll_custom_definitions.py#L60-L62
this logic. With V2, the target is always provided from the backend and `basis_gates` is ignored. With #9489 this will probably also be fixed (the target and the `operation_names` property are updated with the inst_map); however, if you remove the `inst_map` (i.e. you may want to manually add a calibration after transpile), it doesn't work.
> you may want to manually add calibration after transpile
Maybe I misunderstood, but if the calibrations aren't attached during the transpilation, I would think it's correct behaviour for us to error out saying "we can't translate the circuit for this backend".
(to be clear: I agree that the rest of the issue stands regardless of that point)
I'm fine with raising an error (to be on the strict side). However, this workflow is currently supported with backend V1 (because `basis_gates` is context-less).
```python
from qiskit.circuit import Gate, QuantumCircuit
from qiskit.pulse import Schedule
from qiskit import transpile
g = Gate("custom", 1, [])
qc = QuantumCircuit(1)
qc.append(g, [0])
qc_t = transpile(qc, basis_gates=["custom"])
sched1 = Schedule(name="sched1")
sched2 = Schedule(name="sched2")
qc_t1 = qc_t.copy()
qc_t1.add_calibration(g, (0,), sched1)
qc_t2 = qc_t.copy()
qc_t2.add_calibration(g, (0,), sched2)
```
Yeah, the equivalent in `BackendV2` would be to add `custom` as an instruction supported on all single qubits to the given `Target` (_potentially_ we can do this automatically if given `basis_gates`, but I wouldn't like to speak to Matthew's design plans). If that hasn't happened, then we're right to error out.
This is something that comes from the mix of `Target` and `basis_gates` as arguments in `transpile()`: the individual passes tend to favor `Target` over `basis_gates` if they're given both, because in general the target contains a stronger set of constraints and is what should be used if the constructor was given both. The individual pass cannot know that the `basis_gates` argument was manually passed in and overrides the `Target`. In general `transpile()` should invalidate the target if both a backend and any hardware constraint arguments are provided. The quick fix here (for backport) would be to update `transpile()` to set `target` to `None` if `coupling_map` or `basis_gates` (or arguably `backend_properties`, `instruction_durations`, or `inst_map`, although these typically don't override the supported operations or connectivity and are used opportunistically by most passes for extra information) are set.
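A minimal sketch of that short-term idea (hypothetical helper, not the actual `transpile()` code):
```python
# Hypothetical helper illustrating the backportable quick fix described above:
# drop the backend-derived Target whenever loose constraints were passed in,
# so the explicit basis_gates / coupling_map win.
def _resolve_target(target, basis_gates=None, coupling_map=None):
    if target is not None and (basis_gates is not None or coupling_map is not None):
        return None
    return target
```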
The longer term fix, hopefully for 0.24, is to implement: https://github.com/Qiskit/qiskit-terra/issues/9256 and use a target for everything from `transpile()`. That path would mean that we always generate a target and in the case of `transpile(qc, backend, basis_gates=[..])` we'd build a new `Target` from `basis_gates` instead of from the backend. But right now I'm blocked on review for https://github.com/Qiskit/qiskit-terra/pull/9263 and optionally https://github.com/Qiskit/qiskit-terra/pull/9255 (although it looks like there is a merge conflict issue there) so I haven't started implementing it yet.
In my ideal world we'd honestly just raise an error if a user specifies both a backend and basis gates or a coupling map, because the intent there is kind of ambiguous from my PoV. But that's kind of hard behavior to walk back. Regardless, `transpile(qc, basis_gates=[..])` should always be valid and just means treat them as globally available and ideal.
Dropping all arguments from transpile seems much cleaner, but it requires the user to learn the API of `Target`, which is really different from the conventional transpiler args. Generating a Target on the fly from the transpiler args sounds reasonable to me. My PR https://github.com/Qiskit/qiskit-terra/pull/9587 allows one to build a target from the inst_map.
Well, I wasn't proposing we drop the arguments; it's more that it's not clear what a user's intent is if they specify `transpile(qc, backend, basis_gates=['r', 'custom', 'ccx'])` (especially if `backend` is a `BackendV2`), since a backend and a basis gate list specify overlapping information, but the backend's version is much more nuanced. My preference would be to raise an exception if someone does that. But that wasn't a serious proposal either, because of the backwards compatibility issues with doing that.
I pushed up https://github.com/Qiskit/qiskit-terra/pull/9789 for the short-term fix to this which can be backported for 0.23.3. Longer term I'd still like to get #9256 for 0.24.0. | 2023-03-14T14:19:05Z | [] | [] |
Traceback (most recent call last):
File "/mnt/c/Users/143721756/wsl/balagan/szxbug.py", line 31, in <module>
circ = transpile(circ, backend, inst_map=inst_map, basis_gates=["newgate"])
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 381, in transpile
_serial_transpile_circuit(
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/compiler/transpiler.py", line 474, in _serial_transpile_circuit
result = pass_manager.run(circuit, callback=callback, output_name=output_name)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 528, in run
return super().run(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 228, in run
return self._run_single_circuit(circuits, output_name, callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passmanager.py", line 283, in _run_single_circuit
result = running_passmanager.run(circuit, output_name=output_name, callback=callback)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 125, in run
dag = self._do_pass(pass_, dag, passset.options)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 173, in _do_pass
dag = self._run_this_pass(pass_, dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py", line 202, in _run_this_pass
new_dag = pass_.run(dag)
File "/home/yaelbh/miniconda3/envs/env1/lib/python3.9/site-packages/qiskit/transpiler/passes/basis/unroll_custom_definitions.py", line 102, in run
raise QiskitError(
qiskit.exceptions.QiskitError: "Cannot unroll the circuit to the given basis, ['newgate']. Instruction newgate not found in equivalence library and no rule found to expand."
| 2,234 |
|||
apache/airflow | apache__airflow-1056 | b52e89203248a89d3d8f3662c5f440aeba2e025a | diff --git a/airflow/operators/bash_operator.py b/airflow/operators/bash_operator.py
--- a/airflow/operators/bash_operator.py
+++ b/airflow/operators/bash_operator.py
@@ -1,7 +1,6 @@
from builtins import bytes
import logging
-import sys
from subprocess import Popen, STDOUT, PIPE
from tempfile import gettempdir, NamedTemporaryFile
@@ -22,6 +21,7 @@ class BashOperator(BaseOperator):
of inheriting the current process environment, which is the default
behavior.
:type env: dict
+ :type output_encoding: output encoding of bash command
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
@@ -33,6 +33,7 @@ def __init__(
bash_command,
xcom_push=False,
env=None,
+ output_encoding='utf-8',
*args, **kwargs):
"""
If xcom_push is True, the last line written to stdout will also
@@ -42,6 +43,7 @@ def __init__(
self.bash_command = bash_command
self.env = env
self.xcom_push_flag = xcom_push
+ self.output_encoding = output_encoding
def execute(self, context):
"""
@@ -70,7 +72,7 @@ def execute(self, context):
logging.info("Output:")
line = ''
for line in iter(sp.stdout.readline, b''):
- line = line.decode().strip()
+ line = line.decode(self.output_encoding).strip()
logging.info(line)
sp.wait()
logging.info("Command exited with "
| UnicodeDecodeError in bash_operator.py
Hi,
I see a lot of these errors when running `airflow backfill`:
```
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 467, in format
s = self._fmt % record.__dict__
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 13: ordinal not in range(128)
Logged from file bash_operator.py, line 72
```
| I encountered the same problem. I have hacked the [line#73 in airflow/operators/bash_operator.py](https://github.com/airbnb/airflow/blob/master/airflow/operators/bash_operator.py#L73) to
`line = line.decode('utf-8').strip()` to fix my problem.
error log
```
[2015-12-22 18:17:51,354] {bash_operator.py:70} INFO - Output:
[2015-12-22 18:17:51,359] {models.py:1041} ERROR - 'ascii' codec can't decode byte 0xe5 in position 4: ordinal not in range(128)
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow/models.py", line 1000, in run
result = task_copy.execute(context=context)
File "/usr/local/lib/python2.7/dist-packages/airflow/operators/bash_operator.py", line 73, in execute
line = line.decode().strip()
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe5 in position 4: ordinal not in range(128)
[2015-12-22 18:17:51,388] {models.py:1077} ERROR - 'ascii' codec can't decode byte 0xe5 in position 4: ordinal not in range(128)
```
There is already a `help-wanted` tag posted on #721
@GeiliCode How about submitting a pull request for this? I think it's a common problem. Thanks.
@haitaoyao I am not familiar with encodings, so I don't know how to deal with encodings that are not `ascii` or `utf-8`.
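For reference, the patch above adds an `output_encoding` argument to `BashOperator`; a minimal usage sketch (hypothetical dag_id and task_id, assuming the patched operator):
```python
from datetime import datetime
from airflow import DAG
from airflow.operators.bash_operator import BashOperator

dag = DAG('utf8_demo', start_date=datetime(2016, 1, 1), schedule_interval=None)

t1 = BashOperator(
    task_id='print_utf8_output',
    bash_command='echo "héllo wörld"',
    output_encoding='utf-8',  # decode the command's stdout as UTF-8
    dag=dag,
)
```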
| 2016-02-22T07:11:38Z | [] | [] |
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 467, in format
s = self._fmt % record.__dict__
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 13: ordinal not in range(128)
| 2,257 |
|||
apache/airflow | apache__airflow-11509 | 31dc6cf82734690bf95ade4554e7ebb183055311 | diff --git a/provider_packages/refactor_provider_packages.py b/provider_packages/refactor_provider_packages.py
--- a/provider_packages/refactor_provider_packages.py
+++ b/provider_packages/refactor_provider_packages.py
@@ -432,6 +432,70 @@ def amazon_package_filter(node: LN, capture: Capture, filename: Filename) -> boo
.rename("airflow.providers.amazon.common.utils.email")
)
+ def refactor_elasticsearch_package(self):
+ """
+ Fixes to "elasticsearch" providers package.
+
+ Copies some of the classes used from core Airflow to "common.utils" package of
+ the provider and renames imports to use them from there.
+
+ We copy file_task_handler.py and change import as in example diff:
+
+ .. code-block:: diff
+
+ --- ./airflow/providers/elasticsearch/log/es_task_handler.py
+ +++ ./airflow/providers/elasticsearch/log/es_task_handler.py
+ @@ -24,7 +24,7 @@
+ from airflow.configuration import conf
+ from airflow.models import TaskInstance
+ from airflow.utils import timezone
+ from airflow.utils.helpers import parse_template_string
+ -from airflow.utils.log.file_task_handler import FileTaskHandler
+ +from airflow.providers.elasticsearch.common.utils.log.file_task_handler import FileTaskHandler
+ from airflow.utils.log.json_formatter import JSONFormatter
+ from airflow.utils.log.logging_mixin import LoggingMixin
+
+ """
+
+ def elasticsearch_package_filter(node: LN, capture: Capture, filename: Filename) -> bool:
+ return filename.startswith("./airflow/providers/elasticsearch/")
+
+ os.makedirs(
+ os.path.join(get_target_providers_package_folder("elasticsearch"), "common", "utils", "log"),
+ exist_ok=True,
+ )
+ copyfile(
+ os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
+ os.path.join(get_target_providers_package_folder("elasticsearch"), "common", "__init__.py"),
+ )
+ copyfile(
+ os.path.join(get_source_airflow_folder(), "airflow", "utils", "__init__.py"),
+ os.path.join(
+ get_target_providers_package_folder("elasticsearch"), "common", "utils", "__init__.py"
+ ),
+ )
+ copyfile(
+ os.path.join(get_source_airflow_folder(), "airflow", "utils", "log", "__init__.py"),
+ os.path.join(
+ get_target_providers_package_folder("elasticsearch"), "common", "utils", "log", "__init__.py"
+ ),
+ )
+ copyfile(
+ os.path.join(get_source_airflow_folder(), "airflow", "utils", "log", "file_task_handler.py"),
+ os.path.join(
+ get_target_providers_package_folder("elasticsearch"),
+ "common",
+ "utils",
+ "log",
+ "file_task_handler.py",
+ ),
+ )
+ (
+ self.qry.select_module("airflow.utils.log.file_task_handler")
+ .filter(callback=elasticsearch_package_filter)
+ .rename("airflow.providers.elasticsearch.common.utils.log.file_task_handler")
+ )
+
def refactor_google_package(self):
r"""
Fixes to "google" providers package.
@@ -651,6 +715,7 @@ def kubernetes_package_filter(node: LN, capture: Capture, filename: Filename) ->
def do_refactor(self, in_process: bool = False) -> None: # noqa
self.rename_deprecated_modules()
self.refactor_amazon_package()
+ self.refactor_elasticsearch_package()
self.refactor_google_package()
self.refactor_odbc_package()
self.remove_tags()
| Elasticsearch Backport Provider Incompatible with Airflow 1.10.12
**Apache Airflow version**: 1.10.12
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): 1.16.9
**Environment**:
- **Cloud provider or hardware configuration**: AWS
- **OS** (e.g. from /etc/os-release):
- **Kernel** (e.g. `uname -a`):
- **Install tools**: Docker image running in k8s Pods
- **Others**: Rancher-provisioned k8s clusters
**What happened**:
Configuring the latest version of the Elasticsearch backport provider as my log handler via `config/airflow_local_settings.py` resulted in an error on the webserver when trying to read logs from Elasticsearch:
```
[2020-10-12 21:02:00,487] {app.py:1892} ERROR - Exception on /get_logs_with_metadata [GET]
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 121, in wrapper
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 56, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/views.py", line 733, in get_logs_with_metadata
logs, metadata = _get_logs_with_metadata(try_number, metadata)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/views.py", line 724, in _get_logs_with_metadata
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/log/file_task_handler.py", line 194, in read
logs[i] += log
TypeError: can only concatenate str (not "list") to str
```
Here is the relevant section of my customized `airflow_local_settings.py` file with the updated Elasticsearch handler from the backport provider:
```
...
elif ELASTICSEARCH_HOST:
ELASTICSEARCH_LOG_ID_TEMPLATE: str = conf.get('elasticsearch', 'LOG_ID_TEMPLATE')
ELASTICSEARCH_END_OF_LOG_MARK: str = conf.get('elasticsearch', 'END_OF_LOG_MARK')
ELASTICSEARCH_FRONTEND: str = conf.get('elasticsearch', 'frontend')
ELASTICSEARCH_WRITE_STDOUT: bool = conf.getboolean('elasticsearch', 'WRITE_STDOUT')
ELASTICSEARCH_JSON_FORMAT: bool = conf.getboolean('elasticsearch', 'JSON_FORMAT')
ELASTICSEARCH_JSON_FIELDS: str = conf.get('elasticsearch', 'JSON_FIELDS')
ELASTIC_REMOTE_HANDLERS: Dict[str, Dict[str, Union[str, bool]]] = {
'task': {
'class': 'airflow.providers.elasticsearch.log.es_task_handler.ElasticsearchTaskHandler',
'formatter': 'airflow',
'base_log_folder': str(os.path.expanduser(BASE_LOG_FOLDER)),
'log_id_template': ELASTICSEARCH_LOG_ID_TEMPLATE,
'filename_template': FILENAME_TEMPLATE,
'end_of_log_mark': ELASTICSEARCH_END_OF_LOG_MARK,
'host': ELASTICSEARCH_HOST,
'frontend': ELASTICSEARCH_FRONTEND,
'write_stdout': ELASTICSEARCH_WRITE_STDOUT,
'json_format': ELASTICSEARCH_JSON_FORMAT,
'json_fields': ELASTICSEARCH_JSON_FIELDS
},
}
LOGGING_CONFIG['handlers'].update(ELASTIC_REMOTE_HANDLERS)
...
```
**What you expected to happen**:
Airflow's web UI properly displays the logs from Elasticsearch
**How to reproduce it**:
Configure custom logging via `config/airflow_local_settings.py` to `airflow.providers.elasticsearch.log.es_task_handler.ElasticsearchTaskHandler` and set the `logging_config_class` in `airflow.cfg`
When a task has been run, try to view its logs in the web UI and check the webserver logs to see the error above
| This looks like a real problem. Should I assign you to this issue? Do you want to deal with it?
I can take a stab - I think we just need to backport some of the type handling from `airflow/utils/log/file_task_handler.py` on `master` to the 1.10 branch
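For context, a rough sketch of the kind of type handling meant here (illustrative only; the helper name is made up and this is not the actual `file_task_handler.py` code):
```python
def _append_log(existing, log):
    # The 1.10 handler assumes each log chunk is a plain string, while the
    # Elasticsearch handler can hand back a list, hence the TypeError above.
    if isinstance(log, list):
        log = "\n".join(str(chunk) for chunk in log)
    return existing + log
```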
@marcusianlevine What do you think about vendoring the `file_task_handler` module into the `elasticsearch` package? See: https://github.com/apache/airflow/blob/master/provider_packages/refactor_provider_packages.py#L427-L436
This will allow us to fix the problem without having to make any changes to Airflow core.
Sure, I think that will work, I'll open a PR later today | 2020-10-13T17:06:51Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 121, in wrapper
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/decorators.py", line 56, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/views.py", line 733, in get_logs_with_metadata
logs, metadata = _get_logs_with_metadata(try_number, metadata)
File "/usr/local/lib/python3.7/site-packages/airflow/www_rbac/views.py", line 724, in _get_logs_with_metadata
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/log/file_task_handler.py", line 194, in read
logs[i] += log
TypeError: can only concatenate str (not "list") to str
| 2,272 |
|||
apache/airflow | apache__airflow-11723 | b946b4487086f6e1ed5e2ddf45fa258315d77a50 | diff --git a/airflow/cli/commands/task_command.py b/airflow/cli/commands/task_command.py
--- a/airflow/cli/commands/task_command.py
+++ b/airflow/cli/commands/task_command.py
@@ -171,6 +171,7 @@ def task_run(args, dag=None):
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
+ ti.refresh_from_db()
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
| All task logging goes to the log for try_number 1
**Apache Airflow version**: 2.0.0a1
**What happened**:
When a task fails on the first try, the log output for additional tries goes to the log for the first attempt.
**What you expected to happen**:
The logs should go to the correct log file. For the default configuration, the log filename template is `log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log`, so additional numbered `.log` files should be created.
**How to reproduce it**:
Create a test dag:
```
from datetime import timedelta
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
with DAG(
dag_id="trynumber_demo",
default_args={"start_date": days_ago(2), "retries": 1, "retry_delay": timedelta(0)},
schedule_interval=None,
) as dag:
def demo_task(ti=None):
print("Running demo_task, try_number =", ti.try_number)
if ti.try_number <= 1:
raise ValueError("Shan't")
task = PythonOperator(task_id="demo_task", python_callable=demo_task)
```
and trigger this dag:
```
$ airflow dags trigger trynumber_demo
```
then observe that `trynumber_demo/demo_task/<execution_date>/` only contains `1.log`, which contains the full output of both runs:
```
[...]
--------------------------------------------------------------------------------
[2020-10-21 13:29:07,958] {taskinstance.py:1020} INFO - Starting attempt 1 of 2
[2020-10-21 13:29:07,959] {taskinstance.py:1021} INFO -
--------------------------------------------------------------------------------
[...]
[2020-10-21 13:29:08,163] {logging_mixin.py:110} INFO - Running demo_task, try_number = 1
[2020-10-21 13:29:08,164] {taskinstance.py:1348} ERROR - Shan't
Traceback (most recent call last):
[...]
ValueError: Shan't
[2020-10-21 13:29:08,168] {taskinstance.py:1392} INFO - Marking task as UP_FOR_RETRY. dag_id=trynumber_demo, task_id=demo_task, execution_date=20201021T122907, start_date=20201021T122907, end_date=20201021T122908
[...]
[2020-10-21 13:29:09,121] {taskinstance.py:1019} INFO -
--------------------------------------------------------------------------------
[2020-10-21 13:29:09,121] {taskinstance.py:1020} INFO - Starting attempt 2 of 2
[2020-10-21 13:29:09,121] {taskinstance.py:1021} INFO -
--------------------------------------------------------------------------------
[...]
[2020-10-21 13:29:09,333] {logging_mixin.py:110} INFO - Running demo_task, try_number = 2
[2020-10-21 13:29:09,334] {python.py:141} INFO - Done. Returned value was: None
[2020-10-21 13:29:09,355] {taskinstance.py:1143} INFO - Marking task as SUCCESS.dag_id=trynumber_demo, task_id=demo_task, execution_date=20201021T122907, start_date=20201021T122909, end_date=20201021T122909
[2020-10-21 13:29:09,404] {local_task_job.py:117} INFO - Task exited with return code 0
```
The `TaskInstance()` created for the run needs to first be refreshed from the database, before setting the logging context.
| I can confirm that changing
https://github.com/apache/airflow/blob/172820db4d2009dd26fa8aef4a864fb8a3d7e78d/airflow/cli/commands/task_command.py#L172-L174
to
```
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.refresh_from_db()
ti.init_run_context(raw=args.raw)
```
fixes this issue, but I don't know whether the `refresh_..` call should perhaps be integrated into `init_run_context()`.
I found #7370, which removed that call, specifically see [these review comments](https://github.com/apache/airflow/pull/7370#discussion_r375370760):
> Does init_run_context look at any attributes of the TI? If so they won't be populated until this call.
> No, it only setups logging context :)
Unfortunately, the logging context looks at the attributes of the TI!
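A short illustration of why that matters, using the default `log_filename_template` quoted in the issue (stand-in objects and timestamp, not real Airflow classes):
```python
from jinja2 import Template

log_filename_template = "{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log"

class StaleTI:
    # A TaskInstance built fresh from the DAG file and never refreshed from
    # the DB still reports try_number 1, so every attempt writes to 1.log.
    dag_id, task_id, try_number = "trynumber_demo", "demo_task", 1

path = Template(log_filename_template).render(
    ti=StaleTI, ts="2020-10-21T12:29:07+00:00", try_number=StaleTI.try_number)
print(path)  # trynumber_demo/demo_task/2020-10-21T12:29:07+00:00/1.log
```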
@mjpieters would you like to open a PR?
Could be same as https://github.com/apache/airflow/issues/11561 (though more detail/cause here) | 2020-10-21T15:46:24Z | [] | [] |
Traceback (most recent call last):
[...]
ValueError: Shan't
| 2,279 |
|||
apache/airflow | apache__airflow-11753 | f603b36aa4a07bf98ebe3b1c81676748173b8b57 | diff --git a/airflow/www/utils.py b/airflow/www/utils.py
--- a/airflow/www/utils.py
+++ b/airflow/www/utils.py
@@ -17,12 +17,14 @@
# under the License.
import json
import time
+from typing import Any, List, Optional
from urllib.parse import urlencode
import markdown
import sqlalchemy as sqla
from flask import Markup, Response, request, url_for
from flask_appbuilder.forms import FieldConverter
+from flask_appbuilder.models.filters import Filters
from flask_appbuilder.models.sqla import filters as fab_sqlafilters
from flask_appbuilder.models.sqla.interface import SQLAInterface
from pygments import highlight, lexers
@@ -437,6 +439,43 @@ def is_utcdatetime(self, col_name):
isinstance(obj.impl, UtcDateTime)
return False
+ # This is a local fix until https://github.com/dpgaspar/Flask-AppBuilder/pull/1493 is merged and released.
+ def get(
+ self,
+ id,
+ filters: Optional[Filters] = None,
+ select_columns: Optional[List[str]] = None,
+ ) -> Any:
+ """
+ Returns the result for a model get, applies filters and supports dotted
+ notation for joins and granular selecting query columns.
+
+ :param id: The model id (pk).
+ :param filters: A Filter class that contains all filters to apply.
+ :param select_columns: A List of columns to be specifically selected.
+ on the query. Supports dotted notation.
+ :return: Model instance if found, or none
+ """
+ pk = self.get_pk_name()
+ if filters:
+ _filters = filters.copy()
+ else:
+ _filters = Filters(self.filter_converter_class, self)
+
+ if self.is_pk_composite():
+ for _pk, _id in zip(pk, id):
+ _filters.add_filter(_pk, self.FilterEqual, _id)
+ else:
+ _filters.add_filter(pk, self.FilterEqual, id)
+ query = self.session.query(self.obj)
+ item = self.apply_all(
+ query, _filters, select_columns=select_columns
+ ).one_or_none()
+ if item:
+ if hasattr(item, self.obj.__name__):
+ return getattr(item, self.obj.__name__)
+ return item
+
filter_converter_class = UtcAwareFilterConverter
| WebUI: Action on selection in task instance list yields an error
**Apache Airflow version**: v2.0.0.dev0 (latest master)
**Environment**:
- **OS**: Ubuntu 18.04.4 LTS
- **Others**: Python 3.6.9
**What happened**:
Selecting a task in the **task instance list** (*http://localhost:8080/taskinstance/list/*) and **performing an Action** on it (e.g. *Set state to 'failed'*) yields an error.
Error message:
```
Something bad has happened.
Please consider letting us know by creating a bug report using Github.
Python version: 3.6.12
Airflow version: 2.0.0.dev0
Node: emma
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/views.py", line 686, in action_post
self.datamodel.get(self._deserialize_pk_if_composite(pk)) for pk in pks
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/views.py", line 686, in <listcomp>
self.datamodel.get(self._deserialize_pk_if_composite(pk)) for pk in pks
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 870, in get
query, _filters, select_columns=select_columns
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 324, in apply_all
select_columns,
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 272, in _apply_inner_all
query = self.apply_filters(query, inner_filters)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 162, in apply_filters
return filters.apply_all(query)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/filters.py", line 295, in apply_all
query = flt.apply(query, value)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/filters.py", line 137, in apply
query, field = get_field_setup_query(query, self.model, self.column_name)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/filters.py", line 40, in get_field_setup_query
if not hasattr(model, column_name):
TypeError: hasattr(): attribute name must be string
```
**How to reproduce it**:
I wanted to take an Action on a task instance of a DAG with `schedule_interval=None`. I am attaching a minimal DAG file used for reproducing this error.
<details>
<summary>DAG file</summary>
```
from airflow import DAG
from datetime import timedelta, datetime
from airflow.operators.bash_operator import BashOperator
dag = DAG(
'simple_dag',
default_args= {
'owner': 'airflow',
'depends_on_past': False,
'retries' : 0,
'start_date': datetime(1970, 1, 1),
'retry_delay': timedelta(seconds=30),
},
description='',
schedule_interval=None,
catchup=False,
)
t1 = BashOperator(
task_id='task1',
bash_command='echo 1',
dag=dag
)
```
</details>
**Anything else we need to know**:
Taking the same Action on the DagRun list *(http://localhost:8080/dagrun/list)* works.
Great project btw 🙌. Really enjoying using it.
| Thanks for opening your first issue here! Be sure to follow the issue template!
Any ideas here @ashb @mik-laj @ryanahamilton? I was able to reproduce this.
I was able to reproduce as well. The UI refresh updates didn't touch anything related to this. I thought the recent FAB upgrade from 3.0 to 3.1 could be the culprit, but I just tested the commit prior to that upgrade and was able to reproduce with FAB 3.0 as well.
Seeing this too; at `/taskinstance/list/?_flt_0_state=queued#`, selected all tasks, tried to clear them. | 2020-10-22T22:18:47Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/airflow/.local/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/airflow/.local/lib/python3.6/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/views.py", line 686, in action_post
self.datamodel.get(self._deserialize_pk_if_composite(pk)) for pk in pks
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/views.py", line 686, in <listcomp>
self.datamodel.get(self._deserialize_pk_if_composite(pk)) for pk in pks
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 870, in get
query, _filters, select_columns=select_columns
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 324, in apply_all
select_columns,
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 272, in _apply_inner_all
query = self.apply_filters(query, inner_filters)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/interface.py", line 162, in apply_filters
return filters.apply_all(query)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/filters.py", line 295, in apply_all
query = flt.apply(query, value)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/filters.py", line 137, in apply
query, field = get_field_setup_query(query, self.model, self.column_name)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_appbuilder/models/sqla/filters.py", line 40, in get_field_setup_query
if not hasattr(model, column_name):
TypeError: hasattr(): attribute name must be string
| 2,282 |
|||
apache/airflow | apache__airflow-12240 | 45587a664433991b01a24bf0210116c3b562adc7 | diff --git a/airflow/api_connexion/__init__.py b/airflow/api_connexion/__init__.py
new file mode 100644
--- /dev/null
+++ b/airflow/api_connexion/__init__.py
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -123,14 +123,6 @@
"_api/airflow/providers/cncf/index.rst",
# Packages without operators
"_api/airflow/providers/sendgrid",
- # Other API rst files.
- "_api/endpoints/index.rst",
- "_api/endpoints/*/index.rst",
- "_api/exceptions/index.rst",
- "_api/parameters/index.rst",
- "_api/schemas/index.rst",
- "_api/schemas/*/index.rst",
- "_api/security/index.rst",
# Templates or partials
'autoapi_templates',
'howto/operator/google/_partials',
| Airflow v2.0.0b1 package doesnt include "api_connexion/exceptions"
**Apache Airflow version**: 2.0.0b1
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): NA
**Environment**:
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release): macOS
- **Kernel** (e.g. `uname -a`):
- **Install tools**:
- **Others**:
**What happened**:
Installed apache-airflow==2.0.0b1 using pip.
Running `airflow webserver` gave the following error:
```
Traceback (most recent call last):
File "/Users/abagri/Workspace/service-workflows/venv/bin/airflow", line 8, in <module>
sys.exit(main())
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 49, in command
func = import_string(import_path)
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/utils/module_loading.py", line 32, in import_string
module = import_module(module_path)
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 783, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/cli/commands/webserver_command.py", line 43, in <module>
from airflow.www.app import cached_app, create_app
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/www/app.py", line 39, in <module>
from airflow.www.extensions.init_views import (
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/www/extensions/init_views.py", line 25, in <module>
from airflow.api_connexion.exceptions import common_error_handler
ModuleNotFoundError: No module named 'airflow.api_connexion.exceptions'
```
**What you expected to happen**:
Expect this command to start the webserver
**How to reproduce it**:
Install a fresh version of airflow, run `airflow db init` followed by `airflow webserver`
**Anything else we need to know**:
| Thanks for opening your first issue here! Be sure to follow the issue template!
cc: @ashb @kaxil -> looking at it
I downloaded the packages externally from [here](https://pypi.org/project/apache-airflow/2.0.0b1/#files), this one also doesn't contain anything except the `openapi` folder.
> I downloaded the packages externally from [here](https://pypi.org/project/apache-airflow/2.0.0b1/#files), this one also doesn't contain anything except the `openapi` folder.
What command did you run? `pip install apache-airflow==2.0.0b1` or did you download the tar.gz and run `pip install FILEPATH_TO_TAR`
I first ran `pip install apache-airflow==2.0.0b1` and then ran the command (`airflow webserver`) to get those errors.
Then, to confirm, I downloaded the tar to check the contents (I didn't install the tar).
@ashb @kaxil -> indeed the problem is with implicit packages. The root cause is that `api_connexion` has no `__init__.py`
Aah, we just need to add an `__init__.py` file then; I wonder why and how it got deleted / went missing.
It was never there - but the implicit-packages change replaced `find_namespace_packages` with `find_packages` in `setup.py`, and now setup.py simply does not find the package.
It was there, it got renamed here: https://github.com/apache/airflow/commit/b2a28d1590410630d66966aa1f2b2a049a8c3b32 / https://github.com/apache/airflow/pull/12082
AAAAAH Github rebase magic finding wrong files!
Bad me!
Sounds like b2 then
@potiuk https://github.com/apache/airflow/pull/12082/files#r518885540
You also resolved this discussion of mine. This file rename was the cause of that problem.
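A small illustration of the `find_packages` point above (run from an Airflow source checkout; the output comments are what you would expect before the `__init__.py` fix):
```python
from setuptools import find_namespace_packages, find_packages

# find_packages() only keeps directories that contain an __init__.py and does
# not descend into ones that don't, so airflow/api_connexion is silently
# dropped from the built package.
print(find_packages(include=["airflow.api_connexion*"]))            # expected: []
print(find_namespace_packages(include=["airflow.api_connexion*"]))  # still picks it up
```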
Yeah, let's cut out beta2 today itself
b2? :(
Fix coming. | 2020-11-10T10:52:45Z | [] | [] |
Traceback (most recent call last):
File "/Users/abagri/Workspace/service-workflows/venv/bin/airflow", line 8, in <module>
sys.exit(main())
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 49, in command
func = import_string(import_path)
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/utils/module_loading.py", line 32, in import_string
module = import_module(module_path)
File "/usr/local/Cellar/python@3.8/3.8.5/Frameworks/Python.framework/Versions/3.8/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 783, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/cli/commands/webserver_command.py", line 43, in <module>
from airflow.www.app import cached_app, create_app
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/www/app.py", line 39, in <module>
from airflow.www.extensions.init_views import (
File "/Users/abagri/Workspace/service-workflows/venv/lib/python3.8/site-packages/airflow/www/extensions/init_views.py", line 25, in <module>
from airflow.api_connexion.exceptions import common_error_handler
ModuleNotFoundError: No module named 'airflow.api_connexion.exceptions'
| 2,295 |
|||
apache/airflow | apache__airflow-1242 | a69df7f84b620109b03db7be6d657b3fe6f52e0d | diff --git a/airflow/hooks/postgres_hook.py b/airflow/hooks/postgres_hook.py
--- a/airflow/hooks/postgres_hook.py
+++ b/airflow/hooks/postgres_hook.py
@@ -11,7 +11,7 @@ class PostgresHook(DbApiHook):
'''
conn_name_attr = 'postgres_conn_id'
default_conn_name = 'postgres_default'
- supports_autocommit = True
+ supports_autocommit = False
def get_conn(self):
conn = self.get_connection(self.postgres_conn_id)
@@ -25,4 +25,7 @@ def get_conn(self):
for arg_name, arg_val in conn.extra_dejson.items():
if arg_name in ['sslmode', 'sslcert', 'sslkey', 'sslrootcert', 'sslcrl']:
conn_args[arg_name] = arg_val
- return psycopg2.connect(**conn_args)
+ psycopg2_conn = psycopg2.connect(**conn_args)
+ if psycopg2_conn.server_version < 70400:
+ self.supports_autocommit = True
+ return psycopg2_conn
| GenericTransfer and Postgres - ERROR - SET AUTOCOMMIT TO OFF is no longer supported
Trying to implement a generic transfer
``` python
t1 = GenericTransfer(
task_id = 'copy_small_table',
sql = "select * from my_schema.my_table",
destination_table = "my_schema.my_table",
source_conn_id = "postgres9.1.13",
destination_conn_id = "postgres9.4.5",
dag=dag
)
```
I get the following error:
```
--------------------------------------------------------------------------------
New run starting @2015-11-25T11:05:40.673401
--------------------------------------------------------------------------------
[2015-11-25 11:05:40,698] {models.py:951} INFO - Executing <Task(GenericTransfer): copy_my_table_v1> on 2015-11-24 00:00:00
[2015-11-25 11:05:40,711] {base_hook.py:53} INFO - Using connection to: 10.x.x.x
[2015-11-25 11:05:40,711] {generic_transfer.py:53} INFO - Extracting data from my_db
[2015-11-25 11:05:40,711] {generic_transfer.py:54} INFO - Executing:
select * from my_schema.my_table
[2015-11-25 11:05:40,713] {base_hook.py:53} INFO - Using connection to: 10.x.x.x
[2015-11-25 11:05:40,808] {base_hook.py:53} INFO - Using connection to: 10.x.x.x
[2015-11-25 11:05:45,271] {base_hook.py:53} INFO - Using connection to: 10.x.x.x
[2015-11-25 11:05:45,272] {generic_transfer.py:63} INFO - Inserting rows into 10.x.x.x
[2015-11-25 11:05:45,273] {base_hook.py:53} INFO - Using connection to: 10.x.x.x
[2015-11-25 11:05:45,305] {models.py:1017} ERROR - SET AUTOCOMMIT TO OFF is no longer supported
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow/models.py", line 977, in run
result = task_copy.execute(context=context)
File "/usr/local/lib/python2.7/dist-packages/airflow/operators/generic_transfer.py", line 64, in execute
destination_hook.insert_rows(table=self.destination_table, rows=results)
File "/usr/local/lib/python2.7/dist-packages/airflow/hooks/dbapi_hook.py", line 136, in insert_rows
cur.execute('SET autocommit = 0')
NotSupportedError: SET AUTOCOMMIT TO OFF is no longer supported
[2015-11-25 11:05:45,330] {models.py:1053} ERROR - SET AUTOCOMMIT TO OFF is no longer supported
```
Python 2.7
Airflow 1.6.1
psycopg2 2.6 (Also tried 2.6.1)
Postgres destination 9.4.5
Any idea on what might cause this problem?
| We don't run postgres at Airbnb so I can't really test a fix, but the first thing I'd try would be to change that line to `False`.
https://github.com/airbnb/airflow/blob/master/airflow/hooks/postgres_hook.py#L12
As a side note, autocommit in DbApiHook should probably be set in a different way, perhaps via a `set_autocommit(cur, conn, autocommit=True)` method that receives an active cursor and connection object and does what it needs to do. Classes deriving from DbApiHook for specific DBs would have to implement it.
The above fix solved our problems.
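A rough sketch of the `set_autocommit` idea suggested above (hypothetical method and class name, not the actual DbApiHook API):
```python
class PostgresAutocommitSketch:
    def set_autocommit(self, conn, autocommit=True):
        # psycopg2 exposes autocommit as a connection attribute, so no
        # "SET autocommit" SQL (which the server rejects, per the patch's
        # server_version < 70400 check) needs to be issued.
        conn.autocommit = autocommit
```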
| 2016-03-29T11:34:01Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow/models.py", line 977, in run
result = task_copy.execute(context=context)
File "/usr/local/lib/python2.7/dist-packages/airflow/operators/generic_transfer.py", line 64, in execute
destination_hook.insert_rows(table=self.destination_table, rows=results)
File "/usr/local/lib/python2.7/dist-packages/airflow/hooks/dbapi_hook.py", line 136, in insert_rows
cur.execute('SET autocommit = 0')
NotSupportedError: SET AUTOCOMMIT TO OFF is no longer supported
| 2,301 |
|||
apache/airflow | apache__airflow-1247 | 5bda74fd9c36f524d0ee922f2183ce9795cc6562 | diff --git a/airflow/utils/db.py b/airflow/utils/db.py
--- a/airflow/utils/db.py
+++ b/airflow/utils/db.py
@@ -42,10 +42,14 @@ def provide_session(func):
@wraps(func)
def wrapper(*args, **kwargs):
needs_session = False
- if 'session' not in kwargs:
+ arg_session = 'session'
+ func_params = func.__code__.co_varnames
+ session_in_args = arg_session in func_params and \
+ func_params.index(arg_session) < len(args)
+ if not (arg_session in kwargs or session_in_args):
needs_session = True
session = settings.Session()
- kwargs['session'] = session
+ kwargs[arg_session] = session
result = func(*args, **kwargs)
if needs_session:
session.expunge_all()
| Scheduler Pickling - conflict with 'session' parameter defined in both args and kwargs
Dear Airflow Maintainers,
Before I tell you about my issue, let me describe my environment:
# Environment
- **Version of Airflow:** master (1db892b)
- **Example code to reproduce the bug:** Using airflow cli: `airflow scheduler -p`
- **Stack trace:**
```
[2016-03-28 19:11:19,952] {jobs.py:656} ERROR - pickle() got multiple values for keyword argument 'session'
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow-1.6.2-py2.7.egg/airflow/jobs.py", line 653, in _execute
self.process_dag(dag, executor)
File "/usr/local/lib/python2.7/dist-packages/airflow-1.6.2-py2.7.egg/airflow/jobs.py", line 459, in process_dag
pickle_id = dag.pickle(session).id
File "/usr/local/lib/python2.7/dist-packages/airflow-1.6.2-py2.7.egg/airflow/utils.py", line 143, in wrapper
result = func(*args, **kwargs)
TypeError: pickle() got multiple values for keyword argument 'session'
```
- **Operating System:** Ubuntu 14.04
- **Python Version:** 2.7.6
Now that you know a little about me, let me tell you about the issue I am having:
The Airflow Scheduler is unable to pickle DAGs using `airflow scheduler -p`. The reason is as follows:
For each DAG, the pickle-enabled scheduler attempts to pickle the DAG:
```
def process_dag(self, dag, executor): # shortened for brevity
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
```
Here is the header for the `pickle` function of the DAG class:
```
@provide_session
def pickle(self, session=None):
```
Note that `pickle`
1. has an optional argument named 'session' which is used as a positional argument in `process_dag`
2. uses a decorator function `provide_session`, which is seen below
```
def provide_session(func): # shortened for brevity
@wraps(func)
def wrapper(*args, **kwargs):
needs_session = False
if 'session' not in kwargs: # args is not checked!
needs_session = True
session = settings.Session()
kwargs['session'] = session
result = func(*args, **kwargs)
if needs_session:
session.expunge_all()
session.commit()
session.close()
return result
return wrapper
```
Because `session` is used as a positional argument in `process_dag`, the `provide_session` wrapper does not find a 'session' argument among the keyword arguments and therefore provides one. When the interior function is called, the positional and keyword arguments are reconciled and the two respective `session`s collide to produce the error shown above.
For reference, see [this stack overflow post](http://stackoverflow.com/questions/21764770/typeerror-got-multiple-values-for-argument)
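A minimal standalone repro of that collision, outside of Airflow (illustrative only):
```python
from functools import wraps

def provide_session(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if 'session' not in kwargs:       # positional args are never checked
            kwargs['session'] = object()  # the wrapper injects its own session
        return func(*args, **kwargs)
    return wrapper

@provide_session
def pickle(self, session=None):
    return session

pickle("dag", "positional-session")
# TypeError: pickle() got multiple values for (keyword) argument 'session'
```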
- **What did you expect to happen?** No errors, DAGs should be scheduled normally.
- **What happened instead?** An error with aforementioned stack trace.
## Reproduction Steps
1. Modify airflow.cfg to use `MesosExecutor`
2. From the CLI, run `airflow scheduler -p`
| 2016-03-29T15:37:01Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow-1.6.2-py2.7.egg/airflow/jobs.py", line 653, in _execute
self.process_dag(dag, executor)
File "/usr/local/lib/python2.7/dist-packages/airflow-1.6.2-py2.7.egg/airflow/jobs.py", line 459, in process_dag
pickle_id = dag.pickle(session).id
File "/usr/local/lib/python2.7/dist-packages/airflow-1.6.2-py2.7.egg/airflow/utils.py", line 143, in wrapper
result = func(*args, **kwargs)
TypeError: pickle() got multiple values for keyword argument 'session'
| 2,303 |
||||
apache/airflow | apache__airflow-12595 | ce919912b7ead388c0a99f4254e551ae3385ff50 | diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -1149,7 +1149,8 @@ def _run_raw_task(
session.commit()
- self._run_mini_scheduler_on_child_tasks(session)
+ if not test_mode:
+ self._run_mini_scheduler_on_child_tasks(session)
@provide_session
@Sentry.enrich_errors
| airflow task test failing due to mini scheduler implementation not respecting test mode
**Apache Airflow version**: 2.0.0b3
**Environment**: Python 3.7-slim running on Docker
**What happened**:
When running `airflow tasks test <dag> <task> <date>`, an error is raised by the mini scheduler, which should not happen since we are testing the task rather than running it via the scheduler.
**What you expected to happen**:
No error message should be displayed, as the task succeeds and it is running in test mode.
**How to reproduce it**:
Just run `airflow tasks test <dag> <task> <date>` after a vanilla Airflow installation using pip install.
**Anything else we need to know**:
This is the log:
```
root@add8b3f038cf:/# airflow tasks test docker_test d 2020-11-24
[2020-11-24 08:13:00,796] {dagbag.py:440} INFO - Filling up the DagBag from /opt/airflow/dags/test
[2020-11-24 08:13:01,072] {taskinstance.py:827} INFO - Dependencies all met for <TaskInstance: docker_test.d 2020-11-24T00:00:00+00:00 [None]>
[2020-11-24 08:13:01,077] {taskinstance.py:827} INFO - Dependencies all met for <TaskInstance: docker_test.d 2020-11-24T00:00:00+00:00 [None]>
[2020-11-24 08:13:01,077] {taskinstance.py:1018} INFO -
--------------------------------------------------------------------------------
[2020-11-24 08:13:01,077] {taskinstance.py:1019} INFO - Starting attempt 1 of 4
[2020-11-24 08:13:01,077] {taskinstance.py:1020} INFO -
--------------------------------------------------------------------------------
[2020-11-24 08:13:01,078] {taskinstance.py:1039} INFO - Executing <Task(PythonOperator): d> on 2020-11-24T00:00:00+00:00
[2020-11-24 08:13:01,109] {taskinstance.py:1232} INFO - Exporting the following env vars:
AIRFLOW_CTX_DAG_EMAIL=adilson@zookal.com
AIRFLOW_CTX_DAG_OWNER=airflow
AIRFLOW_CTX_DAG_ID=docker_test
AIRFLOW_CTX_TASK_ID=d
AIRFLOW_CTX_EXECUTION_DATE=2020-11-24T00:00:00+00:00
d
[2020-11-24 08:13:01,110] {python.py:118} INFO - Done. Returned value was: None
[2020-11-24 08:13:01,115] {taskinstance.py:1143} INFO - Marking task as SUCCESS. dag_id=docker_test, task_id=d, execution_date=20201124T000000, start_date=20201124T081301, end_date=20201124T081301
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/usr/local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 50, in command
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/cli.py", line 86, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 379, in task_test
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/session.py", line 63, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1350, in run
mark_success=mark_success, test_mode=test_mode, job_id=job_id, pool=pool, session=session
File "/usr/local/lib/python3.7/site-packages/airflow/utils/session.py", line 59, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1152, in _run_raw_task
self._run_mini_scheduler_on_child_tasks(session)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/session.py", line 59, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1165, in _run_mini_scheduler_on_child_tasks
execution_date=self.execution_date,
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3473, in one
raise orm_exc.NoResultFound("No row was found for one()")
sqlalchemy.orm.exc.NoResultFound: No row was found for one()
```
| Thanks for opening your first issue here! Be sure to follow the issue template!
@ashb this issue is the same as https://github.com/apache/airflow/issues/12584 raised by @nathadfield, issues created 1 minute apart.
@AdilsonMendonca Sounds like you've done a better job of explaining it than I. We can close mine. | 2020-11-24T18:06:41Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/usr/local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 50, in command
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/cli.py", line 86, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 379, in task_test
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/session.py", line 63, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1350, in run
mark_success=mark_success, test_mode=test_mode, job_id=job_id, pool=pool, session=session
File "/usr/local/lib/python3.7/site-packages/airflow/utils/session.py", line 59, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1152, in _run_raw_task
self._run_mini_scheduler_on_child_tasks(session)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/session.py", line 59, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1165, in _run_mini_scheduler_on_child_tasks
execution_date=self.execution_date,
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3473, in one
raise orm_exc.NoResultFound("No row was found for one()")
sqlalchemy.orm.exc.NoResultFound: No row was found for one()
| 2,304 |
|||
apache/airflow | apache__airflow-1261 | 91449a2205ce2c48596416e3207c2b6a26055a8a | diff --git a/airflow/jobs.py b/airflow/jobs.py
--- a/airflow/jobs.py
+++ b/airflow/jobs.py
@@ -401,7 +401,9 @@ def schedule_dag(self, dag):
DagRun.run_id.like(DagRun.ID_PREFIX+'%')))
last_scheduled_run = qry.scalar()
next_run_date = None
- if not last_scheduled_run:
+ if dag.schedule_interval == '@once' and not last_scheduled_run:
+ next_run_date = datetime.now()
+ elif not last_scheduled_run:
# First run
TI = models.TaskInstance
latest_run = (
@@ -417,8 +419,6 @@ def schedule_dag(self, dag):
next_run_date = min([t.start_date for t in dag.tasks])
elif dag.schedule_interval != '@once':
next_run_date = dag.following_schedule(last_scheduled_run)
- elif dag.schedule_interval == '@once' and not last_scheduled_run:
- next_run_date = datetime.now()
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
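As a reading aid, here is a hedged, simplified sketch of the branch order the patch above establishes; the `dag` argument is duck-typed and this is not the actual `SchedulerJob.schedule_dag` code:

```python
from datetime import datetime

def next_run_for(dag, last_scheduled_run):
    """Simplified sketch: handle '@once' DAGs with no prior run first."""
    if dag.schedule_interval == '@once' and not last_scheduled_run:
        return datetime.now()                 # single run, scheduled immediately
    if not last_scheduled_run:
        # generic first run (heavily simplified versus the real scheduler code)
        return min(t.start_date for t in dag.tasks)
    if dag.schedule_interval != '@once':
        return dag.following_schedule(last_scheduled_run)
    return None                               # '@once' DAG that already ran
```

Checking the `@once` case first avoids ever asking `dag.date_range()` for past intervals of a run-once DAG, which is what raised the `IndexError` reported below.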
| DAG with schedule interval '@once' cannot be scheduled
Dear Airflow Maintainers,
Before I tell you about my issue, let me describe my environment:
# Environment
- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet) : **1.7.0**
- Example code to reproduce the bug (as a code snippet in markdown) **example_xcom from stock examples**
- Stack trace if applicable:
```
[2016-03-30 15:36:49,858] {models.py:204} INFO - Importing /usr/local/lib/python2.7/dist-packages/airflow/example_dags/example_xcom.py
[2016-03-30 15:36:49,863] {models.py:296} INFO - Loaded DAG <DAG: example_xcom>
[2016-03-30 15:36:49,874] {jobs.py:642} ERROR - list index out of range
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow/jobs.py", line 638, in _execute
self.schedule_dag(dag)
File "/usr/local/lib/python2.7/dist-packages/airflow/jobs.py", line 397, in schedule_dag
next_run_date = dag.date_range(latest_run, -5)[0]
IndexError: list index out of range
```
- Operating System: (Windows Version or `$ uname -a`) : **Ubuntu 14.04**
- Python Version: `$ python --version` **2.7.6**
Now that you know a little about me, let me tell you about the issue I am having:
# Description of Issue
- What did you expect to happen? **For the DAG to be scheduled**
- What happened instead? **Error shown above**
- Here is how you can reproduce this issue on your machine:
## Reproduction Steps
1. Make sure the stock example DAG `example_xcom` is unpaused
2. If there is no example_xcom example, create any DAG with a schedule_interval of `@once` (see the sketch below)
3. Run the scheduler
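A minimal sketch of a DAG for step 2, assuming the Airflow 1.7-era API; the dag_id and task_id are illustrative, and import paths may differ slightly between 1.x releases (some versions use `from airflow.operators import DummyOperator`):

```python
# Hypothetical minimal DAG with an '@once' schedule to trigger the error above.
from datetime import datetime

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator

dag = DAG(
    dag_id='once_repro',              # illustrative name
    schedule_interval='@once',
    start_date=datetime(2016, 3, 1),
)

# A single no-op task is enough; the failure happens in the scheduler,
# not in the task itself.
DummyOperator(task_id='noop', dag=dag)
```

With a file like this in the DAGs folder and the scheduler running, the `IndexError` above is raised while computing the next run date.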
| 2016-03-30T16:30:26Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/airflow/jobs.py", line 638, in _execute
self.schedule_dag(dag)
File "/usr/local/lib/python2.7/dist-packages/airflow/jobs.py", line 397, in schedule_dag
next_run_date = dag.date_range(latest_run, -5)[0]
IndexError: list index out of range
| 2,305 |
||||
apache/airflow | apache__airflow-13260 | c2bedd580c3dd0e971ac394be25e331ba9c1c932 | diff --git a/airflow/configuration.py b/airflow/configuration.py
--- a/airflow/configuration.py
+++ b/airflow/configuration.py
@@ -199,7 +199,7 @@ def __init__(self, default_config=None, *args, **kwargs):
self.is_validated = False
- def _validate(self):
+ def validate(self):
self._validate_config_dependencies()
@@ -452,11 +452,9 @@ def getimport(self, section, key, **kwargs): # noqa
def read(self, filenames, encoding=None):
super().read(filenames=filenames, encoding=encoding)
- self._validate()
def read_dict(self, dictionary, source='<dict>'):
super().read_dict(dictionary=dictionary, source=source)
- self._validate()
def has_option(self, section, option):
try:
@@ -993,3 +991,5 @@ def initialize_secrets_backends() -> List[BaseSecretsBackend]:
secrets_backend_list = initialize_secrets_backends()
+
+conf.validate()
| Import error when using custom backend and sql_alchemy_conn_secret
**Apache Airflow version**: 2.0.0
**Environment**:
- **Cloud provider or hardware configuration**: N/A
- **OS** (e.g. from /etc/os-release): custom Docker image (`FROM python:3.6`) and macOS Big Sur (11.0.1)
- **Kernel** (e.g. `uname -a`):
- `Linux xxx 4.14.174+ #1 SMP x86_64 GNU/Linux`
- `Darwin xxx 20.1.0 Darwin Kernel Version 20.1.0 rRELEASE_X86_64 x86_64`
- **Install tools**:
- **Others**:
**What happened**:
I may have mixed 2 different issues here, but this is what happened to me.
I'm trying to use Airflow with the `airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend` and a `sql_alchemy_conn_secret` too; however, I get a `NameError` exception when attempting to run either `airflow scheduler` or `airflow webserver`:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/airflow/__init__.py", line 34, in <module>
from airflow import settings
File "/usr/local/lib/python3.6/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 786, in <module>
conf.read(AIRFLOW_CONFIG)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 447, in read
self._validate()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 196, in _validate
self._validate_config_dependencies()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 224, in _validate_config_dependencies
is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 324, in get
option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 342, in _get_option_from_secrets
option = self._get_secret_option(section, key)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 303, in _get_secret_option
return _get_config_value_from_secret_backend(secrets_path)
NameError: name '_get_config_value_from_secret_backend' is not defined
```
**What you expected to happen**:
A proper import and configuration creation.
**How to reproduce it**:
`airflow.cfg`:
```ini
[core]
# ...
sql_alchemy_conn_secret = some-key
# ...
[secrets]
backend = airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend
backend_kwargs = { ... }
# ...
```
**Anything else we need to know**:
Here is the workaround I have for the moment; I'm not sure it works all the way and it probably doesn't cover all edge cases, though it works for my setup:
Move `get_custom_secret_backend` before (for me it's actually below `_get_config_value_from_secret_backend`): https://github.com/apache/airflow/blob/cc87caa0ce0b31aa29df7bbe90bdcc2426d80ff1/airflow/configuration.py#L794
Then comment: https://github.com/apache/airflow/blob/cc87caa0ce0b31aa29df7bbe90bdcc2426d80ff1/airflow/configuration.py#L232-L236
| Thanks for opening your first issue here! Be sure to follow the issue template!
It looks like you have something seriously wrong in your configuration -- it looks like the line numbers reported do not match the line numbers from the Airflow installation. Can you please remove/reinstall Airflow from scratch and try again? Preferably in a separate virtual environment?
Sorry, indeed, I may have copy/pasted a bad stack trace. Still, I started from a fresh env and the same behaviour arises:
My steps:
```
virtualenv venv
source venv/bin/activate
pip install apache-airflow
pip install apache-airflow-providers-google
pip install apache-airflow-providers-postgres
```
Create an `airflow.cfg` at the current location with the following content:
<details>
<summary>airflow.cfg</summary>
```ini
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
[core]
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository. This path must be absolute.
dags_folder = workflows/
# Users must supply an Airflow connection id that provides access to the storage
# location.
remote_log_conn_id =
remote_base_log_folder =
encrypt_s3_logs = False
# Hostname by providing a path to a callable, which will resolve the hostname.
# The format is "package:function".
#
# For example, default value "socket:getfqdn" means that result from getfqdn() of "socket"
# package will be used as hostname.
#
# No argument should be required in the function specified.
# If using IP address as hostname is preferred, use value ``airflow.utils.net:get_host_ip_address``
hostname_callable = socket.getfqdn
# Default timezone in case supplied date times are naive
# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
default_timezone = utc
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor, KubernetesExecutor
executor = LocalExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn_secret = sql_alchemy_conn
# The encoding for the databases
sql_engine_encoding = utf-8
# If SqlAlchemy should pool database connections.
sql_alchemy_pool_enabled = True
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool. 0 indicates no limit.
sql_alchemy_pool_size = 5
# The maximum overflow size of the pool.
# When the number of checked-out connections reaches the size set in pool_size,
# additional connections will be returned up to this limit.
# When those additional connections are returned to the pool, they are disconnected and discarded.
# It follows then that the total number of simultaneous connections the pool will allow
# is pool_size + max_overflow,
# and the total number of "sleeping" connections the pool will allow is pool_size.
# max_overflow can be set to -1 to indicate no overflow limit;
# no limit will be placed on the total number of concurrent connections. Defaults to 10.
sql_alchemy_max_overflow = 10
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite. If the number of DB connections is ever exceeded,
# a lower config value will allow the system to recover faster.
sql_alchemy_pool_recycle = 1800
# Check connection at the start of each connection pool checkout.
# Typically, this is a simple statement like "SELECT 1".
# More information here:
# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic
sql_alchemy_pool_pre_ping = True
# The schema to use for the metadata database.
# SqlAlchemy supports databases with the concept of multiple schemas.
sql_alchemy_schema =
# Import path for connect args in SqlAlchemy. Default to an empty dict.
# This is useful when you want to configure db engine args that SqlAlchemy won't parse
# in connection string.
# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args
# sql_alchemy_connect_args =
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16
# Are DAGs paused by default at creation
dags_are_paused_at_creation = True
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16
# Whether to load the DAG examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = False
# Whether to load the default connections that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_default_connections = False
# Where your Airflow plugins are stored
plugins_folder = airflow/plugins/
# Secret key to save connection passwords in the db
fernet_key_secret = fernet_key
# Whether to disable pickling dags
donot_pickle = False
# How long before timing out a python file import
dagbag_import_timeout = 30
# How long before timing out a DagFileProcessor, which processes a dag file
dag_file_processor_timeout = 50
# The class to use for running task instances in a subprocess
task_runner = StandardTaskRunner
# If set, tasks without a ``run_as_user`` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =
# What security module to use (for example kerberos)
security =
# If set to False enables some unsecure features like Charts and Ad Hoc Queries.
# In 2.0 will default to True.
secure_mode = False
# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False
# Whether to enable pickling for xcom (note that this is insecure and allows for
# RCE exploits). This will be deprecated in Airflow 2.0 (be forced to False).
enable_xcom_pickling = True
# When a task is killed forcefully, this is the amount of time in seconds that
# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED
killed_task_cleanup_time = 60
# Whether to override params with dag_run.conf. If you pass some key-value pairs
# through ``airflow dags backfill -c`` or
# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params.
dag_run_conf_overrides_params = False
# Worker initialisation check to validate Metadata Database connection
worker_precheck = False
# When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``.
dag_discovery_safe_mode = True
# The number of retries each task is going to have by default. Can be overridden at dag or task level.
default_task_retries = 0
# Whether to serialise DAGs and persist them in DB.
# If set to True, Webserver reads from DB instead of parsing DAG files
# More details: https://airflow.apache.org/docs/stable/dag-serialization.html
store_serialized_dags = False
# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate.
min_serialized_dag_update_interval = 30
# Fetching serialized DAG can not be faster than a minimum interval to reduce database
# read rate. This config controls when your DAGs are updated in the Webserver
min_serialized_dag_fetch_interval = 10
# Whether to persist DAG files code in DB.
# If set to True, Webserver reads file contents from DB instead of
# trying to access files in a DAG folder. Defaults to same as the
# ``store_serialized_dags`` setting.
# Example: store_dag_code = False
# store_dag_code =
# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store
# in the Database.
# When Dag Serialization is enabled (``store_serialized_dags=True``), all the template_fields
# for each of Task Instance are stored in the Database.
# Keeping this number small may cause an error when you try to view ``Rendered`` tab in
# TaskInstance view for older tasks.
max_num_rendered_ti_fields_per_task = 30
# On each dagrun check against defined SLAs
check_slas = True
# Path to custom XCom class that will be used to store and resolve operators results
# Example: xcom_backend = path.to.CustomXCom
xcom_backend = airflow.models.xcom.BaseXCom
[logging]
# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
# Set this to True if you want to enable remote logging.
remote_logging = False
# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = airflow/logs/
# Log format for when Colored logs is enabled
colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] %%(blue)s%%(filename)s:%%(reset)s%%(lineno)d %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
# Format of Log line
log_format = [%%(asctime)s] %%(filename)s:%%(lineno)d %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
# Logging level
logging_level = INFO
# Logging level for Flask-appbuilder UI
fab_logging_level = WARN
# Logging class
# Specify the class that will specify the logging configuration
# This class has to be on the python classpath
# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class =
# Flag to enable/disable Colored logs in Console
# Colour the logs when the controlling terminal is a TTY.
colored_console_log = True
colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
# Log filename format
log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
log_processor_filename_template = {{ filename }}.log
dag_processor_manager_log_location = airflow/logs/dag_processor_manager/dag_processor_manager.log
# Name of handler to read task instance logs.
# Default to use task handler.
task_log_reader = task
[secrets]
# Full class name of secrets backend to enable (will precede env vars and metastore in search path)
# Example: backend = airflow.contrib.secrets.aws_systems_manager.SystemsManagerParameterStoreBackend
backend = airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend
# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class.
# See documentation for the secrets backend you are using. JSON is expected.
# Example for AWS Systems Manager ParameterStore:
# ``{{"connections_prefix": "/airflow/connections", "profile_name": "default"}}``
backend_kwargs = {"config_prefix": "airflow-config", "connections_prefix": "airflow-conn", "sep": "-"}
[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver
api_client = airflow.api.client.local_client
# If you set web_server_url_prefix, do NOT forget to append it here, ex:
# ``endpoint_url = http://localhost:8080/myroot``
# So api will look like: ``http://localhost:8080/myroot/api/experimental/...``
endpoint_url = http://localhost:8080
[debug]
# Used only with DebugExecutor. If set to True DAG will fail with first
# failed task. Helpful for debugging purposes.
fail_fast = False
[api]
# How to authenticate users of the API. See
# https://airflow.apache.org/docs/stable/security.html for possible values.
# ("airflow.api.auth.backend.default" allows all requests for historic reasons)
auth_backend = airflow.api.auth.backend.deny_all
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via ``default_args``
default_owner = airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0
[hive]
# Default mapreduce queue for HiveOperator tasks
default_hive_mapred_queue =
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# Default timezone to display all dates in the RBAC UI, can be UTC, system, or
# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the
# default value of core/default_timezone will be used
# Example: default_ui_timezone = America/New_York
default_ui_timezone = UTC
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert =
# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_key =
# Number of seconds the webserver waits before killing gunicorn master that doesn't respond
web_server_master_timeout = 120
# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120
# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1
# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30
# If set to True, Airflow will track files in plugins_folder directory. When it detects changes,
# then reload the gunicorn.
reload_on_plugin_change = False
# Secret key used to run your flask app
# It should be as random as possible
secret_key_secret = secret_key
# Number of workers to run the Gunicorn web server
workers = 4
# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync
# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
# Log files for the gunicorn webserver. '-' means log to stderr.
error_logfile = -
# Expose the configuration file in the web server
expose_config = False
# Expose hostname in the web server
expose_hostname = True
# Expose stacktrace in the web server
expose_stacktrace = True
# Set to true to turn on authentication:
# https://airflow.apache.org/security.html#web-authentication
authenticate = True
auth_backend = airflow.contrib.auth.backends.password_auth
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to user the ldapgroup mode.
owner_mode = user
# Default DAG view. Valid values are:
# tree, graph, duration, gantt, landing_times
dag_default_view = tree
# "Default DAG orientation. Valid values are:"
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = TB
# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False
# The amount of time (in secs) webserver will wait for initial handshake
# while fetching logs from other worker machine
log_fetch_timeout_sec = 5
# Time interval (in secs) to wait before next log fetching.
log_fetch_delay_sec = 2
# Distance away from page bottom to enable auto tailing.
log_auto_tailing_offset = 30
# Animation speed for auto tailing log display.
log_animation_speed = 1000
# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
hide_paused_dags_by_default = False
# Consistent page size across all listing views in the UI
page_size = 100
# Use FAB-based webserver with RBAC feature
rbac = True
# Define the color of navigation bar
navbar_color = #fff
# Default dagrun to show in UI
default_dag_run_display_number = 25
# Enable werkzeug ``ProxyFix`` middleware for reverse proxy
enable_proxy_fix = False
# Number of values to trust for ``X-Forwarded-For``.
# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/
proxy_fix_x_for = 1
# Number of values to trust for ``X-Forwarded-Proto``
proxy_fix_x_proto = 1
# Number of values to trust for ``X-Forwarded-Host``
proxy_fix_x_host = 1
# Number of values to trust for ``X-Forwarded-Port``
proxy_fix_x_port = 1
# Number of values to trust for ``X-Forwarded-Prefix``
proxy_fix_x_prefix = 1
# Set secure flag on session cookie
cookie_secure = False
# Set samesite policy on session cookie
cookie_samesite = Strict
# Default setting for wrap toggle on DAG code and TI log views.
default_wrap = False
# Allow the UI to be rendered in a frame
x_frame_enabled = True
# Send anonymous user activity to your analytics tool
# choose from google_analytics, segment, or metarouter
# analytics_tool =
# Unique ID of your account in the analytics tool
# analytics_id =
# Update FAB permissions and sync security manager roles
# on webserver startup
update_fab_perms = True
# The UI cookie lifetime in days
session_lifetime_minutes = 60
[email]
email_backend = airflow.utils.email.send_email_smtp
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# After how much time should the scheduler terminate in seconds
# -1 indicates to run continuously (see also num_runs)
run_duration = 41460
# The number of times to try to schedule each DAG file
# -1 indicates unlimited number
num_runs = -1
# The number of seconds to wait between consecutive DAG file processing
processor_poll_interval = 1
# after how much time (seconds) a new DAGs should be picked up from the filesystem
min_file_process_interval = 0
# How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes.
dag_dir_list_interval = 300
# How often should stats be printed to the logs. Setting to 0 will disable printing stats
print_stats_interval = 30
# If the last scheduler heartbeat happened more than scheduler_health_check_threshold
# ago (in seconds), scheduler is considered unhealthy.
# This is used by the health check in the "/health" endpoint
scheduler_health_check_threshold = 30
child_process_log_directory = airflow/logs/scheduler
# Local task jobs periodically heartbeat to the DB. If the job has
# not heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
scheduler_zombie_task_threshold = 300
# Turn off scheduler catchup by setting this to False.
# Default behavior is unchanged and
# Command Line Backfills still work, but the scheduler
# will not do scheduler catchup if this is False,
# however it can be set on a per DAG basis in the
# DAG definition (catchup)
catchup_by_default = False
# This changes the batch size of queries in the scheduling main loop.
# If this is too high, SQL query performance may be impacted by one
# or more of the following:
# - reversion to full table scan
# - complexity of query predicate
# - excessive locking
# Additionally, you may hit the maximum allowable query length for your db.
# Set this to 0 for no limit (not advised)
max_tis_per_query = 512
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow
# If you want to avoid send all the available metrics to StatsD,
# you can configure an allow list of prefixes to send only the metrics that
# start with the elements of the list (e.g: scheduler,executor,dagrun)
statsd_allow_list =
# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run.
parsing_processes = 2
authenticate = False
# Turn off scheduler use of cron intervals by setting this to False.
# DAGs submitted manually in the web UI or with trigger_dag will still run.
use_job_schedule = True
# Allow externally triggered DagRuns for Execution Dates in the future
# Only has effect if schedule_interval is set to None in DAG
allow_trigger_in_future = False
[metrics]
# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True
[kubernetes]
# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
worker_container_repository =
# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored.
# (This feature is experimental)
pod_template_file =
worker_container_tag =
worker_container_image_pull_policy = IfNotPresent
# If True, all worker pods will be deleted upon termination
delete_worker_pods = True
# If False (and delete_worker_pods is True),
# failed worker pods will not be deleted so users can investigate them.
delete_worker_pods_on_failure = False
# Number of Kubernetes Worker Pod creation calls per scheduler loop
worker_pods_creation_batch_size = 1
# The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
namespace = default
# Allows users to launch pods in multiple namespaces.
# Will require creating a cluster-role for the scheduler
multi_namespace_mode = False
# Use the service account kubernetes gives to pods to connect to kubernetes cluster.
# It's intended for clients that expect to be running inside a pod running on kubernetes.
# It will raise an exception if called from a process not running in a kubernetes environment.
in_cluster = True
# Keyword parameters to pass while calling a kubernetes client core_v1_api methods
# from Kubernetes Executor provided as a single line formatted JSON dictionary string.
# List of supported params are similar for all core_v1_apis, hence a single config
# variable for all apis.
# See:
# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py
# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely
# for kubernetes api responses, which will cause the scheduler to hang.
# The timeout is specified as [connect timeout, read timeout]
kube_client_request_args =
# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client
# ``core_v1_api`` method when using the Kubernetes Executor.
# This should be an object and can contain any of the options listed in the ``v1DeleteOptions``
# class defined here:
# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19
# Example: delete_option_kwargs = {{"grace_period_seconds": 10}}
delete_option_kwargs =
[kubernetes_node_selectors]
# The Key-value pairs to be given to worker pods.
# The worker pods will be scheduled to the nodes of the specified key-value pairs.
# Should be supplied in the format: key = value
[kubernetes_annotations]
# The Key-value annotations pairs to be given to worker pods.
# Should be supplied in the format: key = value
[kubernetes_environment_variables]
# The scheduler sets the following environment variables into your workers. You may define as
# many environment variables as needed and the kubernetes launcher will set them in the launched workers.
# Environment variables in this section are defined as follows
# ``<environment_variable_key> = <environment_variable_value>``
#
# For example if you wanted to set an environment variable with value `prod` and key
# ``ENVIRONMENT`` you would follow the following format:
# ENVIRONMENT = prod
#
# Additionally you may override worker airflow settings with the ``AIRFLOW__<SECTION>__<KEY>``
# formatting as supported by airflow normally.
[kubernetes_secrets]
# The scheduler mounts the following secrets into your workers as they are launched by the
# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
# defined secrets and mount them as secret environment variables in the launched workers.
# Secrets in this section are defined as follows
# ``<environment_variable_mount> = <kubernetes_secret_object>=<kubernetes_secret_key>``
#
# For example if you wanted to mount a kubernetes secret key named ``postgres_password`` from the
# kubernetes secret object ``airflow-secret`` as the environment variable ``POSTGRES_PASSWORD`` into
# your workers you would follow the following format:
# ``POSTGRES_PASSWORD = airflow-secret=postgres_credentials``
#
# Additionally you may override worker airflow settings with the ``AIRFLOW__<SECTION>__<KEY>``
# formatting as supported by airflow normally.
[kubernetes_labels]
# The Key-value pairs to be given to worker pods.
# The worker pods will be given these static labels, as well as some additional dynamic labels
# to identify the task.
# Should be supplied in the format: ``key = value``
```
</details>
Run the following command:
```bash
AIRFLOW_CONFIG=./airflow.cfg airflow scheduler
```
<details>
<summary>Stack trace</summary>
```bash
Traceback (most recent call last):
File "/some/path/venv/bin/airflow", line 5, in <module>
from airflow.__main__ import main
File "/some/path/venv/lib/python3.6/site-packages/airflow/__init__.py", line 34, in <module>
from airflow import settings
File "/some/path/venv/lib/python3.6/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 794, in <module>
conf.read(AIRFLOW_CONFIG)
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 455, in read
self._validate()
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 204, in _validate
self._validate_config_dependencies()
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 232, in _validate_config_dependencies
is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 332, in get
option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 350, in _get_option_from_secrets
option = self._get_secret_option(section, key)
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 311, in _get_secret_option
return _get_config_value_from_secret_backend(secrets_path)
File "/some/path/venv/lib/python3.6/site-packages/airflow/configuration.py", line 85, in _get_config_value_from_secret_backend
secrets_client = get_custom_secret_backend()
NameError: name 'get_custom_secret_backend' is not defined
```
</details>
Which now matches the line numbers...
I think this is an interesting one. It looks like a circular dependency where the method is not yet defined, because we are calling it while the Python file is still being parsed.
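A standalone toy illustrating that mechanism (this is deliberately not Airflow code; the names are made up) -- a module-level call runs while the module is still being executed, so any helper defined further down the file does not exist yet:

```python
# Running this file raises NameError, by design: conf.read() is called at
# module level, and helper() is looked up in module globals at call time,
# before its definition below has been executed.
class Config:
    def read(self):
        return helper()

conf = Config()
conf.read()          # NameError: name 'helper' is not defined

def helper():
    return "value"
```

Moving the validating call to the bottom of the module, after every helper is defined -- as the patch above does with `conf.validate()` -- removes the ordering problem.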
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/airflow/__init__.py", line 34, in <module>
from airflow import settings
File "/usr/local/lib/python3.6/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 786, in <module>
conf.read(AIRFLOW_CONFIG)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 447, in read
self._validate()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 196, in _validate
self._validate_config_dependencies()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 224, in _validate_config_dependencies
is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 324, in get
option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 342, in _get_option_from_secrets
option = self._get_secret_option(section, key)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 303, in _get_secret_option
return _get_config_value_from_secret_backend(secrets_path)
NameError: name '_get_config_value_from_secret_backend' is not defined
| 2,323 |
|||
apache/airflow | apache__airflow-13371 | 800e630d0cc9dbbf345a9cee4653861cbfda42c9 | diff --git a/airflow/upgrade/rules/airflow_macro_plugin_removed.py b/airflow/upgrade/rules/airflow_macro_plugin_removed.py
--- a/airflow/upgrade/rules/airflow_macro_plugin_removed.py
+++ b/airflow/upgrade/rules/airflow_macro_plugin_removed.py
@@ -39,9 +39,12 @@ def _check_file(self, file_path):
problems = []
class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
with open(file_path, "r") as file_pointer:
- for line_number, line in enumerate(file_pointer, 1):
- if class_name_to_check in line:
- problems.append(self._change_info(file_path, line_number))
+ try:
+ for line_number, line in enumerate(file_pointer, 1):
+ if class_name_to_check in line:
+ problems.append(self._change_info(file_path, line_number))
+ except UnicodeDecodeError:
+ problems.append("Unable to read python file {}".format(file_path))
return problems
def check(self):
@@ -49,5 +52,7 @@ def check(self):
file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
problems = []
for file_path in file_paths:
+ if not file_path.endswith(".py"):
+ continue
problems.extend(self._check_file(file_path))
return problems
| AirflowMacroPluginRemovedRule fails on non-python files
**Apache Airflow version**: 1.10.14
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):
**Environment**:
- **Cloud provider or hardware configuration**: X
- **OS** (e.g. from /etc/os-release): X
- **Kernel** (e.g. `uname -a`): X
- **Install tools**: X
- **Others**: X
**What happened**:
The `AirflowMacroPluginRemovedRule` seems unable to process non-standard python files (e.g. `.xlsx`) and chokes with an unhelpful error message:
```python
========================================================================================================================================================== STATUS ==========================================================================================================================================================
Check for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow", line 37, in <module>
args.func(args)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 88, in run
all_problems = check_upgrade(formatter, rules)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 37, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 52, in check
problems.extend(self._check_file(file_path))
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 42, in _check_file
for line_number, line in enumerate(file_pointer, 1):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte
```
**What you expected to happen**:
I expected the rule to skip over files it could not process/understand
**How to reproduce it**:
Add an `.xlsx` or other binary document to the DAGs folder and run the upgrade check.
**Suggested resolution**:
I think it's fine to fail out on these files (it led us to add certain items to the `.airflowignore` which should have been there anyway), but I had to modify the upgrade rule directly to tell me _which_ files were the problem. A more helpful error message here, and possibly a message prompting users to add such files to their `.airflowignore`, would be ideal.
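A hedged sketch of the defensive check, mirroring the spirit of the patch above rather than the exact upgrade-check code (the function and variable names here are illustrative):

```python
def check_file(file_path, needle="AirflowMacroPlugin"):
    """Scan one DAG-folder file, skipping anything that is not readable Python."""
    problems = []
    if not file_path.endswith(".py"):
        # binary artefacts such as .xlsx in the DAGs folder are simply skipped
        return problems
    with open(file_path, "r") as fp:
        try:
            for line_number, line in enumerate(fp, 1):
                if needle in line:
                    problems.append((file_path, line_number))
        except UnicodeDecodeError:
            # undecodable file: report it by name instead of crashing the whole check
            problems.append("Unable to read python file {}".format(file_path))
    return problems
```

Reporting the offending path also makes it obvious which entries belong in `.airflowignore`.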
| Please feel free to assign this to me too 🙂
Sure :). Do you want to take care of #13350 too?
I can give it a shot too! I know less about what the best approach is for that one. Also do you mind adding the `upgrade_check` label to these two? | 2020-12-29T21:12:20Z | [] | [] |
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow", line 37, in <module>
args.func(args)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 88, in run
all_problems = check_upgrade(formatter, rules)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 37, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 52, in check
problems.extend(self._check_file(file_path))
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 42, in _check_file
for line_number, line in enumerate(file_pointer, 1):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte
| 2,329 |
|||
apache/airflow | apache__airflow-13932 | 7a5aafce08374c75562e3eb728413fefc4ab6e01 | diff --git a/airflow/jobs/scheduler_job.py b/airflow/jobs/scheduler_job.py
--- a/airflow/jobs/scheduler_job.py
+++ b/airflow/jobs/scheduler_job.py
@@ -731,7 +731,7 @@ def __init__(
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
- self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True)
+ self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
diff --git a/airflow/models/dagbag.py b/airflow/models/dagbag.py
--- a/airflow/models/dagbag.py
+++ b/airflow/models/dagbag.py
@@ -79,6 +79,10 @@ class DagBag(LoggingMixin):
:param read_dags_from_db: Read DAGs from DB if ``True`` is passed.
If ``False`` DAGs are read from python files.
:type read_dags_from_db: bool
+ :param load_op_links: Should the extra operator link be loaded via plugins when
+ de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
+ are not loaded to not run User code in Scheduler.
+ :type load_op_links: bool
"""
DAGBAG_IMPORT_TIMEOUT = conf.getfloat('core', 'DAGBAG_IMPORT_TIMEOUT')
@@ -92,6 +96,7 @@ def __init__(
safe_mode: bool = conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE'),
read_dags_from_db: bool = False,
store_serialized_dags: Optional[bool] = None,
+ load_op_links: bool = True,
):
# Avoid circular import
from airflow.models.dag import DAG
@@ -128,6 +133,9 @@ def __init__(
include_smart_sensor=include_smart_sensor,
safe_mode=safe_mode,
)
+ # Should the extra operator link be loaded via plugins?
+ # This flag is set to False in Scheduler so that Extra Operator links are not loaded
+ self.load_op_links = load_op_links
def size(self) -> int:
""":return: the amount of dags contained in this dagbag"""
@@ -226,6 +234,7 @@ def _add_dag_from_db(self, dag_id: str, session: Session):
if not row:
raise SerializedDagNotFound(f"DAG '{dag_id}' not found in serialized_dag table")
+ row.load_op_links = self.load_op_links
dag = row.dag
for subdag in dag.subdags:
self.dags[subdag.dag_id] = subdag
diff --git a/airflow/models/serialized_dag.py b/airflow/models/serialized_dag.py
--- a/airflow/models/serialized_dag.py
+++ b/airflow/models/serialized_dag.py
@@ -86,6 +86,8 @@ class SerializedDagModel(Base):
backref=backref('serialized_dag', uselist=False, innerjoin=True),
)
+ load_op_links = True
+
def __init__(self, dag: DAG):
self.dag_id = dag.dag_id
self.fileloc = dag.full_filepath
@@ -163,6 +165,8 @@ def read_all_dags(cls, session: Session = None) -> Dict[str, 'SerializedDAG']:
@property
def dag(self):
"""The DAG deserialized from the ``data`` column"""
+ SerializedDAG._load_operator_extra_links = self.load_op_links # pylint: disable=protected-access
+
if isinstance(self.data, dict):
dag = SerializedDAG.from_dict(self.data) # type: Any
else:
diff --git a/airflow/serialization/serialized_objects.py b/airflow/serialization/serialized_objects.py
--- a/airflow/serialization/serialized_objects.py
+++ b/airflow/serialization/serialized_objects.py
@@ -100,6 +100,11 @@ class BaseSerialization:
_json_schema: Optional[Validator] = None
+ # Should the extra operator link be loaded via plugins when
+ # de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
+ # are not loaded to not run User code in Scheduler.
+ _load_operator_extra_links = True
+
_CONSTRUCTOR_PARAMS: Dict[str, Parameter] = {}
SERIALIZER_VERSION = 1
@@ -407,35 +412,38 @@ def serialize_operator(cls, op: BaseOperator) -> Dict[str, Any]:
@classmethod
def deserialize_operator(cls, encoded_op: Dict[str, Any]) -> BaseOperator:
"""Deserializes an operator from a JSON object."""
- from airflow import plugins_manager
-
- plugins_manager.initialize_extra_operators_links_plugins()
-
- if plugins_manager.operator_extra_links is None:
- raise AirflowException("Can not load plugins")
op = SerializedBaseOperator(task_id=encoded_op['task_id'])
- # Extra Operator Links defined in Plugins
- op_extra_links_from_plugin = {}
-
if "label" not in encoded_op:
# Handle deserialization of old data before the introduction of TaskGroup
encoded_op["label"] = encoded_op["task_id"]
- for ope in plugins_manager.operator_extra_links:
- for operator in ope.operators:
- if (
- operator.__name__ == encoded_op["_task_type"]
- and operator.__module__ == encoded_op["_task_module"]
- ):
- op_extra_links_from_plugin.update({ope.name: ope})
-
- # If OperatorLinks are defined in Plugins but not in the Operator that is being Serialized
- # set the Operator links attribute
- # The case for "If OperatorLinks are defined in the operator that is being Serialized"
- # is handled in the deserialization loop where it matches k == "_operator_extra_links"
- if op_extra_links_from_plugin and "_operator_extra_links" not in encoded_op:
- setattr(op, "operator_extra_links", list(op_extra_links_from_plugin.values()))
+ # Extra Operator Links defined in Plugins
+ op_extra_links_from_plugin = {}
+
+ # We don't want to load Extra Operator links in Scheduler
+ if cls._load_operator_extra_links: # pylint: disable=too-many-nested-blocks
+ from airflow import plugins_manager
+
+ plugins_manager.initialize_extra_operators_links_plugins()
+
+ if plugins_manager.operator_extra_links is None:
+ raise AirflowException("Can not load plugins")
+
+ for ope in plugins_manager.operator_extra_links:
+ for operator in ope.operators:
+ if (
+ operator.__name__ == encoded_op["_task_type"]
+ and operator.__module__ == encoded_op["_task_module"]
+ ):
+ op_extra_links_from_plugin.update({ope.name: ope})
+
+ # If OperatorLinks are defined in Plugins but not in the Operator that is being Serialized
+ # set the Operator links attribute
+ # The case for "If OperatorLinks are defined in the operator that is being Serialized"
+ # is handled in the deserialization loop where it matches k == "_operator_extra_links"
+ if op_extra_links_from_plugin and "_operator_extra_links" not in encoded_op:
+ setattr(op, "operator_extra_links", list(op_extra_links_from_plugin.values()))
for k, v in encoded_op.items():
@@ -450,10 +458,13 @@ def deserialize_operator(cls, encoded_op: Dict[str, Any]) -> BaseOperator:
elif k.endswith("_date"):
v = cls._deserialize_datetime(v)
elif k == "_operator_extra_links":
- op_predefined_extra_links = cls._deserialize_operator_extra_links(v)
+ if cls._load_operator_extra_links:
+ op_predefined_extra_links = cls._deserialize_operator_extra_links(v)
- # If OperatorLinks with the same name exists, Links via Plugin have higher precedence
- op_predefined_extra_links.update(op_extra_links_from_plugin)
+ # If OperatorLinks with the same name exists, Links via Plugin have higher precedence
+ op_predefined_extra_links.update(op_extra_links_from_plugin)
+ else:
+ op_predefined_extra_links = {}
v = list(op_predefined_extra_links.values())
k = "operator_extra_links"
@@ -655,6 +666,9 @@ def deserialize_dag(cls, encoded_dag: Dict[str, Any]) -> 'SerializedDAG':
if k == "_downstream_task_ids":
v = set(v)
elif k == "tasks":
+ # pylint: disable=protected-access
+ SerializedBaseOperator._load_operator_extra_links = cls._load_operator_extra_links
+ # pylint: enable=protected-access
v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
k = "task_dict"
elif k == "timezone":
| Unable to start scheduler after stopped
**Apache Airflow version**: 2.0.0rc3
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):
**Environment**:
- **Cloud provider or hardware configuration**: Linux
- **OS** (e.g. from /etc/os-release): Ubuntu
- **Kernel** (e.g. `uname -a`):
- **Install tools**:
- **Others**:
**What happened**:
After shutting down the scheduler while tasks were in the running state, trying to restart the scheduler results in primary-key violations:
```
[2020-12-15 22:43:29,673] {scheduler_job.py:1293} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(example_task_group, scheduled__2020-12-14T04:31:00+00:00) already exists.
```
**What you expected to happen**:
Scheduler restarts and picks up where it left off.
**How to reproduce it**:
Set an example dag (I used task_group) to a schedule_interval of `* * * * *` (see the sketch below), start the scheduler, and let it run for a few minutes.
Shut down the scheduler
Attempt to restart the scheduler
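A minimal sketch of step 1, assuming the Airflow 2.0 API; the dag_id is illustrative, and any frequently scheduled DAG (including the bundled `example_task_group` with its schedule changed) should behave the same way:

```python
# Hypothetical every-minute DAG used to accumulate runs quickly before
# stopping the scheduler.
from datetime import datetime

from airflow import DAG
from airflow.operators.bash import BashOperator

with DAG(
    dag_id="minute_repro",                   # illustrative name
    schedule_interval="* * * * *",
    start_date=datetime(2020, 12, 1),
    catchup=True,
) as dag:
    BashOperator(task_id="noop", bash_command="true")
```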
**Anything else we need to know**:
I came across this while testing with the LocalExecutor in a virtual env. If no one else is able to reproduce it, I'll try again in a clean virtual env.
| Looks serious. If we can confirm that one, I am afraid it might lead to RC4 @kaxil @ashb if this is easily triggerable.
Thanks for reporting @JCoder01 !
I've certainly never seen this, and I've done heavy testing of killing and restarting schedulers.
@JCoder01 A few more questions. Maybe you can provide as much information as possible about the deployment you have.
1) which version of Postgres are you using?
2) what kind of deployment you have - is the database managed/on premise/remote
3) do you use any Database HA? Active/Passive?
4) was it a single-scheduler or multiple scheduler case?
5) Did you use our image or another container, or installed airflow manually in host/virtualenv?
6) Is there a possibility you still had for example another (old?) version of airflow still accessing the database at the same time?
7) Did you migrate from airflow 1.10 or was it a fresh installation
Generally - as much information as possible about your case would be tremendously useful to investigate it!
And - is it possible to get a database dump in the "failing" statej? That would be super useful if you could send us a dump of the database so that we can import it and investigate further!
1.) looks like PostgreSQL 13.1
2.) on prem, in docker, used only for personal testing
3.) No,
4.) single-scheduler
5.) manual install in virutual env
6.) as far as I know, no, there were no other versions accessing the database.
7.) Fresh install
I have created another virtualenv/database and am trying to recreate it. I'm not sure about dumping the whole database; I don't want to accidentally share sensitive info.
Do you happen to have the full stack trace (there might be more above or below that one you shared) so I can see where in the scheduler it is happening?
Here's the whole thing.
```
ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(example_task_group, scheduled__2020-12-14T04:32:00+00:00) already exists.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1567, in _create_dag_runs
creating_job_id=self.id,
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/airflow/models/dag.py", line 1776, in create_dagrun
session.flush()
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2536, in flush
self._flush(objects)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2678, in _flush
transaction.rollback(_capture_exception=True)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2638, in _flush
flush_context.execute()
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 1136, in _emit_insert_statements
statement, params
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
return meth(self, multiparams, params)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1130, in _execute_clauseelement
distilled_params,
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
e, statement, parameters, cursor, context
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.IntegrityError: (psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(example_task_group, scheduled__2020-12-14T04:32:00+00:00) already exists.
[SQL: INSERT INTO dag_run (dag_id, execution_date, start_date, end_date, state, run_id, creating_job_id, external_trigger, run_type, conf, last_scheduling_decision, dag_hash) VALUES (%(dag_id)s, %(execution_date)s, %(start_date)s, %(end_date)s, %(state)s, %(run_id)s, %(creating_job_id)s, %(external_trigger)s, %(run_type)s, %(conf)s, %(last_scheduling_decision)s, %(dag_hash)s) RETURNING dag_run.id]
[parameters: {'dag_id': 'example_task_group', 'execution_date': datetime.datetime(2020, 12, 14, 4, 32, tzinfo=Timezone('UTC')), 'start_date': datetime.datetime(2020, 12, 16, 11, 5, 22, 127201, tzinfo=Timezone('UTC')), 'end_date': None, 'state': 'running', 'run_id': 'scheduled__2020-12-14T04:32:00+00:00', 'creating_job_id': 701, 'external_trigger': False, 'run_type': <DagRunType.SCHEDULED: 'scheduled'>, 'conf': <psycopg2.extensions.Binary object at 0x7fc17b8ee4e0>, 'last_scheduling_decision': None, 'dag_hash': '1628bc132c765d9b68a06841eedfbc7d'}]
(Background on this error at: http://sqlalche.me/e/13/gkpj)
```
Okay, right - as expected, nothing really interesting there -- it's `self._create_dag_runs(query.all(), session)` that is failing, and all it does is create the dag runs that the previous query handed it: https://github.com/apache/airflow/blob/69801f5ef016bcf21af348f4fdeb67f09db8f394/airflow/models/dag.py#L2220-L2242
What I can see in the database is this:
for dag_run:
|id|dag_id|execution_date|state|
|--|------|--------------|-----|
|353|example_task_group|2020-12-13 23:32:00|running|
and in dag, it looks like the scheduler didn't pick up that run?
|dag_id|next_dagrun|next_dagrun_create_after|
|------|-----------|------------------------|
|example_task_group|2020-12-13 23:32:00|2020-12-13 23:33:00|
If you do an `airflow db init` on the database after it gets into this state, it fixes it and updates the dag table to the correct `next_dagrun`. So it almost seems like, in the unlikely event that it gets into this state, the scheduler needs to do a round of updates on startup before it starts scheduling tasks.
@ashb as requested:
```
[2020-12-15 22:43:15,690] {logging_mixin.py:103} INFO - [2020-12-15 22:43:15,690] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:21:00+00:00
[2020-12-15 22:43:16,501] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,501] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:21:00+00:00
[2020-12-15 22:43:16,606] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,606] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:21:00+00:00
```
@JCoder01 Could we have some more logs surrounding those times -- it would help us tell in which places this is getting updated.
Of course, sorry about that.
```
[2020-12-15 22:43:15,642] {scheduler_job.py:181} INFO - Started process (PID=46978) to work on /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:15,644] {scheduler_job.py:629} INFO - Processing file /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py for tasks to queue
[2020-12-15 22:43:15,645] {logging_mixin.py:103} INFO - [2020-12-15 22:43:15,644] {dagbag.py:440} INFO - Filling up the DagBag from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:15,656] {scheduler_job.py:639} INFO - DAG(s) dict_keys(['example_task_group']) retrieved from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:15,658] {logging_mixin.py:103} INFO - [2020-12-15 22:43:15,658] {dag.py:1813} INFO - Sync 1 DAGs
[2020-12-15 22:43:15,690] {logging_mixin.py:103} INFO - [2020-12-15 22:43:15,690] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:21:00+00:00
[2020-12-15 22:43:15,708] {scheduler_job.py:189} INFO - Processing /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py took 0.070 seconds
[2020-12-15 22:43:16,449] {scheduler_job.py:181} INFO - Started process (PID=47049) to work on /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:16,450] {scheduler_job.py:629} INFO - Processing file /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py for tasks to queue
[2020-12-15 22:43:16,451] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,451] {dagbag.py:440} INFO - Filling up the DagBag from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:16,460] {scheduler_job.py:639} INFO - DAG(s) dict_keys(['example_task_group']) retrieved from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:16,481] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,481] {dag.py:1813} INFO - Sync 1 DAGs
[2020-12-15 22:43:16,501] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,501] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:21:00+00:00
[2020-12-15 22:43:16,522] {scheduler_job.py:189} INFO - Processing /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py took 0.078 seconds
[2020-12-15 22:43:16,524] {scheduler_job.py:181} INFO - Started process (PID=47075) to work on /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:16,526] {scheduler_job.py:629} INFO - Processing file /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py for tasks to queue
[2020-12-15 22:43:16,527] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,527] {dagbag.py:440} INFO - Filling up the DagBag from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:16,538] {scheduler_job.py:639} INFO - DAG(s) dict_keys(['example_task_group']) retrieved from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:16,580] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,580] {dag.py:1813} INFO - Sync 1 DAGs
[2020-12-15 22:43:16,606] {logging_mixin.py:103} INFO - [2020-12-15 22:43:16,606] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:21:00+00:00
[2020-12-15 22:43:16,628] {scheduler_job.py:189} INFO - Processing /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py took 0.108 seconds
[2020-12-15 22:43:18,346] {scheduler_job.py:181} INFO - Started process (PID=47308) to work on /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:18,348] {scheduler_job.py:629} INFO - Processing file /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py for tasks to queue
[2020-12-15 22:43:18,348] {logging_mixin.py:103} INFO - [2020-12-15 22:43:18,348] {dagbag.py:440} INFO - Filling up the DagBag from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:18,363] {scheduler_job.py:639} INFO - DAG(s) dict_keys(['example_task_group']) retrieved from /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py
[2020-12-15 22:43:18,365] {logging_mixin.py:103} INFO - [2020-12-15 22:43:18,365] {dag.py:1813} INFO - Sync 1 DAGs
[2020-12-15 22:43:18,402] {logging_mixin.py:103} INFO - [2020-12-15 22:43:18,401] {dag.py:2266} INFO - Setting next_dagrun for example_task_group to 2020-12-14T04:30:00+00:00
[2020-12-15 22:43:18,427] {scheduler_job.py:189} INFO - Processing /home/jcoder/git/airflow_2.0/macs-dags/macs-dags/dags/example_task_group.py took 0.085 seconds
```
And last question for now (you may not have this one anymore): Did you get any of the "Setting next_dagrun for example_task_group" messages in the main scheduler output?
I spent a bit more time on this today, and while I can occasionally re-create it in the original environment, I can't recreate it in a fresh install.
What's the difference between the environments? Just the Airflow installation, database? virtualenv? Anything else?
Different virtual envs and databases; both the virtual envs and the databases are on the same host. The only difference is which dags and provider packages are in each virtual env. I do have a dag where I was testing out overriding the `following_schedule` DAG method with a custom function to allow running at specific times. I have that dag in both envs, thinking it was the culprit, but even with the dag disabled I see the error in the original environment, for a different dag, and not in the second virtual env.
Is it possible that you have different dags with the same DAG_ID (educated guess here)?
I think it is most likely because you had overridden `following_schedule`.
@potiuk I generally believe anything is possible, but in this case I think it's rather unlikely. If that was the case, do you think it would matter if the dag was enabled or not? I only have two dags enabled.
@kaxil If it is this, do you have any ideas where to look to try and confirm? And if it is this, if the dag is in both environments, wouldn't this happen in both environments? Note that the dag with the custom `following_schedule` is not the one throwing the pk violation.
Marking this as can't reproduce for now -- it may be a race condition, but... 🤷🏻
I'll keep trying to narrow it down. It seems an `airflow db init` does not reliably fix it.
It does seem to point to a race condition, given that after the error occurs, repeatedly restarting the scheduler will sometimes sort things out.
The only reliable way to restart the scheduler after the error occurs seems to be to manually update `next_dagrun_create_after` to a datetime in the future.
We are using Airflow 1.10.9. We are calling the dag REST endpoint to trigger. Intermittently we see the call fail. In the Postgres logs we see:
```
ERROR: duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(ext_trigger_create_cbid_profile_for_duns, manual__2020-12-17T22:39:06.726412+00:00) already exists.
STATEMENT: INSERT INTO dag_run (dag_id, execution_date, start_date, end_date, state, run_id, external_trigger, conf) VALUES ('ext_trigger_create_cbid_profile_for_duns', '2020-12-17T22:39:06.726412+00:00'::timestamptz, '2020-12-17T22:39:06.962088+00:00'::timestamptz, NULL, 'running', 'manual__2020-12-17T22:39:06.726412+00:00', true, ....
```
Since it is related to this error, I am reporting it here.
I'm still trying to work through this and am slowly moving dags over from the original environment, where the error occurred, to the clean environment. While I can't seem to get the database into the troubled state on its own, I can force it into that state by stopping the scheduler and running the below. Then when you start the scheduler, you get the pk violation.
```
update dag set next_dagrun = (select max(execution_date) from dag_run where dag_id = 'example_task_group')
where dag_id = 'example_task_group2'
```
I think what is happening is that if you have a very small number of dags, then in the time it takes for the scheduler to throw the error, one of the parser processes updates the backend db with the correct `next_dagrun`, and starting the scheduler again works fine. As the number of dags grows, the chance that the problematic dag gets updated before the scheduler shuts down due to the pk violation decreases, so the error persists until you are lucky enough to get the problematic dag parsed.
So while it's not clear how the database can get _into_ this state, would it make sense to add some "self healing" so that the scheduler reparses all the dags on startup? Or maybe, rather than bailing out, there could be some error handling in the scheduler so that if a pk violation does arise, the dag is reparsed and scheduling is retried?
Yeah, I think at the very least we should catch the unique constraint violation and ignore it/carry on.
That said, I'd like to work out _how_ this is possible, as I thought every update to the DAG model was behind a row-level lock, so this sort of failure wasn't meant to be possible.
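To make the "catch the unique constraint violation and carry on" idea concrete, here is a rough sketch built around the `dag.create_dagrun(...)` call from the traceback above (the helper name and wiring are made up for illustration; this is not the actual fix):
```
import logging

from sqlalchemy.exc import IntegrityError

from airflow.utils.state import State
from airflow.utils.types import DagRunType

log = logging.getLogger(__name__)


def create_scheduled_dagrun_if_missing(dag, dag_model, session, creating_job_id):
    """Create the scheduled DagRun, but skip it if another process got there first."""
    try:
        # Use a SAVEPOINT so a duplicate-key failure only rolls back this one
        # INSERT instead of the whole scheduler transaction.
        with session.begin_nested():
            dag.create_dagrun(
                run_type=DagRunType.SCHEDULED,
                execution_date=dag_model.next_dagrun,
                state=State.RUNNING,
                external_trigger=False,
                creating_job_id=creating_job_id,
                session=session,
            )
    except IntegrityError:
        log.warning(
            "DagRun (%s, %s) already exists, skipping it",
            dag_model.dag_id,
            dag_model.next_dagrun,
        )
```
Whether silently skipping is the right behaviour is a separate question, since it would hide exactly the kind of out-of-sync `next_dagrun` that started this thread.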
> We are using Airflow 1.10.9. We are calling the dag REST endpoint to trigger. Intermittently we see the call fails. In the postgres logs we see
> `ERROR: duplicate key value violates unique constraint "dag_run_dag_id_run_id_key" DETAIL: Key (dag_id, run_id)=(ext_trigger_create_cbid_profile_for_duns, manual__2020-12-17T22:39:06.726412+00:00) already exists. STATEMENT: INSERT INTO dag_run (dag_id, execution_date, start_date, end_date, state, run_id, external_trigger, conf) VALUES ('ext_trigger_create_cbid_profile_for_duns', '2020-12-17T22:39:06.726412+00:00'::timestamptz, '2020-12-17T22:39:06.962088+00:00'::timestamptz, NULL, 'running', 'manual__2020-12-17T22:39:06.726412+00:00', true, ....`
>
> Since it is related to error reporting it here.
@satishcb This is something else -- That error is saying that the pair `(ext_trigger_create_cbid_profile_for_duns, manual__2020-12-17T22:39:06.726412+00:00)` already exists in the DB. Since that is from the API that likely means you have already triggered that exact pair before.
@ashb I think I narrowed this down to being an issue with one of my plugins (a FAB view) that relies on an Airflow Variable. I think this is the same one I pinged you about on Slack a week or two ago, and we thought it was likely due to me running the scheduler in a debugger, but maybe that wasn't (entirely) the problem. I found that if I commented out the lines that made the call to `Variables`, the pk violations stopped happening. If I put them back in, stopped the scheduler long enough for a dag to need to be triggered right away (with one scheduled to run every minute, that was one minute), and then started the scheduler again, it would trigger the pk violation.
In trying to recreate a minimal example, I added the following to a file in plugins/
```
from airflow.models.variable import Variable
v = Variable().get("VAR")
```
and upon starting, it triggered this error:
`RuntimeError: UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!`
which was the same error I pinged you about.
This made me go back and look at the code in the plugin, and I had:
```
try:
models = Variables().get("MODELS")
except:
models = []
```
If I took out the generic except, then rather than kicking out a pk violation, it raised the UNEXPECTED COMMIT error. So the pk violation was happening because of the overly broad except in the plugin code.
So this brings up the question: how does one use Variables in plugins without breaking the HA code?
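One way to sidestep the module-level call, at least conceptually, is to defer the `Variable` lookup into the request handler, so the scheduler never opens a DB session while loading the plugin. A rough sketch (the view class and template names are only illustrative):
```
from flask_appbuilder import BaseView as AppBuilderBaseView, expose

from airflow.models import Variable


class TestAppBuilderBaseView(AppBuilderBaseView):
    default_view = "list"

    @expose("/")
    def list(self):
        # The Variable is read per request instead of at import time, so
        # loading the plugin never touches the metadata DB in the scheduler.
        models = Variable.get("MODELS", default_var="[]")
        return self.render_template("models_list.html", models=models)
```
It doesn't answer the general question, but it avoids the commit during plugin import.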
Oh interesting. Just by putting that in a plugin things break?
It should be possible to do it - when is this plugin code being run?
In this simple example, it gets called when the scheduler is initializing and loads plugins.
In my actual plugin, the call to Variables is made when the view gets initialized like this:
`v_appbuilder_view = TestAppBuilderBaseView()`
But both happen when the scheduler loads plugins.
log and stacktrace
```
(pyenv) jcoder@bos-rndab01 ~/airflow_scheduler_test $ airflow scheduler
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2020-12-18 16:41:19,262] {scheduler_job.py:1241} INFO - Starting the scheduler
[2020-12-18 16:41:19,262] {scheduler_job.py:1246} INFO - Processing each file at most -1 times
[2020-12-18 16:41:19,323] {dag_processing.py:250} INFO - Launched DagFileProcessorManager with pid: 7925
[2020-12-18 16:41:19,324] {scheduler_job.py:1751} INFO - Resetting orphaned tasks for active dag runs
[2020-12-18 16:41:19,331] {settings.py:52} INFO - Configured default timezone Timezone('UTC')
[2020-12-18 16:41:19,349] {scheduler_job.py:1805} INFO - Reset the following 2 orphaned TaskInstances:
<TaskInstance: example_task_group2.section_2.inner_section_2.task_2 2020-12-17 21:47:00+00:00 [scheduled]>
<TaskInstance: example_task_group2.section_2.inner_section_2.task_2 2020-12-17 21:48:00+00:00 [scheduled]>
[2020-12-18 16:41:19,451] {plugins_manager.py:231} ERROR - UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!
Traceback (most recent call last):
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/airflow/plugins_manager.
py", line 222, in load_plugins_from_plugin_directory
loader.exec_module(mod)
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/jcoder/airflow_scheduler_test/plugins/Plugin.py", line 3, in <module>
v = Variable().get("VAR")
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/airflow/models/variable.
py", line 123, in get
var_val = Variable.get_variable_from_secrets(key=key)
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/airflow/models/variable.
py", line 181, in get_variable_from_secrets
var_val = secrets_backend.get_variable(key=key)
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/airflow/utils/session.py
", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/opt/anaconda/2020/envs/py_37_pands0.25/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/airflow/utils/session.py
", line 32, in create_session
session.commit()
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/session.p
y", line 1042, in commit
self.transaction.commit()
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/session.p
y", line 504, in commit
self._prepare_impl()
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/sqlalchemy/orm/session.p
y", line 472, in _prepare_impl
self.session.dispatch.before_commit(self.session)
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/sqlalchemy/event/attr.py
", line 322, in __call__
fn(*args, **kw)
File "/home/jcoder/airflow_scheduler_test/pyenv/lib/python3.7/site-packages/airflow/utils/sqlalchemy
.py", line 217, in _validate_commit
raise RuntimeError("UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!")
RuntimeError: UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!
[2020-12-18 16:41:19,460] {plugins_manager.py:232} ERROR - Failed to import plugin /home/jcoder/airflo
w_scheduler_test/plugins/Plugin.py
```
@JCoder01 where did we get with this issue? Was it a problem with your plugin or is this issue still outstanding?
Happy New Year @ashb
There is a little bit of both. There is an outstanding issue where use of Variables (or probably anything that interacts with the backend db at the global scope) in a plugin raises the `UNEXPECTED COMMIT` RuntimeError.
The problem I reported with being unable to restart the scheduler seems to have been caused by wrapping my `Variable.get` in an overly generous try/except, hiding the RuntimeError and causing things in the db to get out of sync.
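For reference, the narrower version of that block would look something like this (assuming the only failure you want to tolerate is the variable simply not existing):
```
from airflow.models import Variable

try:
    models = Variable.get("MODELS")
except KeyError:
    # Variable not set: fall back to an empty list, but let anything else,
    # such as the UNEXPECTED COMMIT RuntimeError, surface instead of being hidden.
    models = []
```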
Running the below should recreate the issue; I did this on Python 3.7:
```
mkdir airflow_scheduler_test
cd airflow_scheduler_test
export AIRFLOW_HOME=$(pwd)
virtualenv pyenv
source pyenv/bin/activate
pip install apache-airflow==2.0.0
airflow db init
airflow variables set test TEST
mkdir plugins
cat << EOF > plugins/test.py
from airflow.models.variable import Variable
print(Variable.get('test'))
EOF
airflow dags unpause example_bash_operator
airflow scheduler
```
I was able to reproduce this.
> cd airflow_scheduler_test
> export AIRFLOW_HOME=$(pwd)
> virtualenv pyenv
> source pyenv/bin/activate
> pip install apache-airflow==2.0.0
>
> airflow db init
> airflow variables set test TEST
> mkdir plugins
> cat << EOF > plugins/test.py
> from airflow.models.variable import Variable
>
> print(Variable.get('test'))
> EOF
>
> airflow dags unpause example_bash_operator
> airflow scheduler
Stacktrace:
<details><summary>CLICK ME</summary>
<p>
```
❯ airflow scheduler
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2021-01-26 13:09:59,150] {scheduler_job.py:1242} INFO - Starting the scheduler
[2021-01-26 13:09:59,151] {scheduler_job.py:1247} INFO - Processing each file at most -1 times
[2021-01-26 13:09:59,154] {dag_processing.py:250} INFO - Launched DagFileProcessorManager with pid: 5278
[2021-01-26 13:09:59,156] {scheduler_job.py:1772} INFO - Resetting orphaned tasks for active dag runs
[2021-01-26 13:09:59,161] {settings.py:52} INFO - Configured default timezone Timezone('UTC')
[2021-01-26 13:09:59,194] {plugins_manager.py:231} ERROR - UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!
Traceback (most recent call last):
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/plugins_manager.py", line 222, in load_plugins_from_plugin_directory
loader.exec_module(mod)
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/kaxilnaik/airflow/plugins/test.py", line 3, in <module>
print(Variable.get('test'))
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/models/variable.py", line 127, in get
var_val = Variable.get_variable_from_secrets(key=key)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/models/variable.py", line 193, in get_variable_from_secrets
var_val = secrets_backend.get_variable(key=key)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/utils/session.py", line 32, in create_session
session.commit()
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 1042, in commit
self.transaction.commit()
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 504, in commit
self._prepare_impl()
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 472, in _prepare_impl
self.session.dispatch.before_commit(self.session)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/utils/sqlalchemy.py", line 217, in _validate_commit
raise RuntimeError("UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!")
RuntimeError: UNEXPECTED COMMIT - THIS WILL BREAK HA LOCKS!
[2021-01-26 13:09:59,201] {plugins_manager.py:232} ERROR - Failed to import plugin /Users/kaxilnaik/airflow/plugins/test.py
[2021-01-26 13:09:59,386] {scheduler_job.py:1623} INFO - DAG scenario1_case2_02 is at (or above) max_active_runs (1 of 1), not creating any more runs
[2021-01-26 13:09:59,387] {scheduler_job.py:1623} INFO - DAG scenario1_case2_01 is at (or above) max_active_runs (1 of 1), not creating any more runs
[2021-01-26 13:09:59,435] {scheduler_job.py:936} INFO - 3 tasks up for execution:
<TaskInstance: scenario1_case2_02.tasks__1_of_10 2021-01-25 00:00:00+00:00 [scheduled]>
<TaskInstance: scenario1_case2_01.tasks__1_of_10 2021-01-25 00:00:00+00:00 [scheduled]>
<TaskInstance: scenario4_case2_1_40.tasks__1_of_10 2021-01-25 00:00:00+00:00 [scheduled]>
[2021-01-26 13:09:59,436] {scheduler_job.py:970} INFO - Figuring out tasks to run in Pool(name=default_pool) with 128 open slots and 3 task instances ready to be queued
[2021-01-26 13:09:59,436] {scheduler_job.py:997} INFO - DAG scenario1_case2_02 has 0/1000000 running and queued tasks
[2021-01-26 13:09:59,436] {scheduler_job.py:997} INFO - DAG scenario1_case2_01 has 0/1000000 running and queued tasks
[2021-01-26 13:09:59,436] {scheduler_job.py:997} INFO - DAG scenario4_case2_1_40 has 0/16 running and queued tasks
[2021-01-26 13:09:59,437] {scheduler_job.py:1058} INFO - Setting the following tasks to queued state:
<TaskInstance: scenario1_case2_02.tasks__1_of_10 2021-01-25 00:00:00+00:00 [scheduled]>
<TaskInstance: scenario1_case2_01.tasks__1_of_10 2021-01-25 00:00:00+00:00 [scheduled]>
<TaskInstance: scenario4_case2_1_40.tasks__1_of_10 2021-01-25 00:00:00+00:00 [scheduled]>
[2021-01-26 13:09:59,438] {scheduler_job.py:1100} INFO - Sending TaskInstanceKey(dag_id='scenario1_case2_02', task_id='tasks__1_of_10', execution_date=datetime.datetime(2021, 1, 25, 0, 0, tzinfo=Timezone('UTC')), try_number=1) to executor with priority 10 and queue default
[2021-01-26 13:09:59,439] {base_executor.py:82} INFO - Adding to queue: ['airflow', 'tasks', 'run', 'scenario1_case2_02', 'tasks__1_of_10', '2021-01-25T00:00:00+00:00', '--local', '--pool', 'default_pool', '--subdir', '/Users/kaxilnaik/airflow/dags/scenario1_case2_02.py']
[2021-01-26 13:09:59,439] {scheduler_job.py:1100} INFO - Sending TaskInstanceKey(dag_id='scenario1_case2_01', task_id='tasks__1_of_10', execution_date=datetime.datetime(2021, 1, 25, 0, 0, tzinfo=Timezone('UTC')), try_number=1) to executor with priority 10 and queue default
[2021-01-26 13:09:59,439] {base_executor.py:82} INFO - Adding to queue: ['airflow', 'tasks', 'run', 'scenario1_case2_01', 'tasks__1_of_10', '2021-01-25T00:00:00+00:00', '--local', '--pool', 'default_pool', '--subdir', '/Users/kaxilnaik/airflow/dags/scenario1_case2_01.py']
[2021-01-26 13:09:59,439] {scheduler_job.py:1100} INFO - Sending TaskInstanceKey(dag_id='scenario4_case2_1_40', task_id='tasks__1_of_10', execution_date=datetime.datetime(2021, 1, 25, 0, 0, tzinfo=Timezone('UTC')), try_number=1) to executor with priority 10 and queue default
[2021-01-26 13:09:59,439] {base_executor.py:82} INFO - Adding to queue: ['airflow', 'tasks', 'run', 'scenario4_case2_1_40', 'tasks__1_of_10', '2021-01-25T00:00:00+00:00', '--local', '--pool', 'default_pool', '--subdir', '/Users/kaxilnaik/airflow/dags/scenario4_case2_1.py']
[2021-01-26 13:09:59,440] {sequential_executor.py:59} INFO - Executing command: ['airflow', 'tasks', 'run', 'scenario1_case2_02', 'tasks__1_of_10', '2021-01-25T00:00:00+00:00', '--local', '--pool', 'default_pool', '--subdir', '/Users/kaxilnaik/airflow/dags/scenario1_case2_02.py']
[2021-01-26 13:10:00,926] {dagbag.py:440} INFO - Filling up the DagBag from /Users/kaxilnaik/airflow/dags/scenario1_case2_02.py
TEST
Running <TaskInstance: scenario1_case2_02.tasks__1_of_10 2021-01-25T00:00:00+00:00 [queued]> on host 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa
[2021-01-26 13:10:01,570] {sequential_executor.py:59} INFO - Executing command: ['airflow', 'tasks', 'run', 'scenario1_case2_01', 'tasks__1_of_10', '2021-01-25T00:00:00+00:00', '--local', '--pool', 'default_pool', '--subdir', '/Users/kaxilnaik/airflow/dags/scenario1_case2_01.py']
[2021-01-26 13:10:02,992] {dagbag.py:440} INFO - Filling up the DagBag from /Users/kaxilnaik/airflow/dags/scenario1_case2_01.py
TEST
Running <TaskInstance: scenario1_case2_01.tasks__1_of_10 2021-01-25T00:00:00+00:00 [queued]> on host 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa
[2021-01-26 13:10:03,628] {sequential_executor.py:59} INFO - Executing command: ['airflow', 'tasks', 'run', 'scenario4_case2_1_40', 'tasks__1_of_10', '2021-01-25T00:00:00+00:00', '--local', '--pool', 'default_pool', '--subdir', '/Users/kaxilnaik/airflow/dags/scenario4_case2_1.py']
[2021-01-26 13:10:05,054] {dagbag.py:440} INFO - Filling up the DagBag from /Users/kaxilnaik/airflow/dags/scenario4_case2_1.py
TEST
Running <TaskInstance: scenario4_case2_1_40.tasks__1_of_10 2021-01-25T00:00:00+00:00 [queued]> on host 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa
[2021-01-26 13:10:05,750] {scheduler_job.py:1201} INFO - Executor reports execution of scenario1_case2_02.tasks__1_of_10 execution_date=2021-01-25 00:00:00+00:00 exited with status success for try_number 1
[2021-01-26 13:10:05,750] {scheduler_job.py:1201} INFO - Executor reports execution of scenario1_case2_01.tasks__1_of_10 execution_date=2021-01-25 00:00:00+00:00 exited with status success for try_number 1
[2021-01-26 13:10:05,750] {scheduler_job.py:1201} INFO - Executor reports execution of scenario4_case2_1_40.tasks__1_of_10 execution_date=2021-01-25 00:00:00+00:00 exited with status success for try_number 1
[2021-01-26 13:10:05,792] {scheduler_job.py:1293} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(scenario1_case2_02, scheduled__2021-01-25T00:00:00+00:00) already exists.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1275, in _execute
self._run_scheduler_loop()
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1377, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1474, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1582, in _create_dag_runs
creating_job_id=self.id,
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/airflow/models/dag.py", line 1781, in create_dagrun
session.flush()
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
self._flush(objects)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
transaction.rollback(_capture_exception=True)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
exc_value, with_traceback=exc_tb,
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
flush_context.execute()
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
insert,
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 1136, in _emit_insert_statements
statement, params
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
return meth(self, multiparams, params)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1130, in _execute_clauseelement
distilled_params,
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
e, statement, parameters, cursor, context
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/Users/kaxilnaik/opt/anaconda3/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.IntegrityError: (psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(scenario1_case2_02, scheduled__2021-01-25T00:00:00+00:00) already exists.
[SQL: INSERT INTO dag_run (dag_id, execution_date, start_date, end_date, state, run_id, creating_job_id, external_trigger, run_type, conf, last_scheduling_decision, dag_hash) VALUES (%(dag_id)s, %(execution_date)s, %(start_date)s, %(end_date)s, %(state)s, %(run_id)s, %(creating_job_id)s, %(external_trigger)s, %(run_type)s, %(conf)s, %(last_scheduling_decision)s, %(dag_hash)s) RETURNING dag_run.id]
[parameters: {'dag_id': 'scenario1_case2_02', 'execution_date': datetime.datetime(2021, 1, 25, 0, 0, tzinfo=Timezone('UTC')), 'start_date': datetime.datetime(2021, 1, 26, 13, 10, 5, 790285, tzinfo=Timezone('UTC')), 'end_date': None, 'state': 'running', 'run_id': 'scheduled__2021-01-25T00:00:00+00:00', 'creating_job_id': 1, 'external_trigger': False, 'run_type': <DagRunType.SCHEDULED: 'scheduled'>, 'conf': <psycopg2.extensions.Binary object at 0x7f95c0b11db0>, 'last_scheduling_decision': None, 'dag_hash': '3bb8093d821e43ddfc6ab56cd9eccfd2'}]
(Background on this error at: http://sqlalche.me/e/13/gkpj)
[2021-01-26 13:10:06,814] {process_utils.py:100} INFO - Sending Signals.SIGTERM to GPID 5278
[2021-01-26 13:10:07,005] {process_utils.py:206} INFO - Waiting up to 5 seconds for processes to exit...
[2021-01-26 13:10:07,041] {process_utils.py:66} INFO - Process psutil.Process(pid=5388, status='terminated', started='13:10:05') (5388) terminated with exit code None
[2021-01-26 13:10:07,043] {process_utils.py:66} INFO - Process psutil.Process(pid=5385, status='terminated', started='13:10:05') (5385) terminated with exit code None
[2021-01-26 13:10:07,043] {process_utils.py:66} INFO - Process psutil.Process(pid=5278, status='terminated', exitcode=0, started='13:09:59') (5278) terminated with exit code 0
[2021-01-26 13:10:07,043] {scheduler_job.py:1296} INFO - Exited execute loop
```
</p>
</details>
This looks like the same issue as https://github.com/apache/airflow/issues/13685
#13920 should fix the issue with Duplicate Dag Run.
I still need to figure out the fix to allow retrieving Variable / Connections in the Plugin
Looks like this is where the Scheduler runs Plugin code:
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/jobs/scheduler_job.py#L1275
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/jobs/scheduler_job.py#L1377
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/jobs/scheduler_job.py#L1439
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/jobs/scheduler_job.py#L1474
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/jobs/scheduler_job.py#L1568
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/models/dagbag.py#L171
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/models/dagbag.py#L229
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/models/serialized_dag.py#L167
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/serialization/serialized_objects.py#L658
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/serialization/serialized_objects.py#L412
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/plugins_manager.py#L346
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/plugins_manager.py#L276
https://github.com/apache/airflow/blob/65e49fc56f32b3e815fdf4a17be6b4e1c1e43c11/airflow/plugins_manager.py#L222
| 2021-01-27T19:40:16Z | [] | [] |
Traceback (most recent call last):
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/home/jcoder/git/airflow_2.0/pyenv/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
| 2,339 |
|||
apache/airflow | apache__airflow-14274 | 3709503ecf180bd8c85190bcc7e5e755b60d9bfb | diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
@@ -43,16 +43,23 @@ def check(self, session=None):
min_req_sqlite_version = Version('3.15')
installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
if installed_sqlite_version < min_req_sqlite_version:
- return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, SQLite version below {} is no longer supported. \n{}".format(
+ min_req_sqlite_version, more_info
+ )
elif "postgres" in conn_str:
min_req_postgres_version = Version('9.6')
installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
if installed_postgres_version < min_req_postgres_version:
- return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, PostgreSQL version below {} is no longer supported. \n{}".format(
+ min_req_postgres_version, more_info
+ )
elif "mysql" in conn_str:
min_req_mysql_version = Version('5.7')
- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
+ # special treatment is needed here, because MySQL version may include a suffix like '-log'
+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])
if installed_mysql_version < min_req_mysql_version:
- return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, MySQL version below {} is no longer supported. \n{}".format(
+ min_req_mysql_version, more_info
+ )
| upgrade_check fails db version check
**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15
**Environment**: DEV
- **Cloud provider or hardware configuration**: AWS
- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)
- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux
- **Install tools**:
- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base
**What happened**: Running `airflow upgrade_check` returns the following error:
```
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check
==================================================== STATUS ====================================================
Check for latest versions of apache-airflow and checker...............................................SUCCESS
Remove airflow.AirflowMacroPlugin class...............................................................SUCCESS
/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. Support for direct imports will be dropped entirely in Airflow 2.0.
DeprecationWarning)
Ensure users are not using custom metaclasses in custom operators.....................................SUCCESS
Chain between DAG and operator not allowed............................................................SUCCESS
Connection.conn_type is not nullable..................................................................SUCCESS
Custom Executors now require full path................................................................SUCCESS
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 118, in run
all_problems = check_upgrade(formatter, rules)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 38, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py", line 56, in check
installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
File "/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py", line 298, in __init__
raise InvalidVersion("Invalid version: '{0}'".format(version))
packaging.version.InvalidVersion: Invalid version: '5.7.26-log'
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$
```
**What you expected to happen**: the command runs through and prints helpful messages
<!-- What do you think went wrong? -->
Running `session.execute('SELECT VERSION();').scalar()` against the metastore db returns `'5.7.26-log'`, which is possibly not a valid value for the `Version` class `__init__` function because of the `-log` ending?
```
mysql> select VERSION();
+------------+
| VERSION() |
+------------+
| 5.7.26-log |
+------------+
1 row in set (0.00 sec)
```
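The parsing failure is easy to confirm outside Airflow with the `packaging` library that the check uses (a quick illustration, not Airflow code):
```
from packaging.version import InvalidVersion, Version

raw = "5.7.26-log"  # what SELECT VERSION() returns on this RDS instance

try:
    Version(raw)
except InvalidVersion:
    print("not a valid PEP 440 version because of the '-log' suffix")

print(Version(raw.split("-")[0]))  # Version('5.7.26') parses fine
```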
**How to reproduce it**: Run `airflow upgrade_check` again.
**Anything else we need to know**:
Dockerfile snippet:
```
FROM apache/airflow:1.10.14
...
USER ${AIRFLOW_UID}
RUN pip install --user \
airflow-kubernetes-job-operator \
apache-airflow-backport-providers-cncf-kubernetes \
apache-airflow-backport-providers-ssh \
apache-airflow-upgrade-check
```
How often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.
| Thanks for opening your first issue here! Be sure to follow the issue template!
| 2021-02-17T13:18:14Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 118, in run
all_problems = check_upgrade(formatter, rules)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 38, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py", line 56, in check
installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
File "/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py", line 298, in __init__
raise InvalidVersion("Invalid version: '{0}'".format(version))
packaging.version.InvalidVersion: Invalid version: '5.7.26-log'
| 2,343 |
|||
apache/airflow | apache__airflow-14869 | 16f43605f3370f20611ba9e08b568ff8a7cd433d | diff --git a/airflow/providers/mysql/hooks/mysql.py b/airflow/providers/mysql/hooks/mysql.py
--- a/airflow/providers/mysql/hooks/mysql.py
+++ b/airflow/providers/mysql/hooks/mysql.py
@@ -18,11 +18,17 @@
"""This module allows to connect to a MySQL database."""
import json
-from typing import Dict, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
from airflow.hooks.dbapi import DbApiHook
from airflow.models import Connection
+if TYPE_CHECKING:
+ from mysql.connector.abstracts import MySQLConnectionAbstract
+ from MySQLdb.connections import Connection as MySQLdbConnection
+
+MySQLConnectionTypes = Union['MySQLdbConnection', 'MySQLConnectionAbstract']
+
class MySqlHook(DbApiHook):
"""
@@ -50,20 +56,36 @@ def __init__(self, *args, **kwargs) -> None:
self.schema = kwargs.pop("schema", None)
self.connection = kwargs.pop("connection", None)
- def set_autocommit(self, conn: Connection, autocommit: bool) -> None: # noqa: D403
- """MySql connection sets autocommit in a different way."""
- conn.autocommit(autocommit)
+ def set_autocommit(self, conn: MySQLConnectionTypes, autocommit: bool) -> None:
+ """
+ The MySQLdb (mysqlclient) client uses an `autocommit` method rather
+ than an `autocommit` property to set the autocommit setting
+
+ :param conn: connection to set autocommit setting
+ :type MySQLConnectionTypes: connection object.
+ :param autocommit: autocommit setting
+ :type bool: True to enable autocommit, False to disable autocommit
+ :rtype: None
+ """
+ if hasattr(conn.__class__, 'autocommit') and isinstance(conn.__class__.autocommit, property):
+ conn.autocommit = autocommit
+ else:
+ conn.autocommit(autocommit)
- def get_autocommit(self, conn: Connection) -> bool: # noqa: D403
+ def get_autocommit(self, conn: MySQLConnectionTypes) -> bool:
"""
- MySql connection gets autocommit in a different way.
+ The MySQLdb (mysqlclient) client uses a `get_autocommit` method
+ rather than an `autocommit` property to get the autocommit setting
:param conn: connection to get autocommit setting from.
- :type conn: connection object.
+ :type MySQLConnectionTypes: connection object.
:return: connection autocommit setting
:rtype: bool
"""
- return conn.get_autocommit()
+ if hasattr(conn.__class__, 'autocommit') and isinstance(conn.__class__.autocommit, property):
+ return conn.autocommit
+ else:
+ return conn.get_autocommit()
def _get_conn_config_mysql_client(self, conn: Connection) -> Dict:
conn_config = {
@@ -122,7 +144,7 @@ def _get_conn_config_mysql_connector_python(self, conn: Connection) -> Dict:
return conn_config
- def get_conn(self):
+ def get_conn(self) -> MySQLConnectionTypes:
"""
Establishes a connection to a mysql database
by extracting the connection configuration from the Airflow connection.
| MySQL hook uses wrong autocommit calls for mysql-connector-python
**Apache Airflow version**: 2.0.1
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): n/a
**Environment**:
* **Cloud provider or hardware configuration**: WSL2/Docker running `apache/airflow:2.0.1-python3.7` image
* **OS** (e.g. from /etc/os-release): Host: Ubuntu 20.04 LTS, Docker Image: Debian GNU/Linux 10 (buster)
* **Kernel** (e.g. `uname -a`): 5.4.72-microsoft-standard-WSL2 x86_64
* **Others**: Docker version 19.03.8, build afacb8b7f0
**What happened**:
Received a `'bool' object is not callable` error when attempting to use the mysql-connector-python client for a task:
```
[2021-03-17 10:20:13,247] {{taskinstance.py:1455}} ERROR - 'bool' object is not callable
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1112, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1285, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1310, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/mysql/operators/mysql.py", line 74, in execute
hook.run(self.sql, autocommit=self.autocommit, parameters=self.parameters)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/hooks/dbapi.py", line 175, in run
self.set_autocommit(conn, autocommit)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/mysql/hooks/mysql.py", line 55, in set_autocommit
conn.autocommit(autocommit)
```
**What you expected to happen**:
The task to run without complaints.
**How to reproduce it**:
Create and use a MySQL connection with `{"client": "mysql-connector-python"}` specified in the Extra field.
**Anything else we need to know**:
The MySQL hook seems to be using `conn.get_autocommit()` and `conn.autocommit()` to get/set the autocommit flag for both mysqlclient and mysql-connector-python. These methods don't actually exist in mysql-connector-python, as it exposes autocommit as a property rather than a method.
I was able to work around it by adding an `if not callable(conn.autocommit)` condition to detect when mysql-connector-python is being used, but there's probably a more elegant way of detecting which client is in use.
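For illustration, the `callable` workaround looks roughly like this (a stand-alone sketch, not the hook code; the patch above instead checks whether the connection class defines `autocommit` as a property):
```
def _uses_autocommit_property(conn):
    # mysql-connector-python exposes `autocommit` as a property (a plain bool),
    # while mysqlclient/MySQLdb exposes it as a bound method.
    return not callable(conn.autocommit)


def set_autocommit(conn, autocommit):
    if _uses_autocommit_property(conn):
        conn.autocommit = autocommit
    else:
        conn.autocommit(autocommit)


def get_autocommit(conn):
    if _uses_autocommit_property(conn):
        return conn.autocommit
    return conn.get_autocommit()
```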
mysql-connector-python documentation:
https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlconnection-autocommit.html
Autocommit calls:
https://github.com/apache/airflow/blob/2a2adb3f94cc165014d746102e12f9620f271391/airflow/providers/mysql/hooks/mysql.py#L55
https://github.com/apache/airflow/blob/2a2adb3f94cc165014d746102e12f9620f271391/airflow/providers/mysql/hooks/mysql.py#L66
| Thanks for opening your first issue here! Be sure to follow the issue template!
You can tell which client is used by checking `client_name`:
https://github.com/apache/airflow/blob/2a2adb3f94cc165014d746102e12f9620f271391/airflow/providers/mysql/hooks/mysql.py#L140
https://github.com/apache/airflow/blob/2a2adb3f94cc165014d746102e12f9620f271391/airflow/providers/mysql/hooks/mysql.py#L142
https://github.com/apache/airflow/blob/2a2adb3f94cc165014d746102e12f9620f271391/airflow/providers/mysql/hooks/mysql.py#L148
Will you submit a PR to fix the issue?
FYI @feluelle I believe you added the library?
@eladkal I did try that, but `conn` seems to point to the mysql-connector-python instance rather than the Airflow Connection instance in `set_autocommit` and `get_autocommit` | 2021-03-18T05:58:43Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1112, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1285, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1310, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/mysql/operators/mysql.py", line 74, in execute
hook.run(self.sql, autocommit=self.autocommit, parameters=self.parameters)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/hooks/dbapi.py", line 175, in run
self.set_autocommit(conn, autocommit)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/mysql/hooks/mysql.py", line 55, in set_autocommit
conn.autocommit(autocommit)
```
**What you expected to happen**:
The task to run without complaints.
| 2,348 |
|||
apache/airflow | apache__airflow-15074 | b4374d33b0e5d62c3510f1f5ac4a48e7f48cb203 | diff --git a/airflow/www/utils.py b/airflow/www/utils.py
--- a/airflow/www/utils.py
+++ b/airflow/www/utils.py
@@ -126,9 +126,11 @@ def generate_pages(current_page, num_of_pages, search=None, status=None, window=
output = [Markup('<ul class="pagination" style="margin-top:0;">')]
is_disabled = 'disabled' if current_page <= 0 else ''
+
+ first_node_link = void_link if is_disabled else f'?{get_params(page=0, search=search, status=status)}'
output.append(
first_node.format(
- href_link=f"?{get_params(page=0, search=search, status=status)}", # noqa
+ href_link=first_node_link,
disabled=is_disabled,
)
)
@@ -171,9 +173,13 @@ def is_current(current, page): # noqa
)
output.append(next_node.format(href_link=page_link, disabled=is_disabled)) # noqa
+
+ last_node_link = (
+ void_link if is_disabled else f'?{get_params(page=last_page, search=search, status=status)}'
+ )
output.append(
last_node.format(
- href_link=f"?{get_params(page=last_page, search=search, status=status)}", # noqa
+ href_link=last_node_link,
disabled=is_disabled,
)
)
| WEB UI, last page button does not work when all dags are in not active state
**Apache Airflow version**:
1.10.10
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):
none
**Environment**:
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release):
NAME="CentOS Linux"
VERSION="7 (Core)"
ID="centos"
ID_LIKE="rhel fedora"
VERSION_ID="7"
PRETTY_NAME="CentOS Linux 7 (Core)"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:centos:centos:7"
HOME_URL="https://www.centos.org/"
BUG_REPORT_URL="https://bugs.centos.org/"
CENTOS_MANTISBT_PROJECT="CentOS-7"
CENTOS_MANTISBT_PROJECT_VERSION="7"
REDHAT_SUPPORT_PRODUCT="centos"
REDHAT_SUPPORT_PRODUCT_VERSION="7"
- **Kernel** (e.g. `uname -a`):
Linux mid1-e-1 3.10.0-514.2.2.el7.x86_64 #1 SMP Tue Dec 6 23:06:41 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux
- **Install tools**:
pip yum
- **Others**:
**What happened**:
If there are no dags, the last page button ( >> ) has
`http://airflow-test.buongiorno.com/home?search=&page=-1`
set as its link; it should be _page=0_. This leads to this exception:
```
Node: datalake-test.docomodigital.com
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask_appbuilder/security/decorators.py", line 101, in wraps
return f(self, *args, **kwargs)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/airflow/www_rbac/views.py", line 302, in index
joinedload(DagModel.tags)).offset(start).limit(dags_per_page).all()
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/orm/query.py", line 3244, in all
return list(self)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/orm/query.py", line 3403, in __iter__
return self._execute_and_instances(context)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/orm/query.py", line 3428, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 984, in execute
return meth(self, multiparams, params)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1103, in _execute_clauseelement
distilled_params,
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1288, in _execute_context
e, statement, parameters, cursor, context
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1482, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
cursor, statement, parameters, context
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 588, in do_execute
cursor.execute(statement, parameters)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
ProgrammingError: (_mysql_exceptions.ProgrammingError) (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-25, 25) AS anon_1 LEFT OUTER JOIN dag_tag AS dag_tag_1 ON anon_1.dag_dag_id = d' at line 7")
[SQL: SELECT anon_1.dag_dag_id AS anon_1_dag_dag_id, anon_1.dag_root_dag_id AS anon_1_dag_root_dag_id, anon_1.dag_is_paused AS anon_1_dag_is_paused, anon_1.dag_is_subdag AS anon_1_dag_is_subdag, anon_1.dag_is_active AS anon_1_dag_is_active, anon_1.dag_last_scheduler_run AS anon_1_dag_last_scheduler_run, anon_1.dag_last_pickled AS anon_1_dag_last_pickled, anon_1.dag_last_expired AS anon_1_dag_last_expired, anon_1.dag_scheduler_lock AS anon_1_dag_scheduler_lock, anon_1.dag_pickle_id AS anon_1_dag_pickle_id, anon_1.dag_fileloc AS anon_1_dag_fileloc, anon_1.dag_owners AS anon_1_dag_owners, anon_1.dag_description AS anon_1_dag_description, anon_1.dag_default_view AS anon_1_dag_default_view, anon_1.dag_schedule_interval AS anon_1_dag_schedule_interval, dag_tag_1.name AS dag_tag_1_name, dag_tag_1.dag_id AS dag_tag_1_dag_id
FROM (SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval
FROM dag
WHERE dag.is_subdag = 0 AND dag.is_active = 1 AND (EXISTS (SELECT 1
FROM dag_tag
WHERE dag.dag_id = dag_tag.dag_id AND dag_tag.name IN (%s))) ORDER BY dag.dag_id
LIMIT %s, %s) AS anon_1 LEFT OUTER JOIN dag_tag AS dag_tag_1 ON anon_1.dag_dag_id = dag_tag_1.dag_id ORDER BY anon_1.dag_dag_id]
[parameters: (u'example', -25, 25)]
(Background on this error at: http://sqlalche.me/e/f405)
```
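For context, the failure comes from the requested `page=-1` being turned into a negative OFFSET (`LIMIT -25, 25` above). Below is a minimal sketch of the kind of clamp that would avoid it — the names and the page size constant are illustrative, not the actual Airflow view code:

```python
DAGS_PER_PAGE = 25  # illustrative; Airflow reads the real value from its config


def safe_start(page: int, per_page: int = DAGS_PER_PAGE) -> int:
    """Clamp the requested page so the query OFFSET can never go negative."""
    return max(page, 0) * per_page


assert safe_start(-1) == 0   # the ">>" link with page=-1 now behaves like page 0
assert safe_start(2) == 50
```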
**What you expected to happen**:
I expected the last page button to act like the first page button: nothing should happen.
**How to reproduce it**:
Set all the DAGs as inactive, then click the last page button.
**Anything else we need to know**:
It's just an annoyance.
| Hello.
Thanks for reporting a bug. This looks like something worth working on.
Would you like to work on a fix for this bug? We are open to contributions from everyone.
Best regards,
Kamil Breguła
OK, I'll try to write patches for the bugs I find. I'm just back from holidays; I just need a bit of time to organise my work.
Traceback (most recent call last):
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/flask_appbuilder/security/decorators.py", line 101, in wraps
return f(self, *args, **kwargs)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib/python2.7/site-packages/airflow/www_rbac/views.py", line 302, in index
joinedload(DagModel.tags)).offset(start).limit(dags_per_page).all()
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/orm/query.py", line 3244, in all
return list(self)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/orm/query.py", line 3403, in __iter__
return self._execute_and_instances(context)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/orm/query.py", line 3428, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 984, in execute
return meth(self, multiparams, params)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1103, in _execute_clauseelement
distilled_params,
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1288, in _execute_context
e, statement, parameters, cursor, context
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1482, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1248, in _execute_context
cursor, statement, parameters, context
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 588, in do_execute
cursor.execute(statement, parameters)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/jhub/_prod/server_global_unifieddata_hadoop_airflow_daemon/lib64/python2.7/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
ProgrammingError: (_mysql_exceptions.ProgrammingError) (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-25, 25) AS anon_1 LEFT OUTER JOIN dag_tag AS dag_tag_1 ON anon_1.dag_dag_id = d' at line 7")
| 2,350 |
|||
apache/airflow | apache__airflow-15132 | a6070026576d6a266c8df380a57deea3b43772d5 | diff --git a/airflow/stats.py b/airflow/stats.py
--- a/airflow/stats.py
+++ b/airflow/stats.py
@@ -343,7 +343,7 @@ def timer(self, stat=None, *args, tags=None, **kwargs):
"""Timer metric that can be cancelled"""
if stat and self.allow_list_validator.test(stat):
tags = tags or []
- return Timer(self.dogstatsd.timer(stat, *args, tags=tags, **kwargs))
+ return Timer(self.dogstatsd.timed(stat, *args, tags=tags, **kwargs))
return Timer()
| Enabling Datadog to tag metrics results in AttributeError
**Apache Airflow version**: 2.0.1
**Python version**: 3.8
**Cloud provider or hardware configuration**: AWS
**What happened**:
In order to add tags to [Airflow metrics](https://airflow.apache.org/docs/apache-airflow/stable/logging-monitoring/metrics.html), it's required to set `AIRFLOW__METRICS__STATSD_DATADOG_ENABLED` to `True` and add tags in the `AIRFLOW__METRICS__STATSD_DATADOG_TAGS` variable. We were routing our statsd metrics to Datadog anyway, so this should theoretically not have changed anything other than adding the specified tags.
Setting the environment variable `AIRFLOW__METRICS__STATSD_DATADOG_ENABLED` to `True` (along with the other required statsd connection variables) results in the following error, which causes the process to terminate. This is from the scheduler, but this would apply anywhere that `Stats.timer()` is being called.
```
AttributeError: 'DogStatsd' object has no attribute 'timer'
return Timer(self.dogstatsd.timer(stat, *args, tags=tags, **kwargs))
File "/usr/local/lib/python3.8/site-packages/airflow/stats.py", line 345, in timer
return fn(_self, stat, *args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/stats.py", line 233, in wrapper
timer = Stats.timer('scheduler.critical_section_duration')
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1538, in _do_scheduling
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1382, in _run_scheduler_loop
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1280, in _execute
Traceback (most recent call last):
```
**What you expected to happen**:
The same default Airflow metrics get sent by connecting to Datadog, tagged with the tags specified in `AIRFLOW__METRICS__STATSD_DATADOG_TAGS`.
**What do you think went wrong?**:
There is a bug in the implementation of the `timer` method of `SafeDogStatsdLogger`: https://github.com/apache/airflow/blob/master/airflow/stats.py#L341-L347
`DogStatsd` has no method called `timer`. Instead it should be `timed`: https://datadogpy.readthedocs.io/en/latest/#datadog.dogstatsd.base.DogStatsd.timed
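The mismatch is easy to confirm against the `datadog` client directly; a small sketch (the connection details, tag, and workload here are placeholders):

```python
import time

from datadog import DogStatsd

statsd = DogStatsd(host="localhost", port=8125)  # placeholder connection details

# statsd.timer(...) would raise:
#   AttributeError: 'DogStatsd' object has no attribute 'timer'
with statsd.timed("scheduler.critical_section_duration", tags=["env:dev"]):
    time.sleep(0.1)  # stand-in for the work being timed
```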
**How to reproduce it**:
Set the environment variables (or their respective config values) `AIRFLOW__METRICS__STATSD_ON`, `AIRFLOW__METRICS__STATSD_HOST`, `AIRFLOW__METRICS__STATSD_PORT`, and then set `AIRFLOW__METRICS__STATSD_DATADOG_ENABLED` to `True` and start up Airflow.
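For example, as environment variables (host/port and tag values are placeholders, not a recommended configuration):

```python
import os

# Environment-variable form of the [metrics] settings listed above
os.environ["AIRFLOW__METRICS__STATSD_ON"] = "True"
os.environ["AIRFLOW__METRICS__STATSD_HOST"] = "localhost"   # placeholder
os.environ["AIRFLOW__METRICS__STATSD_PORT"] = "8125"        # placeholder
os.environ["AIRFLOW__METRICS__STATSD_DATADOG_ENABLED"] = "True"
os.environ["AIRFLOW__METRICS__STATSD_DATADOG_TAGS"] = "env:dev,team:data"  # example tags
```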
**Anything else we need to know**:
How often does this problem occur? Every time
| Thanks for opening your first issue here! Be sure to follow the issue template!
| 2021-04-01T11:49:50Z | [] | [] |
Traceback (most recent call last):
```
**What you expected to happen**:
The same default Airflow metrics get sent by connecting to datadog, tagged with the metrics specified in `AIRFLOW__METRICS__STATSD_DATADOG_TAGS`.
| 2,356 |
|||
apache/airflow | apache__airflow-15212 | 18066703832319968ee3d6122907746fdfda5d4c | diff --git a/airflow/cli/commands/info_command.py b/airflow/cli/commands/info_command.py
--- a/airflow/cli/commands/info_command.py
+++ b/airflow/cli/commands/info_command.py
@@ -15,7 +15,6 @@
# specific language governing permissions and limitations
# under the License.
"""Config sub-commands"""
-import getpass
import locale
import logging
import os
@@ -33,6 +32,7 @@
from airflow.providers_manager import ProvidersManager
from airflow.typing_compat import Protocol
from airflow.utils.cli import suppress_logs_and_warning
+from airflow.utils.platform import getuser
from airflow.version import version as airflow_version
log = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ class PiiAnonymizer(Anonymizer):
def __init__(self):
home_path = os.path.expanduser("~")
- username = getpass.getuser()
+ username = getuser()
self._path_replacements = {home_path: "${HOME}", username: "${USER}"}
def process_path(self, value):
diff --git a/airflow/jobs/base_job.py b/airflow/jobs/base_job.py
--- a/airflow/jobs/base_job.py
+++ b/airflow/jobs/base_job.py
@@ -17,7 +17,6 @@
# under the License.
#
-import getpass
from time import sleep
from typing import Optional
@@ -37,6 +36,7 @@
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
+from airflow.utils.platform import getuser
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
@@ -100,7 +100,7 @@ def __init__(self, executor=None, heartrate=None, *args, **kwargs):
self.latest_heartbeat = timezone.utcnow()
if heartrate is not None:
self.heartrate = heartrate
- self.unixname = getpass.getuser()
+ self.unixname = getuser()
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
super().__init__(*args, **kwargs)
diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
import contextlib
-import getpass
import hashlib
import logging
import math
@@ -68,6 +67,7 @@
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.operator_helpers import context_to_airflow_vars
+from airflow.utils.platform import getuser
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime, with_row_locks
from airflow.utils.state import State
@@ -327,7 +327,7 @@ def __init__(self, task, execution_date: datetime, state: Optional[str] = None):
self.execution_date = execution_date
self.try_number = 0
- self.unixname = getpass.getuser()
+ self.unixname = getuser()
if state:
self.state = state
self.hostname = ''
diff --git a/airflow/providers/microsoft/winrm/hooks/winrm.py b/airflow/providers/microsoft/winrm/hooks/winrm.py
--- a/airflow/providers/microsoft/winrm/hooks/winrm.py
+++ b/airflow/providers/microsoft/winrm/hooks/winrm.py
@@ -17,7 +17,6 @@
# under the License.
#
"""Hook for winrm remote execution."""
-import getpass
from typing import Optional
from winrm.protocol import Protocol
@@ -25,6 +24,11 @@
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
+try:
+ from airflow.utils.platform import getuser
+except ImportError:
+ from getpass import getuser
+
# TODO: Fixme please - I have too complex implementation
# pylint: disable=too-many-instance-attributes,too-many-arguments,too-many-branches
@@ -201,7 +205,7 @@ def get_conn(self):
self.remote_host,
self.ssh_conn_id,
)
- self.username = getpass.getuser()
+ self.username = getuser()
# If endpoint is not set, then build a standard wsman endpoint from host and port.
if not self.endpoint:
diff --git a/airflow/providers/ssh/hooks/ssh.py b/airflow/providers/ssh/hooks/ssh.py
--- a/airflow/providers/ssh/hooks/ssh.py
+++ b/airflow/providers/ssh/hooks/ssh.py
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
"""Hook for SSH connections."""
-import getpass
import os
import warnings
from base64 import decodebytes
@@ -30,6 +29,11 @@
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
+try:
+ from airflow.utils.platform import getuser
+except ImportError:
+ from getpass import getuser
+
class SSHHook(BaseHook): # pylint: disable=too-many-instance-attributes
"""
@@ -173,7 +177,7 @@ def __init__( # pylint: disable=too-many-statements
self.remote_host,
self.ssh_conn_id,
)
- self.username = getpass.getuser()
+ self.username = getuser()
user_ssh_config_filename = os.path.expanduser('~/.ssh/config')
if os.path.isfile(user_ssh_config_filename):
diff --git a/airflow/task/task_runner/base_task_runner.py b/airflow/task/task_runner/base_task_runner.py
--- a/airflow/task/task_runner/base_task_runner.py
+++ b/airflow/task/task_runner/base_task_runner.py
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
"""Base task runner"""
-import getpass
import os
import subprocess
import threading
@@ -29,6 +28,7 @@
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
+from airflow.utils.platform import getuser
PYTHONPATH_VAR = 'PYTHONPATH'
@@ -60,7 +60,7 @@ def __init__(self, local_task_job):
# Add sudo commands to change user if we need to. Needed to handle SubDagOperator
# case using a SequentialExecutor.
self.log.debug("Planning to run as the %s user", self.run_as_user)
- if self.run_as_user and (self.run_as_user != getpass.getuser()):
+ if self.run_as_user and (self.run_as_user != getuser()):
# We want to include any environment variables now, as we won't
# want to have to specify them in the sudo call - they would show
# up in `ps` that way! And run commands now, as the other user
diff --git a/airflow/task/task_runner/cgroup_task_runner.py b/airflow/task/task_runner/cgroup_task_runner.py
--- a/airflow/task/task_runner/cgroup_task_runner.py
+++ b/airflow/task/task_runner/cgroup_task_runner.py
@@ -19,7 +19,6 @@
"""Task runner for cgroup to run Airflow task"""
import datetime
-import getpass
import os
import uuid
@@ -28,6 +27,7 @@
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.operator_resources import Resources
+from airflow.utils.platform import getuser
from airflow.utils.process_utils import reap_process_group
@@ -70,7 +70,7 @@ def __init__(self, local_task_job):
self.cpu_cgroup_name = None
self._created_cpu_cgroup = False
self._created_mem_cgroup = False
- self._cur_user = getpass.getuser()
+ self._cur_user = getuser()
def _create_cgroup(self, path):
"""
diff --git a/airflow/utils/cli.py b/airflow/utils/cli.py
--- a/airflow/utils/cli.py
+++ b/airflow/utils/cli.py
@@ -18,7 +18,6 @@
#
"""Utilities module for cli"""
import functools
-import getpass
import json
import logging
import os
@@ -35,7 +34,7 @@
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.utils import cli_action_loggers
-from airflow.utils.platform import is_terminal_support_colors
+from airflow.utils.platform import getuser, is_terminal_support_colors
from airflow.utils.session import provide_session
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
@@ -131,7 +130,7 @@ def _build_metrics(func_name, namespace):
'sub_command': func_name,
'start_datetime': datetime.utcnow(),
'full_command': f'{full_command}',
- 'user': getpass.getuser(),
+ 'user': getuser(),
}
if not isinstance(namespace, Namespace):
diff --git a/airflow/utils/platform.py b/airflow/utils/platform.py
--- a/airflow/utils/platform.py
+++ b/airflow/utils/platform.py
@@ -16,6 +16,7 @@
# under the License.
"""Platform and system specific function."""
+import getpass
import logging
import os
import pkgutil
@@ -57,3 +58,25 @@ def get_airflow_git_version():
log.debug(e)
return git_version
+
+
+def getuser() -> str:
+ """
+ Gets the username associated with the current user, or error with a nice
+ error message if there's no current user.
+
+ We don't want to fall back to os.getuid() because not having a username
+ probably means the rest of the user environment is wrong (e.g. no $HOME).
+ Explicit failure is better than silently trying to work badly.
+ """
+ try:
+ return getpass.getuser()
+ except KeyError:
+ # Inner import to avoid circular import
+ from airflow.exceptions import AirflowConfigException
+
+ raise AirflowConfigException(
+ "The user that Airflow is running as has no username; you must run"
+ "Airflow as a full user, with a username and home directory, "
+ "in order for it to function properly."
+ )
| Don't crash when a getpass.getuser() call fails
**Apache Airflow version**: 1.10.11
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): 1.18.4
**Environment**:
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release): Ubuntu 20.04
- **Kernel** (e.g. `uname -a`):
- **Install tools**:
- **Others**:
**What happened**:
There are many occurrences of calls to `getpass.getuser()` in the code, and none are fenced with a try/except to protect them from failures. However, in a containerized environment, running a process with a uid that is not defined in `/etc/passwd` is not uncommon when reusing an existing image. This makes Airflow crash with errors like the one below, even though the call to `getpass.getuser()` serves no critical purpose (it is sometimes used to annotate metrics...) AFAICT.
```
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 37, in <module>
args.func(args)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/cli.py", line 76, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/bin/cli.py", line 1189, in scheduler
job = jobs.SchedulerJob(
File "<string>", line 4, in __init__
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/state.py", line 433, in _initialize_instance
manager.dispatch.init_failure(self, args, kwargs)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.raise_(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/state.py", line 430, in _initialize_instance
return manager.original_init(*mixed[1:], **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 387, in __init__
super(SchedulerJob, self).__init__(*args, **kwargs)
File "<string>", line 6, in __init__
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 92, in __init__
self.unixname = getpass.getuser()
File "/usr/local/lib/python3.8/getpass.py", line 169, in getuser
return pwd.getpwuid(os.getuid())[0]
KeyError: 'getpwuid(): uid not found: 1000'
```
I think in many cases it could simply be replaced with:
```python
import getpass
import os

try:
    username = getpass.getuser()
except KeyError:
    # fall back to the raw uid when there is no passwd entry for it
    username = str(os.getuid())
```
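For reference, the patch above takes a slightly different route: rather than silently falling back to the numeric uid, it wraps `getpass.getuser()` and fails with an explicit, actionable error. A condensed sketch of that wrapper (the exception type is simplified here):

```python
import getpass


def getuser() -> str:
    """Condensed version of the wrapper introduced in the diff above."""
    try:
        return getpass.getuser()
    except KeyError:
        raise RuntimeError(
            "The user that Airflow is running as has no username; "
            "run Airflow as a full user, with a username and home directory."
        )
```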
**What you expected to happen**:
I expected Airflow not to crash just because the uid is not mapped to a user defined in `/etc/passwd`.
**How to reproduce it**:
Deploy the official airflow image in docker with `--user=9999` (or any other uid that is not 50000) or in k8s with a security context like:
```
securityContext:
runAsUser: 9999
```
**Anything else we need to know**:
It even looks like the value returned by `getuser()` is hardly used, e.g. jobs see their `unixname` attribute assigned the value, but use of that attribute seems hardly critical:
```
grep -HInr "\.unixname" "${VIRTUAL_ENV}"/lib/python3.8/site-packages/airflow{,_exporter}
airflow/migrations/versions/e3a246e0dc1_current_schema.py:105: sa.Column('unixname', sa.String(length=1000), nullable=True),
airflow/migrations/versions/e3a246e0dc1_current_schema.py:166: sa.Column('unixname', sa.String(length=1000), nullable=True),
airflow/migrations/versions/6e96a59344a4_make_taskinstance_pool_not_nullable.py:71: unixname = Column(String(1000))
airflow/jobs/base_job.py:67: unixname = Column(String(1000))
airflow/jobs/base_job.py:92: self.unixname = getpass.getuser()
airflow/www/views.py:2809: 'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
airflow/www/views.py:3015: 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
airflow/www_rbac/views.py:2491: 'executor_class', 'hostname', 'unixname']
airflow/www_rbac/views.py:2494: 'hostname', 'unixname']
airflow/www_rbac/views.py:2689: 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
airflow/models/taskinstance.py:157: unixname = Column(String(1000))
airflow/models/taskinstance.py:201: self.unixname = getpass.getuser()
airflow/models/taskinstance.py:487: self.unixname = ti.unixname
```
```
(there is a `run_as_user` attribute that is)
| 2021-04-05T19:42:00Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 37, in <module>
args.func(args)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/cli.py", line 76, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/bin/cli.py", line 1189, in scheduler
job = jobs.SchedulerJob(
File "<string>", line 4, in __init__
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/state.py", line 433, in _initialize_instance
manager.dispatch.init_failure(self, args, kwargs)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.raise_(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/state.py", line 430, in _initialize_instance
return manager.original_init(*mixed[1:], **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 387, in __init__
super(SchedulerJob, self).__init__(*args, **kwargs)
File "<string>", line 6, in __init__
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 92, in __init__
self.unixname = getpass.getuser()
File "/usr/local/lib/python3.8/getpass.py", line 169, in getuser
return pwd.getpwuid(os.getuid())[0]
KeyError: 'getpwuid(): uid not found: 1000'
| 2,359 |
||||
apache/airflow | apache__airflow-15680 | 30eeac7b7ed4ab5ea191691a3b713e3d66c0baff | diff --git a/airflow/providers/amazon/aws/transfers/mongo_to_s3.py b/airflow/providers/amazon/aws/transfers/mongo_to_s3.py
--- a/airflow/providers/amazon/aws/transfers/mongo_to_s3.py
+++ b/airflow/providers/amazon/aws/transfers/mongo_to_s3.py
@@ -40,7 +40,7 @@ class MongoToS3Operator(BaseOperator):
:param mongo_collection: reference to a specific collection in your mongo db
:type mongo_collection: str
:param mongo_query: query to execute. A list including a dict of the query
- :type mongo_query: list
+ :type mongo_query: Union[list, dict]
:param s3_bucket: reference to a specific S3 bucket to store the data
:type s3_bucket: str
:param s3_key: in which S3 key the file will be stored
@@ -49,8 +49,8 @@ class MongoToS3Operator(BaseOperator):
:type mongo_db: str
:param replace: whether or not to replace the file in S3 if it previously existed
:type replace: bool
- :param allow_disk_use: in the case you are retrieving a lot of data, you may have
- to use the disk to save it instead of saving all in the RAM
+ :param allow_disk_use: enables writing to temporary files in the case you are handling large dataset.
+ This only takes effect when `mongo_query` is a list - running an aggregate pipeline
:type allow_disk_use: bool
:param compression: type of compression to use for output file in S3. Currently only gzip is supported.
:type compression: str
@@ -115,7 +115,6 @@ def execute(self, context) -> bool:
mongo_collection=self.mongo_collection,
query=cast(dict, self.mongo_query),
mongo_db=self.mongo_db,
- allowDiskUse=self.allow_disk_use,
)
# Performs transform then stringifies the docs results into json format
| MongoToS3Operator failed when running with a single query (not aggregate pipeline)
**Apache Airflow version**: 2.0.2
**What happened**:
`MongoToS3Operator` failed when running with a single query (not an aggregate pipeline):
```sh
Traceback (most recent call last):
File "/home/airflow//bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow//lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow//lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/cli.py", line 89, in wrapper
return f(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 385, in task_test
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1413, in run
self._run_raw_task(
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1138, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow//lib/python3.8/site-packages/airflow/providers/amazon/aws/transfers/mongo_to_s3.py", line 116, in execute
results = MongoHook(self.mongo_conn_id).find(
File "/home/airflow//lib/python3.8/site-packages/airflow/providers/mongo/hooks/mongo.py", line 144, in find
return collection.find(query, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/pymongo/collection.py", line 1523, in find
return Cursor(self, *args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'allowDiskUse'
```
**What you expected to happen**:
I expect the data from MongoDB to be exported to a file in S3 with no errors.
**How to reproduce it**:
Run the following operator with a single `mongo_query` (no aggregate pipeline):
```python
export_to_s3 = MongoToS3Operator(
task_id='export_to_s3',
mongo_conn_id=Variable.get('mongo_conn_id'),
s3_conn_id=Variable.get('aws_conn_id'),
mongo_collection='my_mongo_collection',
mongo_query={},
s3_bucket=Variable.get('s3_bucket'),
s3_key="my_data.json",
replace=True,
dag=dag,
)
```
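The distinction behind the failure can be sketched against pymongo directly (the connection details, database, and collection names below are illustrative only):

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # illustrative connection
collection = client["my_db"]["my_mongo_collection"]

# A plain query goes through Cursor, whose constructor (at the pymongo version
# in the traceback above) does not accept an allowDiskUse keyword argument:
docs = collection.find({})

# allowDiskUse is an aggregation option, so it only makes sense for pipelines:
docs = collection.aggregate([{"$match": {}}], allowDiskUse=True)
```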
| 2021-05-05T17:14:15Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow//bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow//lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow//lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/cli.py", line 89, in wrapper
return f(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 385, in task_test
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1413, in run
self._run_raw_task(
File "/home/airflow//lib/python3.8/site-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1138, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow//lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow//lib/python3.8/site-packages/airflow/providers/amazon/aws/transfers/mongo_to_s3.py", line 116, in execute
results = MongoHook(self.mongo_conn_id).find(
File "/home/airflow//lib/python3.8/site-packages/airflow/providers/mongo/hooks/mongo.py", line 144, in find
return collection.find(query, **kwargs)
File "/home/airflow//lib/python3.8/site-packages/pymongo/collection.py", line 1523, in find
return Cursor(self, *args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'allowDiskUse'
| 2,369 |
||||
apache/airflow | apache__airflow-15794 | b272f9cec99fd0e3373d23b706f33892cbcb9626 | diff --git a/airflow/providers/apache/spark/hooks/spark_sql.py b/airflow/providers/apache/spark/hooks/spark_sql.py
--- a/airflow/providers/apache/spark/hooks/spark_sql.py
+++ b/airflow/providers/apache/spark/hooks/spark_sql.py
@@ -17,11 +17,14 @@
# under the License.
#
import subprocess
-from typing import Any, List, Optional, Union
+from typing import TYPE_CHECKING, Any, List, Optional, Union
-from airflow.exceptions import AirflowException
+from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.hooks.base import BaseHook
+if TYPE_CHECKING:
+ from airflow.models.connection import Connection
+
class SparkSqlHook(BaseHook):
"""
@@ -45,6 +48,7 @@ class SparkSqlHook(BaseHook):
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param master: spark://host:port, mesos://host:port, yarn, or local
+ (Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:type master: str
:param name: Name of the job.
:type name: str
@@ -52,7 +56,8 @@ class SparkSqlHook(BaseHook):
:type num_executors: int
:param verbose: Whether to pass the verbose flag to spark-sql
:type verbose: bool
- :param yarn_queue: The YARN queue to submit to (Default: "default")
+ :param yarn_queue: The YARN queue to submit to
+ (Default: The ``queue`` value set in the Connection, or ``"default"``)
:type yarn_queue: str
"""
@@ -72,16 +77,35 @@ def __init__(
executor_memory: Optional[str] = None,
keytab: Optional[str] = None,
principal: Optional[str] = None,
- master: str = 'yarn',
+ master: Optional[str] = None,
name: str = 'default-name',
num_executors: Optional[int] = None,
verbose: bool = True,
- yarn_queue: str = 'default',
+ yarn_queue: Optional[str] = None,
) -> None:
super().__init__()
+
+ try:
+ conn: "Optional[Connection]" = self.get_connection(conn_id)
+ except AirflowNotFoundException:
+ conn = None
+ options = {}
+ else:
+ options = conn.extra_dejson
+
+ # Set arguments to values set in Connection if not explicitly provided.
+ if master is None:
+ if conn is None:
+ master = "yarn"
+ elif conn.port:
+ master = f"{conn.host}:{conn.port}"
+ else:
+ master = conn.host
+ if yarn_queue is None:
+ yarn_queue = options.get("queue", "default")
+
self._sql = sql
self._conf = conf
- self._conn = self.get_connection(conn_id)
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
diff --git a/airflow/providers/apache/spark/operators/spark_sql.py b/airflow/providers/apache/spark/operators/spark_sql.py
--- a/airflow/providers/apache/spark/operators/spark_sql.py
+++ b/airflow/providers/apache/spark/operators/spark_sql.py
@@ -47,6 +47,7 @@ class SparkSqlOperator(BaseOperator):
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param master: spark://host:port, mesos://host:port, yarn, or local
+ (Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:type master: str
:param name: Name of the job
:type name: str
@@ -54,7 +55,8 @@ class SparkSqlOperator(BaseOperator):
:type num_executors: int
:param verbose: Whether to pass the verbose flag to spark-sql
:type verbose: bool
- :param yarn_queue: The YARN queue to submit to (Default: "default")
+ :param yarn_queue: The YARN queue to submit to
+ (Default: The ``queue`` value set in the Connection, or ``"default"``)
:type yarn_queue: str
"""
@@ -73,11 +75,11 @@ def __init__(
executor_memory: Optional[str] = None,
keytab: Optional[str] = None,
principal: Optional[str] = None,
- master: str = 'yarn',
+ master: Optional[str] = None,
name: str = 'default-name',
num_executors: Optional[int] = None,
verbose: bool = True,
- yarn_queue: str = 'default',
+ yarn_queue: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
| Spark SQL Hook not using connections
**Apache Airflow version**: 1.10.10
**What happened**:
`SparkSqlHook` does not actually use any connection. The default conn_id is `spark_sql_default`; if this connection doesn't exist, the hook raises an error:
```
Traceback (most recent call last):
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 983, in _run_raw_task
result = task_copy.execute(context=context)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/operators/spark_sql_operator.py", line 109, in execute
yarn_queue=self._yarn_queue
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_sql_hook.py", line 75, in __init__
self._conn = self.get_connection(conn_id)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/hooks/base_hook.py", line 84, in get_connection
conn = random.choice(list(cls.get_connections(conn_id)))
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/hooks/base_hook.py", line 80, in get_connections
return secrets.get_connections(conn_id)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/secrets/__init__.py", line 56, in get_connections
raise AirflowException("The conn_id `{0}` isn't defined".format(conn_id))
airflow.exceptions.AirflowException: The conn_id `spark_sql_default` isn't defined
```
If any valid connection is specified, it does nothing: the `self._conn` variable is never used, and there is an empty `get_conn` method.
```
def get_conn(self):
pass
```
**What you expected to happen**:
It should follow the same behaviour as `SparkSubmitHook` and receive the master host and extra parameters from the connection, OR not request a connection ID at all.
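A condensed sketch of the fallback logic the diff above introduces (names and defaults follow the diff; this is illustrative, not a drop-in replacement):

```python
from typing import Optional


def resolve_master_and_queue(conn, master: Optional[str], yarn_queue: Optional[str]):
    """Mirror of the connection-based defaults added in the diff above."""
    options = conn.extra_dejson if conn is not None else {}
    if master is None:
        if conn is None:
            master = "yarn"
        elif conn.port:
            master = f"{conn.host}:{conn.port}"
        else:
            master = conn.host
    if yarn_queue is None:
        yarn_queue = options.get("queue", "default")
    return master, yarn_queue
```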
**How to reproduce it**:
Just create a DAG with a `SparkSqlOperator` without having created the connection `spark_sql_default`.
```
sql_job = SparkSqlOperator(
sql="SELECT * FROM test",
master="local",
task_id="sql_job"
)
```
**Anything else we need to know**:
I am happy to implement any of these solutions.
| Thanks for opening your first issue here! Be sure to follow the issue template!
Can I take this issue?
Hi @danielenricocahall
Sure, go ahead.
I've assigned it to you @danielenricocahall
@danielenricocahall I'm unassigning you as you didn't complete the PR; if you wish to finish it, let us know.
This issue is open to anyone who wants to work on it. | 2021-05-12T09:12:38Z | [] | [] |
Traceback (most recent call last):
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 983, in _run_raw_task
result = task_copy.execute(context=context)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/operators/spark_sql_operator.py", line 109, in execute
yarn_queue=self._yarn_queue
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_sql_hook.py", line 75, in __init__
self._conn = self.get_connection(conn_id)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/hooks/base_hook.py", line 84, in get_connection
conn = random.choice(list(cls.get_connections(conn_id)))
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/hooks/base_hook.py", line 80, in get_connections
return secrets.get_connections(conn_id)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/secrets/__init__.py", line 56, in get_connections
raise AirflowException("The conn_id `{0}` isn't defined".format(conn_id))
airflow.exceptions.AirflowException: The conn_id `spark_sql_default` isn't defined
| 2,371 |
|||
apache/airflow | apache__airflow-15822 | 1edef28b315809c8367b29f3f1a83984dc6566c4 | diff --git a/airflow/models/dag.py b/airflow/models/dag.py
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -1463,13 +1463,8 @@ def partial_subset(
"""
# deep-copying self.task_dict and self._task_group takes a long time, and we don't want all
# the tasks anyway, so we copy the tasks manually later
- task_dict = self.task_dict
- task_group = self._task_group
- self.task_dict = {}
- self._task_group = None # type: ignore
- dag = copy.deepcopy(self)
- self.task_dict = task_dict
- self._task_group = task_group
+ memo = {id(self.task_dict): None, id(self._task_group): None}
+ dag = copy.deepcopy(self, memo) # type: ignore
if isinstance(task_ids_or_regex, (str, RePatternType)):
matched_tasks = [t for t in self.tasks if re.findall(task_ids_or_regex, t.task_id)]
diff --git a/airflow/operators/python.py b/airflow/operators/python.py
--- a/airflow/operators/python.py
+++ b/airflow/operators/python.py
@@ -432,6 +432,11 @@ def _read_result(self, filename):
)
raise
+ def __deepcopy__(self, memo):
+ # module objects can't be copied _at all__
+ memo[id(self.pickling_library)] = self.pickling_library
+ return super().__deepcopy__(memo)
+
def get_current_context() -> Dict[str, Any]:
"""
| Task preceeding PythonVirtualenvOperator fails: "cannot pickle 'module' object"
**Apache Airflow version**
13faa6912f7cd927737a1dc15630d3bbaf2f5d4d
**Environment**
- **Configuration**: Local Executor
- **OS** (e.g. from /etc/os-release): Mac OS 11.3
- **Kernel**: Darwin Kernel Version 20.4.0
- **Install tools**: `pip install -e .`
**The DAG**
```python
def callable():
print("hi")
with DAG(dag_id="two_virtualenv") as dag:
a = PythonOperator(
task_id="a",
python_callable=callable,
)
# b = PythonOperator( # works
b = PythonVirtualenvOperator( # doesn't work
task_id="b",
python_callable=callable,
)
a >> b
```
**What happened**:
Failure somewhere between first task and second:
```
INFO - Marking task as SUCCESS. dag_id=two_virtualenv, task_id=a
ERROR - Failed to execute task: cannot pickle 'module' object.
Traceback (most recent call last):
File "/Users/matt/src/airflow/airflow/executors/debug_executor.py", line 79, in _run_task
ti._run_raw_task(job_id=ti.job_id, **params) # pylint: disable=protected-access
File "/Users/matt/src/airflow/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/Users/matt/src/airflow/airflow/models/taskinstance.py", line 1201, in _run_raw_task
self._run_mini_scheduler_on_child_tasks(session)
File "/Users/matt/src/airflow/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/Users/matt/src/airflow/airflow/models/taskinstance.py", line 1223, in _run_mini_scheduler_on_child_tasks
partial_dag = self.task.dag.partial_subset(
File "/Users/matt/src/airflow/airflow/models/dag.py", line 1490, in partial_subset
dag.task_dict = {
File "/Users/matt/src/airflow/airflow/models/dag.py", line 1491, in <dictcomp>
t.task_id: copy.deepcopy(t, {id(t.dag): dag}) # type: ignore
File "/usr/local/Cellar/python@3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/Users/matt/src/airflow/airflow/models/baseoperator.py", line 961, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo)) # noqa
File "/usr/local/Cellar/python@3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/copy.py", line 161, in deepcopy
rv = reductor(4)
TypeError: cannot pickle 'module' object
ERROR - Task instance <TaskInstance: two_virtualenv.a 2021-05-11 00:00:00+00:00 [failed]> failed
```
**What you expected to happen**:
Both tasks say "hi" and succeed
**To Replicate**
The DAG and output above are shortened for brevity. A more complete story: https://gist.github.com/MatrixManAtYrService/6b27378776470491eb20b60e01cfb675
Ran it like this:
```
$ airflow dags test two_virtualenv $(date "+%Y-%m-%d")
```
| I hate pickle :/
Turns out it's nothing really to do with pickle, just to do with trying to copy _any_ module object.
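A minimal reproduction of that, entirely outside Airflow (any imported module object triggers it):

```python
import copy
import json  # any imported module will do

try:
    copy.deepcopy({"pickling_library": json})
except TypeError as err:
    print(err)  # -> cannot pickle 'module' object
```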
And the horrifying thing? This has been broken since 2.0.0. | 2021-05-13T13:50:32Z | [] | [] |
Traceback (most recent call last):
File "/Users/matt/src/airflow/airflow/executors/debug_executor.py", line 79, in _run_task
ti._run_raw_task(job_id=ti.job_id, **params) # pylint: disable=protected-access
File "/Users/matt/src/airflow/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/Users/matt/src/airflow/airflow/models/taskinstance.py", line 1201, in _run_raw_task
self._run_mini_scheduler_on_child_tasks(session)
File "/Users/matt/src/airflow/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/Users/matt/src/airflow/airflow/models/taskinstance.py", line 1223, in _run_mini_scheduler_on_child_tasks
partial_dag = self.task.dag.partial_subset(
File "/Users/matt/src/airflow/airflow/models/dag.py", line 1490, in partial_subset
dag.task_dict = {
File "/Users/matt/src/airflow/airflow/models/dag.py", line 1491, in <dictcomp>
t.task_id: copy.deepcopy(t, {id(t.dag): dag}) # type: ignore
File "/usr/local/Cellar/python@3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/Users/matt/src/airflow/airflow/models/baseoperator.py", line 961, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo)) # noqa
File "/usr/local/Cellar/python@3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/copy.py", line 161, in deepcopy
rv = reductor(4)
TypeError: cannot pickle 'module' object
| 2,373 |
|||
apache/airflow | apache__airflow-16108 | aeb93f8e5bb4a9175e8834d476a6b679beff4a7e | diff --git a/airflow/cli/commands/task_command.py b/airflow/cli/commands/task_command.py
--- a/airflow/cli/commands/task_command.py
+++ b/airflow/cli/commands/task_command.py
@@ -88,6 +88,7 @@ def _run_task_by_executor(args, dag, ti):
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
+ executor.job_id = "manual"
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
| Could not get scheduler_job_id
**Apache Airflow version:**
2.0.0
**Kubernetes version (if you are using kubernetes) (use kubectl version):**
1.18.3
**Environment:**
Cloud provider or hardware configuration: AWS
**What happened:**
When trying to run a DAG, it gets scheduled, but the task is never run. When attempting to run the task manually, it shows an error:
```
Something bad has happened.
Please consider letting us know by creating a bug report using GitHub.
Python version: 3.8.7
Airflow version: 2.0.0
Node: airflow-web-ffdd89d6-h98vj
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 1366, in run
executor.start()
File "/usr/local/lib/python3.8/site-packages/airflow/executors/kubernetes_executor.py", line 493, in start
raise AirflowException("Could not get scheduler_job_id")
airflow.exceptions.AirflowException: Could not get scheduler_job_id
```
**What you expected to happen:**
The task to be run successfully without errors.
**How to reproduce it:**
Haven't pinpointed what causes the issue, besides an attempted upgrade from Airflow 1.10.14 to Airflow 2.0.0
**Anything else we need to know:**
This error is encountered in an upgrade of Airflow from 1.10.14 to Airflow 2.0.0
EDIT: Formatted to fit the issue template
| Thanks for opening your first issue here! Be sure to follow the issue template!
Does it only happen with Kubernetes Executor?
Looks like a bug with Kubernetes Executor. Related issue: https://github.com/apache/airflow/issues/13805
> Does it only happen with Kubernetes Executor?
Yes, apparently it happens only with the Kubernetes Executor. I tried the LocalExecutor and the task completes without problems when triggering the DAG.
Hi @ClassyLion can you post the logs leading up to that error? Does the executor just never start?
What's strange is that this error seems to suggest that the executor never receives a job_id.
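For reference, the check that raises here boils down to something like the sketch below (paraphrased, not the real `KubernetesExecutor` source): the executor refuses to `start()` unless its owning job has assigned it a `job_id` first, which is exactly what the one-line patch above adds for the manual-run code path.

```python
class ExecutorSketch:
    """Paraphrased guard only -- not the actual KubernetesExecutor implementation."""

    def __init__(self):
        self.job_id = None  # normally set by the scheduler/backfill job before start()

    def start(self):
        if not self.job_id:
            raise RuntimeError("Could not get scheduler_job_id")
```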
I currently don't have access to the specific Airflow deployment, but I checked the logs thoroughly multiple times and there were no issues with anything starting. This was the only problematic error that appeared.
When I get access to the deployment, I'll update with specific information.
Just checking here --- This only happens when you try to run the "task" in the UI.
Can you try by just triggering the DAG itself and not an individual Task please
If I only trigger the DAG, the task will not run. It will be queued but never start or finish, and the DAG stays in the running state without any progress.
EDIT: That is the reason why I tried to run the "task" in the UI.
Can you post your DAG please? Do other DAGs run? Can you post the logs from the Scheduler too, with DEBUG level logging?
Did you unpause the DAG too?
Other DAGs don't run either. I have logging set to DEBUG and it didn't add anything to the logs. I will share the logs tomorrow, when I get access to the deployment.
I unpaused the DAG and it got scheduled and started, but the tasks were not running and therefore not finishing.
I am facing the same error while trying to backfill:
Logs:
```
[2021-01-27 06:35:50,209] {airflow-log-cleanup.py:44} INFO - ENABLE_DELETE_CHILD_LOG False
/home/airflow/.local/lib/python3.7/site-packages/airflow/configuration.py:320 DeprecationWarning: The statsd_on option in [scheduler] has been moved to the statsd_on option in [metrics] - the old setting has been used, but please update your config.
[2021-01-27 06:35:50,376] {kubernetes_executor.py:491} INFO - Start Kubernetes executor
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/cli.py", line 89, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/dag_command.py", line 116, in dag_backfill
run_backwards=args.run_backwards,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dag.py", line 1701, in run
job.run()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 237, in run
self._execute()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/backfill_job.py", line 788, in _execute
executor.start()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/executors/kubernetes_executor.py", line 493, in start
raise AirflowException("Could not get scheduler_job_id")
airflow.exceptions.AirflowException: Could not get scheduler_job_id
```
> Can you post your DAG please? Do other DAGs run? Can you post the logs from the Scheduler too, with DEBUG level logging?
>
> Did you unpause the DAG too?
```
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2021-01-27 10:07:10,148] {scheduler_job.py:1241} INFO - Starting the scheduler
[2021-01-27 10:07:10,149] {scheduler_job.py:1246} INFO - Processing each file at most -1 times
[2021-01-27 10:07:10,150] {kubernetes_executor.py:491} INFO - Start Kubernetes executor
[2021-01-27 10:07:10,186] {kubernetes_executor.py:462} INFO - When executor started up, found 0 queued task instances
[2021-01-27 10:07:10,192] {dag_processing.py:250} INFO - Launched DagFileProcessorManager with pid: 42
[2021-01-27 10:07:10,193] {scheduler_job.py:1751} INFO - Resetting orphaned tasks for active dag runs
[2021-01-27 10:07:10,193] {kubernetes_executor.py:126} INFO - Event: and now my watch begins starting at resource_version: 0
[2021-01-27 10:07:10,211] {settings.py:52} INFO - Configured default timezone Timezone('UTC')
[2021-01-27 10:11:15,626] {scheduler_job.py:938} INFO - 2 tasks up for execution:
<TaskInstance: test.monthly.weekly.trigger.test_task 2021-01-26 16:58:38.208907+00:00 [scheduled]>
<TaskInstance: test.monthly.weekly.trigger.test_task 2021-01-27 10:11:06.043581+00:00 [scheduled]>
[2021-01-27 10:11:15,635] {scheduler_job.py:967} INFO - Figuring out tasks to run in Pool(name=default_pool) with 128 open slots and 2 task instances ready to be queued
[2021-01-27 10:11:15,635] {scheduler_job.py:995} INFO - DAG test.monthly.weekly.trigger has 0/16 running and queued tasks
[2021-01-27 10:11:15,635] {scheduler_job.py:995} INFO - DAG test.monthly.weekly.trigger has 1/16 running and queued tasks
[2021-01-27 10:11:15,636] {scheduler_job.py:1060} INFO - Setting the following tasks to queued state:
<TaskInstance: test.monthly.weekly.trigger.test_task 2021-01-26 16:58:38.208907+00:00 [scheduled]>
<TaskInstance: test.monthly.weekly.trigger.test_task 2021-01-27 10:11:06.043581+00:00 [scheduled]>
[2021-01-27 10:11:15,639] {scheduler_job.py:1102} INFO - Sending TaskInstanceKey(dag_id='test.monthly.weekly.trigger', task_id='test_task', execution_date=datetime.datetime(2021, 1, 26, 16, 58, 38, 208907, tzinfo=Timezone('UTC')), try_number=1) to executor with priority 1 and queue default
[2021-01-27 10:11:15,640] {base_executor.py:79} INFO - Adding to queue: ['airflow', 'tasks', 'run', 'test.monthly.weekly.trigger', 'test_task', '2021-01-26T16:58:38.208907+00:00', '--local', '--pool', 'default_pool', '--subdir', '/usr/local/airflow/dags/repo/dags/test.monthly.weekly.trigger.py']
[2021-01-27 10:11:15,640] {scheduler_job.py:1102} INFO - Sending TaskInstanceKey(dag_id='test.monthly.weekly.trigger', task_id='test_task', execution_date=datetime.datetime(2021, 1, 27, 10, 11, 6, 43581, tzinfo=Timezone('UTC')), try_number=1) to executor with priority 1 and queue default
[2021-01-27 10:11:15,640] {base_executor.py:79} INFO - Adding to queue: ['airflow', 'tasks', 'run', 'test.monthly.weekly.trigger', 'test_task', '2021-01-27T10:11:06.043581+00:00', '--local', '--pool', 'default_pool', '--subdir', '/usr/local/airflow/dags/repo/dags/test.monthly.weekly.trigger.py']
[2021-01-27 10:11:15,684] {kubernetes_executor.py:510} INFO - Add task TaskInstanceKey(dag_id='test.monthly.weekly.trigger', task_id='test_task', execution_date=datetime.datetime(2021, 1, 26, 16, 58, 38, 208907, tzinfo=Timezone('UTC')), try_number=1) with command ['airflow', 'tasks', 'run', 'test.monthly.weekly.trigger', 'test_task', '2021-01-26T16:58:38.208907+00:00', '--local', '--pool', 'default_pool', '--subdir', '/usr/local/airflow/dags/repo/dags/test.monthly.weekly.trigger.py'] with executor_config {'KubernetesExecutor': {'request_memory': '256Mi', 'limit_memory': '512Mi'}}
/usr/local/lib/python3.8/site-packages/airflow/kubernetes/pod_generator.py:193 DeprecationWarning: Using a dictionary for the executor_config is deprecated and will soon be removed.please use a `kubernetes.client.models.V1Pod` class with a "pod_override" key instead.
[2021-01-27 10:11:15,901] {kubernetes_executor.py:510} INFO - Add task TaskInstanceKey(dag_id='test.monthly.weekly.trigger', task_id='test_task', execution_date=datetime.datetime(2021, 1, 27, 10, 11, 6, 43581, tzinfo=Timezone('UTC')), try_number=1) with command ['airflow', 'tasks', 'run', 'test.monthly.weekly.trigger', 'test_task', '2021-01-27T10:11:06.043581+00:00', '--local', '--pool', 'default_pool', '--subdir', '/usr/local/airflow/dags/repo/dags/test.monthly.weekly.trigger.py'] with executor_config {'KubernetesExecutor': {'request_memory': '256Mi', 'limit_memory': '512Mi'}}
[2021-01-27 10:11:15,906] {kubernetes_executor.py:277} INFO - Kubernetes job is (TaskInstanceKey(dag_id='test.monthly.weekly.trigger', task_id='test_task', execution_date=datetime.datetime(2021, 1, 26, 16, 58, 38, 208907, tzinfo=Timezone('UTC')), try_number=1), ['airflow', 'tasks', 'run', 'test.monthly.weekly.trigger', 'test_task', '2021-01-26T16:58:38.208907+00:00', '--local', '--pool', 'default_pool', '--subdir', '/usr/local/airflow/dags/repo/dags/test.monthly.weekly.trigger.py'], {'api_version': 'v1',
'kind': 'Pod',
'metadata': {'annotations': None,
'cluster_name': None,
'creation_timestamp': None,
'deletion_grace_period_seconds': None,
'deletion_timestamp': None,
'finalizers': None,
'generate_name': None,
'generation': None,
'initializers': None,
'labels': None,
'managed_fields': None,
'name': None,
'namespace': None,
'owner_references': None,
'resource_version': None,
'self_link': None,
'uid': None},
'spec': {'active_deadline_seconds': None,
'affinity': None,
'automount_service_account_token': None,
'containers': [{'args': [],
'command': [],
'env': [],
'env_from': [],
'image': None,
'image_pull_policy': None,
'lifecycle': None,
'liveness_probe': None,
'name': 'base',
'ports': [],
'readiness_probe': None,
'resources': {'limits': {'memory': '512Mi'},
'requests': {'memory': '256Mi'}},
'security_context': None,
'stdin': None,
'stdin_once': None,
'termination_message_path': None,
'termination_message_policy': None,
'tty': None,
'volume_devices': None,
'volume_mounts': [],
'working_dir': None}],
'dns_config': None,
'dns_policy': None,
'enable_service_links': None,
'host_aliases': None,
'host_ipc': None,
'host_network': False,
'host_pid': None,
'hostname': None,
'image_pull_secrets': [],
'init_containers': None,
'node_name': None,
'node_selector': None,
'preemption_policy': None,
'priority': None,
'priority_class_name': None,
'readiness_gates': None,
'restart_policy': None,
'runtime_class_name': None,
'scheduler_name': None,
'security_context': None,
'service_account': None,
'service_account_name': None,
'share_process_namespace': None,
'subdomain': None,
'termination_grace_period_seconds': None,
'tolerations': None,
'volumes': []},
'status': None}, None)
[2021-01-27 10:11:15,984] {scheduler_job.py:1193} INFO - Executor reports execution of test.monthly.weekly.trigger.test_task execution_date=2021-01-26 16:58:38.208907+00:00 exited with status queued for try_number 1
[2021-01-27 10:11:15,984] {scheduler_job.py:1193} INFO - Executor reports execution of test.monthly.weekly.trigger.test_task execution_date=2021-01-27 10:11:06.043581+00:00 exited with status queued for try_number 1
[2021-01-27 10:11:15,987] {kubernetes_executor.py:147} INFO - Event: testmonthlyweeklytriggertesttask-b2dda5ecac3c44b9870e38a018c4c421 had an event of type ADDED
[2021-01-27 10:11:15,988] {kubernetes_executor.py:202} INFO - Event: testmonthlyweeklytriggertesttask-b2dda5ecac3c44b9870e38a018c4c421 Pending
[2021-01-27 10:11:15,992] {kubernetes_executor.py:147} INFO - Event: testmonthlyweeklytriggertesttask-b2dda5ecac3c44b9870e38a018c4c421 had an event of type MODIFIED
[2021-01-27 10:11:15,992] {kubernetes_executor.py:202} INFO - Event: testmonthlyweeklytriggertesttask-b2dda5ecac3c44b9870e38a018c4c421 Pending
[2021-01-27 10:11:15,996] {kubernetes_executor.py:147} INFO - Event: testmonthlyweeklytriggertesttask-b2dda5ecac3c44b9870e38a018c4c421 had an event of type MODIFIED
[2021-01-27 10:11:15,996] {kubernetes_executor.py:202} INFO - Event: testmonthlyweeklytriggertesttask-b2dda5ecac3c44b9870e38a018c4c421 Pending
```
Seems to be related to this commit: https://github.com/apache/airflow/commit/3ca11eb9b02a2c2591292fd6b76e0e98b8f22656
In `airflow/jobs/scheduler_job.py`, `executor.job_id` is set, but it is not set in `airflow/jobs/backfill_job.py`. The same can be observed in `airflow/www/views.py` too.
So the solution here would be to give an arbitrary scheduler_job_id, e.g. 'backfill' or 'manual', for these two use-cases.
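A minimal sketch of that idea (standalone for illustration only; the literal id and the exact placement inside `backfill_job.py` / `views.py` are assumptions here, not the final patch):

```python
from airflow.executors.executor_loader import ExecutorLoader

executor = ExecutorLoader.get_default_executor()
executor.job_id = "backfill"  # arbitrary placeholder id, as proposed above
executor.start()  # KubernetesExecutor.start() no longer raises "Could not get scheduler_job_id"
```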
@yjwong @ClassyLion would either of you be interested in taking this on? I'd be glad to help you get set up with a dev environment/take you through the PR process (no problem if you don't have the cycles but this seems like a pretty great first ticket :) ).
> So the solution here would be to give an arbitrary scheduler_job_id, e.g. 'backfill' or 'manual', for these two use-cases.
>
> @yjwong @ClassyLion would either of you be interested in taking this on? I'd be glad to help you get set up with a dev environment/take you through the PR process (no problem if you don't have the cycles but this seems like a pretty great first ticket :) ).
I can say that I am interested and I'm up for it... At least to try it.
@ClassyLion Awesome! Glad to help you get set up :).
So here is the documentation on getting a dev environment set up https://github.com/apache/airflow/blob/master/CONTRIBUTORS_QUICK_START.rst
Please let me know if you run into any roadbumps (would be good for me to know so I can fix them)
After that I'm glad to jump on a zoom to help or feel free to comment here and I can answer any questions :)
Hey, @ClassyLion I don't mean to poach this one and I would be happy to collaborate, but this is holding up our deployment so I thought I would get started. @dimberman PR #14160 adds the hardcoded `job_id`'s, but I could use some guidance on how best to add test coverage. i.e. I'm still getting familiar with the testing landscape and I'm not sure if it would make more sense to add tests to an existing file or if this is new territory.
Hey, @MaxTaggart. I understand how it is. I managed to fully set up the dev environment on my side and made some attempts at hardcoding the values, but it didn't work for me. Also, my day job requires attention as well, which leaves me unable to dedicate as much time to this as I would want to.
Thank you @MaxTaggart.
Here is a test that should work for the BackFillJob
```
def test_backfill_has_job_id(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
assert executor.job_id != None
```
and here is one that should work for test_views.py
```
@mock.patch('airflow.executors.executor_loader.ExecutorLoader.get_default_executor')
def test_run_executor_has_job_id(self, get_default_executor_function):
executor = CeleryExecutor()
executor.heartbeat = lambda: True
get_default_executor_function.return_value = executor
task_id = 'runme_0'
form = dict(
task_id=task_id,
dag_id="example_bash_operator",
ignore_all_deps="false",
ignore_ti_state="false",
execution_date=self.EXAMPLE_DAG_DEFAULT_DATE,
origin='/home',
)
resp = self.client.post('run', data=form, follow_redirects=True)
assert executor.job_id !=None
```
Please let me know if those work as expected.
@ClassyLion Sorry this one didn't work out, but now that you're set up, if you wanna pick any of the "first issue" tagged issues for the project, I'm still glad to help you get a commit in :)
@dimberman Thanks for those tests, they are passing now and I pushed a new commit to that PR that includes them. Just out of curiosity, do we not need to create a unique `job_id` for each backfill/manual job? (Instead of hardcoding a constant)
@MaxTaggart the job_id is mostly to identify which scheduler launched the job. With how Scheduler HA works in 2.0, once the task launches, the other schedulers should be able to adopt the orphan task. No unique names needed :)
@MaxTaggart you might need to rebase from master to get tests passing
@dimberman It looks like PR #14160 is getting hung up on a language check that doesn't like "WhiteListRoundRobin" from the cassandra hook in `airflow/providers/apache/cassandra/hooks/cassandra.py`. What's weird is that the file should be excluded from that check since it is in the exclude list in `.pre-commit-config.yaml:317`:
``` yaml
- id: language-matters
language: pygrep
name: Check for language that we do not accept as community
description: Please use "deny_list" or "allow_list" instead.
entry: "(?i)(black|white)[_-]?list"
pass_filenames: true
exclude: >
(?x)
^airflow/providers/apache/cassandra/hooks/cassandra.py$|
^airflow/providers/apache/hive/operators/hive_stats.py$|
^airflow/providers/apache/hive/.*PROVIDER_CHANGES_*|
^airflow/providers/apache/hive/.*README.md$|
^tests/providers/apache/cassandra/hooks/test_cassandra.py$|
^docs/apache-airflow-providers-apache-cassandra/connections/cassandra.rst$|
^docs/apache-airflow-providers-apache-hive/commits.rst$|git|
^CHANGELOG.txt$
```
Any ideas?
@MaxTaggart It is complaining about the `tags` file you added, not cassandra.py itself. You probably didn't mean to commit the tags file anyway :)
Aha, good call, I did not. Embarrassing...
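For reference, the `language-matters` hook is a plain pygrep check: it greps every changed file with the case-insensitive pattern above, so a newly committed file (like the `tags` file mentioned above) gets flagged even though `cassandra.py` itself is excluded. A standalone sketch of what the pattern matches:

```python
import re

# Same expression as the `entry:` line in the pre-commit config quoted above.
pattern = re.compile(r"(?i)(black|white)[_-]?list")

print(bool(pattern.search("WhiteListRoundRobin")))      # True  -> flagged
print(bool(pattern.search("deny_list or allow_list")))  # False -> accepted wording
```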
Alright @dimberman or @ashb, I need to raise the flag again. The CI/CD is now failing in two spots. I can't find any error messages and I tried re-running it after catching up with `master` but it still isn't passing. These are the checks that are failing:
1. MySQL8, Py3.6: Always Core Other API CLI Providers WWW Integration Heisentests
2. MySQL8, Py3.8: Always Core Other API CLI Providers WWW Integration Heisentests
Is it possible that the `BackFillJob` test is missing a mark, or is in the wrong section?
I'll take a look in my morning.
Hey @ashb, have you had a chance to take a look at this? I'm also happy to do more digging if you have any hunches about what might be going wrong.
I also encountered this problem, but I'm curious about another question: why is the same DAG loaded three times before the exception happens, and why do the last two loaded DAG paths look incorrect?
```
[2021-02-19 03:35:50,204] {dagbag.py:413} DEBUG - Loaded DAG <DAG: etl_dag1>
[2021-02-19 03:35:50,205] {dagbag.py:287} DEBUG - Importing /home/airflow/dags/..data/code-dag.py
[2021-02-19 03:35:50,222] {dagbag.py:413} DEBUG - Loaded DAG <DAG: etl_dag1>
[2021-02-19 03:35:50,223] {dagbag.py:287} DEBUG - Importing /home/airflow/dags/..2021_02_19_03_05_19.306169647/code-dag.py
[2021-02-19 03:35:50,239] {dagbag.py:413} DEBUG - Loaded DAG <DAG: etl_dag1>
```
```
airflow@webserver-6b975954d-pf6h8:/opt/airflow$ airflow dags backfill etl_dag1 -s 2020-01-20 -e 2020-01-21
[2021-02-19 03:35:48,536] {settings.py:210} DEBUG - Setting up DB connection pool (PID 214)
[2021-02-19 03:35:48,537] {settings.py:281} DEBUG - settings.prepare_engine_args(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=214
[2021-02-19 03:35:48,636] {cli_action_loggers.py:40} DEBUG - Adding <function default_action_log at 0x7f7b77054320> to pre execution callback
[2021-02-19 03:35:50,016] {cli_action_loggers.py:66} DEBUG - Calling callbacks: [<function default_action_log at 0x7f7b77054320>]
/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/dag_command.py:62 PendingDeprecationWarning: --ignore-first-depends-on-past is deprecated as the value is always set to True
[2021-02-19 03:35:50,050] {dagbag.py:448} INFO - Filling up the DagBag from /home/airflow/dags
[2021-02-19 03:35:50,051] {dagbag.py:287} DEBUG - Importing /home/airflow/dags/code-dag.py
/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py:26 DeprecationWarning: This module is deprecated. Please use `kubernetes.client.models.V1Volume`.
/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py:27 DeprecationWarning: This module is deprecated. Please use `kubernetes.client.models.V1VolumeMount`.
[2021-02-19 03:35:50,204] {dagbag.py:413} DEBUG - Loaded DAG <DAG: etl_dag1>
[2021-02-19 03:35:50,205] {dagbag.py:287} DEBUG - Importing /home/airflow/dags/..data/code-dag.py
[2021-02-19 03:35:50,222] {dagbag.py:413} DEBUG - Loaded DAG <DAG: etl_dag1>
[2021-02-19 03:35:50,223] {dagbag.py:287} DEBUG - Importing /home/airflow/dags/..2021_02_19_03_05_19.306169647/code-dag.py
[2021-02-19 03:35:50,239] {dagbag.py:413} DEBUG - Loaded DAG <DAG: etl_dag1>
[2021-02-19 03:35:50,240] {executor_loader.py:82} DEBUG - Loading core executor: KubernetesExecutor
[2021-02-19 03:35:50,398] {kubernetes_executor.py:473} INFO - Start Kubernetes executor
[2021-02-19 03:35:50,415] {cli_action_loggers.py:84} DEBUG - Calling callbacks: []
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/cli.py", line 89, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/dag_command.py", line 116, in dag_backfill
run_backwards=args.run_backwards,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dag.py", line 1706, in run
job.run()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 237, in run
self._execute()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/backfill_job.py", line 788, in _execute
executor.start()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/executors/kubernetes_executor.py", line 475, in start
raise AirflowException("Could not get scheduler_job_id")
airflow.exceptions.AirflowException: Could not get scheduler_job_id
[2021-02-19 03:35:50,429] {settings.py:292} DEBUG - Disposing DB connection pool (PID 214)
```
Is it possible to implement this fix in deployment?
Hi all,
Thank you for investigating into this. May I ask, when can we expect to have this fix released? Because it is preventing us from doing a manual backfill.
> Hi all,
> Thank you for investigating into this. May I ask, when can we expect to have this fix released? Because it is preventing us from doing a manual backfill.
The fix will be available in Airflow 2.0.2 -- which should be released in a week or so
> > Hi all,
> > Thank you for investigating into this. May I ask, when can we expect to have this fix released? Because it is preventing us from doing a manual backfill.
>
> The fix will be available in Airflow 2.0.2 -- which should be released in a week or so
Hi @kaxil, Thanks for getting back. In this case we will wait a few more weeks 😉
We're actually running into this issue just running the `CeleryKubernetesExecutor` without doing backfill at all. Same issue as airflow-helm/charts#114.
@dimberman unless I'm mistaken, it appears that although @MaxTaggart fixed the backfill issue related to this issue, the run command is still throwing the original error in 2.0.2.
Do you think adding `job_id = 'manual'` in airflow/cli/commands/task_command.py would fix the issue? I noticed it doesn't look like job_id is getting set when using the Kubernetes Executor.
```
def _run_task_by_executor(args, dag, ti):
"""
Sends the task to the executor for execution. This can result in the task being started by another host
if the executor implementation does
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
pickle_id = pickle.id
# TODO: This should be written to a log
print(f'Pickled dag {dag} as pickle_id: {pickle_id}')
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.job_id = 'manual'
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end()
```
If so, wondering how i should test it. Happy to take this on. | 2021-05-27T08:00:16Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 1366, in run
executor.start()
File "/usr/local/lib/python3.8/site-packages/airflow/executors/kubernetes_executor.py", line 493, in start
raise AirflowException("Could not get scheduler_job_id")
airflow.exceptions.AirflowException: Could not get scheduler_job_id
| 2,382 |
|||
apache/airflow | apache__airflow-16118 | 6736290ca3ca31223717825be0ae3625cf7d214c | diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py
--- a/airflow/utils/log/secrets_masker.py
+++ b/airflow/utils/log/secrets_masker.py
@@ -16,6 +16,7 @@
# under the License.
"""Mask sensitive information from logs"""
import collections
+import io
import logging
import re
from typing import TYPE_CHECKING, Iterable, Optional, Set, TypeVar, Union
@@ -27,6 +28,10 @@
RedactableItem = TypeVar('RedactableItem')
+
+log = logging.getLogger(__name__)
+
+
DEFAULT_SENSITIVE_FIELDS = frozenset(
{
'password',
@@ -173,24 +178,36 @@ def redact(self, item: "RedactableItem", name: str = None) -> "RedactableItem":
is redacted.
"""
- if name and should_hide_value_for_key(name):
- return self._redact_all(item)
-
- if isinstance(item, dict):
- return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}
- elif isinstance(item, str):
- if self.replacer:
- # We can't replace specific values, but the key-based redacting
- # can still happen, so we can't short-circuit, we need to walk
- # the structure.
- return self.replacer.sub('***', item)
- return item
- elif isinstance(item, (tuple, set)):
- # Turn set in to tuple!
- return tuple(self.redact(subval) for subval in item)
- elif isinstance(item, Iterable):
- return list(self.redact(subval) for subval in item)
- else:
+ try:
+ if name and should_hide_value_for_key(name):
+ return self._redact_all(item)
+
+ if isinstance(item, dict):
+ return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}
+ elif isinstance(item, str):
+ if self.replacer:
+ # We can't replace specific values, but the key-based redacting
+ # can still happen, so we can't short-circuit, we need to walk
+ # the structure.
+ return self.replacer.sub('***', item)
+ return item
+ elif isinstance(item, (tuple, set)):
+ # Turn set in to tuple!
+ return tuple(self.redact(subval) for subval in item)
+ elif isinstance(item, io.IOBase):
+ return item
+ elif isinstance(item, Iterable):
+ return list(self.redact(subval) for subval in item)
+ else:
+ return item
+ except Exception as e: # pylint: disable=broad-except
+ log.warning(
+ "Unable to redact %r, please report this via <https://github.com/apache/airflow/issues>. "
+ "Error was: %s: %s",
+ item,
+ type(e).__name__,
+ str(e),
+ )
return item
# pylint: enable=too-many-return-statements
| Secret masking fails on io objects
**Apache Airflow version**: 2.1.0
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): N/A
**Environment**: *NIX
- **Cloud provider or hardware configuration**: N/A
- **OS** (e.g. from /etc/os-release): N/A
- **Kernel** (e.g. `uname -a`): N/A
- **Install tools**:
- **Others**:
**What happened**:
Due to the new secrets masker, logging will fail when an IO object is passed to a logging call.
**What you expected to happen**:
Logging should succeed when an IO object is passed to the logging call.
**How to reproduce it**:
Sample DAG:
```python
import logging
from datetime import datetime
from airflow import DAG
from airflow.operators.python import PythonOperator
log = logging.getLogger(__name__)
def log_io():
file = open("/tmp/foo", "w")
log.info("File: %s", file)
# Create the DAG -----------------------------------------------------------------------
dag = DAG(
dag_id="Test_Log_IO",
schedule_interval=None,
catchup=False,
default_args={
"owner": "madison.swain-bowden",
"depends_on_past": False,
"start_date": datetime(2021, 5, 4),
},
)
with dag:
PythonOperator(
task_id="log_io",
python_callable=log_io,
)
```
Logging that occurs when run on Airflow (task subsequently fails):
```
[2021-05-25 11:27:08,080] {logging_mixin.py:104} INFO - Running <TaskInstance: Test_Log_IO.log_io 2021-05-25T18:25:17.679660+00:00 [running]> on host Madisons-MacBook-Pro
[2021-05-25 11:27:08,137] {taskinstance.py:1280} INFO - Exporting the following env vars:
AIRFLOW_CTX_DAG_OWNER=madison.swain-bowden
AIRFLOW_CTX_DAG_ID=Test_Log_IO
AIRFLOW_CTX_TASK_ID=log_io
AIRFLOW_CTX_EXECUTION_DATE=2021-05-25T18:25:17.679660+00:00
AIRFLOW_CTX_DAG_RUN_ID=manual__2021-05-25T18:25:17.679660+00:00
[2021-05-25 11:27:08,138] {taskinstance.py:1481} ERROR - Task failed with exception
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/operators/python.py", line 150, in execute
return_value = self.execute_callable()
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/operators/python.py", line 161, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/Users/madison/git/airflow-dags/ookla/dags/Test_Log_IO/log_io.py", line 13, in log_io
log.info("File: %s", file)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1446, in info
self._log(INFO, msg, args, **kwargs)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1589, in _log
self.handle(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1599, in handle
self.callHandlers(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1661, in callHandlers
hdlr.handle(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 948, in handle
rv = self.filter(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 806, in filter
result = f.filter(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 157, in filter
record.__dict__[k] = self.redact(v)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 205, in redact
return list(self.redact(subval) for subval in item)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 205, in <genexpr>
return list(self.redact(subval) for subval in item)
io.UnsupportedOperation: not readable
[2021-05-25 11:27:08,145] {taskinstance.py:1524} INFO - Marking task as FAILED. dag_id=Test_Log_IO, task_id=log_io, execution_date=20210525T182517, start_date=20210525T182707, end_date=20210525T182708
[2021-05-25 11:27:08,197] {local_task_job.py:151} INFO - Task exited with return code 1
```
**Anything else we need to know**:
If I set the value defined here to `False`, the task completes successfully and the line is logged appropriately: https://github.com/apache/airflow/blob/2.1.0/airflow/cli/commands/task_command.py#L205
Example output (when set to `False`):
```
[2021-05-25 11:48:54,185] {logging_mixin.py:104} INFO - Running <TaskInstance: Test_Log_IO.log_io 2021-05-25T18:48:45.911082+00:00 [running]> on host Madisons-MacBook-Pro
[2021-05-25 11:48:54,262] {taskinstance.py:1280} INFO - Exporting the following env vars:
AIRFLOW_CTX_DAG_OWNER=madison.swain-bowden
AIRFLOW_CTX_DAG_ID=Test_Log_IO
AIRFLOW_CTX_TASK_ID=log_io
AIRFLOW_CTX_EXECUTION_DATE=2021-05-25T18:48:45.911082+00:00
AIRFLOW_CTX_DAG_RUN_ID=manual__2021-05-25T18:48:45.911082+00:00
[2021-05-25 11:48:54,264] {log_io.py:13} INFO - File: <_io.TextIOWrapper name='/tmp/foo' mode='w' encoding='UTF-8'>
[2021-05-25 11:48:54,264] {python.py:151} INFO - Done. Returned value was: None
[2021-05-25 11:48:54,274] {taskinstance.py:1184} INFO - Marking task as SUCCESS. dag_id=Test_Log_IO, task_id=log_io, execution_date=20210525T184845, start_date=20210525T184854, end_date=20210525T184854
[2021-05-25 11:48:54,305] {taskinstance.py:1245} INFO - 0 downstream tasks scheduled from follow-on schedule check
[2021-05-25 11:48:54,339] {local_task_job.py:151} INFO - Task exited with return code 0
```
Unfortunately the logging that caused this problem for me originally is being done by a third party library, so I can't alter the way this works on our end.
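For what it's worth, the failure can be reproduced without a DAG or the third-party library at all by driving the masker directly (a sketch; this is the same code path the traceback above ends up in):

```python
from airflow.utils.log.secrets_masker import SecretsMasker

masker = SecretsMasker()
with open("/tmp/foo", "w") as file:
    # Before the patch above, redact() falls through to the generic Iterable
    # branch and tries to iterate the handle, which a write-only file rejects.
    masker.redact(file)  # io.UnsupportedOperation: not readable
```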
| Cc: @ashb | 2021-05-27T14:54:02Z | [] | [] |
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/operators/python.py", line 150, in execute
return_value = self.execute_callable()
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/operators/python.py", line 161, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/Users/madison/git/airflow-dags/ookla/dags/Test_Log_IO/log_io.py", line 13, in log_io
log.info("File: %s", file)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1446, in info
self._log(INFO, msg, args, **kwargs)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1589, in _log
self.handle(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1599, in handle
self.callHandlers(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 1661, in callHandlers
hdlr.handle(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 948, in handle
rv = self.filter(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/logging/__init__.py", line 806, in filter
result = f.filter(record)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 157, in filter
record.__dict__[k] = self.redact(v)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 205, in redact
return list(self.redact(subval) for subval in item)
File "/Users/madison/programs/anaconda3/envs/ookla-airflow/lib/python3.9/site-packages/airflow/utils/log/secrets_masker.py", line 205, in <genexpr>
return list(self.redact(subval) for subval in item)
io.UnsupportedOperation: not readable
| 2,383 |
|||
apache/airflow | apache__airflow-16345 | ce28bc52a8d477e137f9293ce0f3f90d4e291883 | diff --git a/airflow/models/serialized_dag.py b/airflow/models/serialized_dag.py
--- a/airflow/models/serialized_dag.py
+++ b/airflow/models/serialized_dag.py
@@ -280,7 +280,7 @@ def get_last_updated_datetime(cls, dag_id: str, session: Session = None) -> Opti
@classmethod
@provide_session
- def get_max_last_updated_datetime(cls, session: Session = None) -> datetime:
+ def get_max_last_updated_datetime(cls, session: Session = None) -> Optional[datetime]:
"""
Get the maximum date when any DAG was last updated in serialized_dag table
@@ -291,7 +291,7 @@ def get_max_last_updated_datetime(cls, session: Session = None) -> datetime:
@classmethod
@provide_session
- def get_latest_version_hash(cls, dag_id: str, session: Session = None) -> str:
+ def get_latest_version_hash(cls, dag_id: str, session: Session = None) -> Optional[str]:
"""
Get the latest DAG version for a given DAG ID.
@@ -299,7 +299,7 @@ def get_latest_version_hash(cls, dag_id: str, session: Session = None) -> str:
:type dag_id: str
:param session: ORM Session
:type session: Session
- :return: DAG Hash
+ :return: DAG Hash, or None if the DAG is not found
:rtype: str | None
"""
return session.query(cls.dag_hash).filter(cls.dag_id == dag_id).scalar()
diff --git a/airflow/www/views.py b/airflow/www/views.py
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -4093,7 +4093,8 @@ def list(self):
title = "DAG Dependencies"
if timezone.utcnow() > self.last_refresh + self.refresh_interval:
- if SerializedDagModel.get_max_last_updated_datetime() > self.last_refresh:
+ max_last_updated = SerializedDagModel.get_max_last_updated_datetime()
+ if max_last_updated is None or max_last_updated > self.last_refresh:
self._calculate_graph()
self.last_refresh = timezone.utcnow()
| error on click in dag-dependencies - airflow 2.1
Python version: 3.7.9
Airflow version: 2.1.0
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/decorators.py", line 97, in view_func
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 4003, in list
if SerializedDagModel.get_max_last_updated_datetime() > self.last_refresh:
TypeError: '>' not supported between instances of 'NoneType' and 'datetime.datetime'
**What you expected to happen**:
See the DAG dependencies
**What do you think went wrong?**
It happens only if I don't have any DAGs yet.
**How to reproduce it**:
Without any DAGs created, click
Menu -> Browse -> DAG Dependencies
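A minimal sketch of the failing comparison (values assumed): with an empty `serialized_dag` table, `SerializedDagModel.get_max_last_updated_datetime()` returns `None`, and comparing that with a timestamp raises the `TypeError` from the traceback above.

```python
from datetime import datetime

max_last_updated = None  # what get_max_last_updated_datetime() yields with no serialized DAGs
last_refresh = datetime.utcnow()

# Raises TypeError: '>' not supported between instances of 'NoneType' and 'datetime.datetime'.
# The patch above guards this with `max_last_updated is None or ...` instead.
max_last_updated > last_refresh
```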
| Thanks for opening your first issue here! Be sure to follow the issue template!
@jcmartins thanks for submitting. Do you want to submit a fix as well?
| 2021-06-09T03:45:18Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/decorators.py", line 97, in view_func
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 4003, in list
if SerializedDagModel.get_max_last_updated_datetime() > self.last_refresh:
TypeError: '>' not supported between instances of 'NoneType' and 'datetime.datetime'
| 2,385 |
|||
apache/airflow | apache__airflow-16383 | e72e5295fd5e710599bc0ecc9a70b0b3b5728f38 | diff --git a/airflow/utils/json.py b/airflow/utils/json.py
--- a/airflow/utils/json.py
+++ b/airflow/utils/json.py
@@ -17,6 +17,7 @@
# under the License.
from datetime import date, datetime
+from decimal import Decimal
import numpy as np
from flask.json import JSONEncoder
@@ -37,12 +38,19 @@ def __init__(self, *args, **kwargs):
self.default = self._default
@staticmethod
- def _default(obj):
+ def _default(obj): # pylint: disable=too-many-return-statements
"""Convert dates and numpy objects in a json serializable format."""
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
+ elif isinstance(obj, Decimal):
+ _, _, exponent = obj.as_tuple()
+ if exponent >= 0: # No digits after the decimal point.
+ return int(obj)
+ # Technically lossy due to floating point errors, but the best we
+ # can do without implementing a custom encode function.
+ return float(obj)
elif isinstance(
obj,
(
| Airflow Stable REST API [GET api/v1/pools] issue
**Apache Airflow version**: v2.0.2
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): N/A
**Environment**: AWS
- **Cloud provider or hardware configuration**: AWS EC2 Instance
- **OS** (e.g. from /etc/os-release): Ubuntu Server 20.04 LTS
- **Kernel** (e.g. `uname -a`): Linux ip-172-31-23-31 5.4.0-1048-aws #50-Ubuntu SMP Mon May 3 21:44:17 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
- **Install tools**:
- **Others**: Python version: 3.8.5
**What happened**: Using the Airflow Stable REST API [GET api/v1/pools] results in an "Ooops!" error page. This only occurs when the pools have "Running Slots" in use. If no tasks are running and the slots are zero, then it works just fine.
Something bad has happened.
Please consider letting us know by creating a bug report using GitHub.
Python version: 3.8.5
Airflow version: 2.0.2
Node: ip-172-31-23-31.ec2.internal
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/decorator.py", line 48, in wrapper
response = function(request)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/uri_parsing.py", line 144, in wrapper
response = function(request)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/validation.py", line 384, in wrapper
return function(request)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/response.py", line 104, in wrapper
return _wrapper(request, response)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/response.py", line 89, in _wrapper
self.operation.api.get_connexion_response(response, self.mimetype)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/abstract.py", line 351, in get_connexion_response
response = cls._response_from_handler(response, mimetype)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/abstract.py", line 331, in _response_from_handler
return cls._build_response(mimetype=mimetype, data=response, extra_context=extra_context)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/flask_api.py", line 173, in _build_response
data, status_code, serialized_mimetype = cls._prepare_body_and_status_code(data=data, mimetype=mimetype, status_code=status_code, extra_context=extra_context)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/abstract.py", line 403, in _prepare_body_and_status_code
body, mimetype = cls._serialize_data(data, mimetype)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/flask_api.py", line 190, in _serialize_data
body = cls.jsonifier.dumps(data)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/jsonifier.py", line 44, in dumps
return self.json.dumps(data, **kwargs) + '\n'
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/json/__init__.py", line 211, in dumps
rv = _json.dumps(obj, **kwargs)
File "/usr/lib/python3.8/json/__init__.py", line 234, in dumps
return cls(
File "/usr/lib/python3.8/json/encoder.py", line 201, in encode
chunks = list(chunks)
File "/usr/lib/python3.8/json/encoder.py", line 431, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/usr/lib/python3.8/json/encoder.py", line 405, in _iterencode_dict
yield from chunks
File "/usr/lib/python3.8/json/encoder.py", line 325, in _iterencode_list
yield from chunks
File "/usr/lib/python3.8/json/encoder.py", line 405, in _iterencode_dict
yield from chunks
File "/usr/lib/python3.8/json/encoder.py", line 438, in _iterencode
o = _default(o)
File "/home/tool/gto_env/lib/python3.8/site-packages/airflow/utils/json.py", line 74, in _default
raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
TypeError: Object of type 'Decimal' is not JSON serializable
**What you expected to happen**: I expect the appropriate JSON response
**How to reproduce it**:
On an Airflow instance, run some tasks and, while the tasks are running, query the pools via the API. NOTE: You have to query the specific pool that has tasks running; if you avoid that pool using limit and/or offset then the issue will not occur. You must return a pool with running_slots > 0.
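A standalone sketch of the underlying serialization failure (hypothetical value; the database layer hands back `Decimal` for the pool's slot aggregates):

```python
import json
from decimal import Decimal

# The stock JSON encoder has no default handler for Decimal, which is what the
# running/occupied slot aggregates come back as.
json.dumps({"running_slots": Decimal("3")})
# TypeError: Object of type Decimal is not JSON serializable
```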
**Anything else we need to know**:
Not really
| Thanks for opening your first issue here! Be sure to follow the issue template!
| 2021-06-11T10:50:45Z | [] | [] |
Traceback (most recent call last):
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/decorator.py", line 48, in wrapper
response = function(request)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/uri_parsing.py", line 144, in wrapper
response = function(request)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/validation.py", line 384, in wrapper
return function(request)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/response.py", line 104, in wrapper
return _wrapper(request, response)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/decorators/response.py", line 89, in _wrapper
self.operation.api.get_connexion_response(response, self.mimetype)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/abstract.py", line 351, in get_connexion_response
response = cls._response_from_handler(response, mimetype)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/abstract.py", line 331, in _response_from_handler
return cls._build_response(mimetype=mimetype, data=response, extra_context=extra_context)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/flask_api.py", line 173, in _build_response
data, status_code, serialized_mimetype = cls._prepare_body_and_status_code(data=data, mimetype=mimetype, status_code=status_code, extra_context=extra_context)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/abstract.py", line 403, in _prepare_body_and_status_code
body, mimetype = cls._serialize_data(data, mimetype)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/apis/flask_api.py", line 190, in _serialize_data
body = cls.jsonifier.dumps(data)
File "/home/tool/gto_env/lib/python3.8/site-packages/connexion/jsonifier.py", line 44, in dumps
return self.json.dumps(data, **kwargs) + '\n'
File "/home/tool/gto_env/lib/python3.8/site-packages/flask/json/__init__.py", line 211, in dumps
rv = _json.dumps(obj, **kwargs)
File "/usr/lib/python3.8/json/__init__.py", line 234, in dumps
return cls(
File "/usr/lib/python3.8/json/encoder.py", line 201, in encode
chunks = list(chunks)
File "/usr/lib/python3.8/json/encoder.py", line 431, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/usr/lib/python3.8/json/encoder.py", line 405, in _iterencode_dict
yield from chunks
File "/usr/lib/python3.8/json/encoder.py", line 325, in _iterencode_list
yield from chunks
File "/usr/lib/python3.8/json/encoder.py", line 405, in _iterencode_dict
yield from chunks
File "/usr/lib/python3.8/json/encoder.py", line 438, in _iterencode
o = _default(o)
File "/home/tool/gto_env/lib/python3.8/site-packages/airflow/utils/json.py", line 74, in _default
raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable")
TypeError: Object of type 'Decimal' is not JSON serializable
| 2,387 |
|||
apache/airflow | apache__airflow-16393 | ce28bc52a8d477e137f9293ce0f3f90d4e291883 | diff --git a/airflow/models/serialized_dag.py b/airflow/models/serialized_dag.py
--- a/airflow/models/serialized_dag.py
+++ b/airflow/models/serialized_dag.py
@@ -313,18 +313,12 @@ def get_dag_dependencies(cls, session: Session = None) -> Dict[str, List['DagDep
:param session: ORM Session
:type session: Session
"""
- dependencies = {}
-
if session.bind.dialect.name in ["sqlite", "mysql"]:
- for row in session.query(cls.dag_id, func.json_extract(cls.data, "$.dag.dag_dependencies")).all():
- dependencies[row[0]] = [DagDependency(**d) for d in json.loads(row[1])]
+ query = session.query(cls.dag_id, func.json_extract(cls.data, "$.dag.dag_dependencies"))
+ iterator = ((dag_id, json.loads(deps_data) if deps_data else []) for dag_id, deps_data in query)
elif session.bind.dialect.name == "mssql":
- for row in session.query(cls.dag_id, func.json_query(cls.data, "$.dag.dag_dependencies")).all():
- dependencies[row[0]] = [DagDependency(**d) for d in json.loads(row[1])]
+ query = session.query(cls.dag_id, func.json_query(cls.data, "$.dag.dag_dependencies"))
+ iterator = ((dag_id, json.loads(deps_data) if deps_data else []) for dag_id, deps_data in query)
else:
- for row in session.query(
- cls.dag_id, func.json_extract_path(cls.data, "dag", "dag_dependencies")
- ).all():
- dependencies[row[0]] = [DagDependency(**d) for d in row[1]]
-
- return dependencies
+ iterator = session.query(cls.dag_id, func.json_extract_path(cls.data, "dag", "dag_dependencies"))
+ return {dag_id: [DagDependency(**d) for d in (deps_data or [])] for dag_id, deps_data in iterator}
| exception when root account goes to http://airflow.ordercapital.com/dag-dependencies
Happens every time
Python version: 3.8.10
Airflow version: 2.1.0
Node: airflow-web-55974db849-5bdxq
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/decorators.py", line 97, in view_func
return f(*args, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/views.py", line 4004, in list
self._calculate_graph()
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/views.py", line 4023, in _calculate_graph
for dag, dependencies in SerializedDagModel.get_dag_dependencies().items():
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/models/serialized_dag.py", line 321, in get_dag_dependencies
dependencies[row[0]] = [DagDependency(**d) for d in row[1]]
TypeError: 'NoneType' object is not iterable
| Thanks for opening your first issue here! Be sure to follow the issue template!
I'm guessing this might be related to #16328. Different traceback, but might be the same root cause?
Actually no. The error message suggests `row[1]` is `None`, which is a value returned by Postgres’s `json_extract_path`:
https://github.com/apache/airflow/blob/304e174674ff6921cb7ed79c0158949b50eff8fe/airflow/models/serialized_dag.py#L318-L321
I *think* this means `data["dag"]["dag_dependencies"]` is not found or None? This is a new field in 2.1.0, but I think we didn’t migrate previously-serialised DAG rows correctly. Reading its original implementation (#13199), I think we should either add a data migration to backfill previously-serialised rows, or just pretend those DAGs don’t have dependencies.
@uranusjr Just assume no dependencies -- the serialized dag _should_ get updated quickly enough by the scheduler anyway.
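A sketch of that option (the `or []` mirrors the guard in the patch at the top of this record; the `DagDependency` import path is an assumption based on the file's existing imports):

```python
from airflow.serialization.serialized_objects import DagDependency

def deps_from_row(deps_data):
    # deps_data is whatever json_extract_path / json_extract returned for the row;
    # rows serialized before 2.1.0 have no dag_dependencies field, hence None.
    return [DagDependency(**d) for d in (deps_data or [])]

print(deps_from_row(None))  # [] -> treated as "no dependencies"
```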
Ohh, I wonder if this happens if _no_ dags have any dependencies.
@apyshkin What database are you using? | 2021-06-11T14:33:52Z | [] | [] |
Traceback (most recent call last):
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/decorators.py", line 97, in view_func
return f(*args, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/views.py", line 4004, in list
self._calculate_graph()
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/www/views.py", line 4023, in _calculate_graph
for dag, dependencies in SerializedDagModel.get_dag_dependencies().items():
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/opt/bitnami/airflow/venv/lib/python3.8/site-packages/airflow/models/serialized_dag.py", line 321, in get_dag_dependencies
dependencies[row[0]] = [DagDependency(**d) for d in row[1]]
TypeError: 'NoneType' object is not iterable
| 2,390 |
|||
apache/airflow | apache__airflow-16415 | 0c80a7d41100bf8d18b661c8286d6056e6d5d2f1 | diff --git a/airflow/models/baseoperator.py b/airflow/models/baseoperator.py
--- a/airflow/models/baseoperator.py
+++ b/airflow/models/baseoperator.py
@@ -559,6 +559,14 @@ def __init__(
if wait_for_downstream:
self.depends_on_past = True
+ if retries is not None and not isinstance(retries, int):
+ try:
+ parsed_retries = int(retries)
+ except (TypeError, ValueError):
+ raise AirflowException(f"'retries' type must be int, not {type(retries).__name__}")
+ self.log.warning("Implicitly converting 'retries' for %s from %r to int", self, retries)
+ retries = parsed_retries
+
self.retries = retries
self.queue = queue
self.pool = Pool.DEFAULT_POOL_NAME if pool is None else pool
| Unable to clear Failed task with retries
**Apache Airflow version**: 2.0.1
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): NA
**Environment**: Windows WSL2 (Ubuntu) Local
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release): Ubuntu 18.04
- **Kernel** (e.g. `uname -a`): Linux d255bce4dcd5 5.4.72-microsoft-standard-WSL2
- **Install tools**: Docker -compose
- **Others**:
**What happened**:
I have a dag with tasks:
Task1 - Get Date
Task 2 - Get data from API call (have set retries to 3)
Task 3 - Load Data
Task 2 had failed after three attempts. I am unable to clear the task instance and I get the below error in the UI.
[Dag Code](https://github.com/anilkulkarni87/airflow-docker/blob/master/dags/covidNyDaily.py)
```
Python version: 3.8.7
Airflow version: 2.0.1rc2
Node: d255bce4dcd5
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/airflow/.local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/views.py", line 1547, in clear
return self._clear_dag_tis(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/views.py", line 1475, in _clear_dag_tis
count = dag.clear(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 1324, in clear
clear_task_instances(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 160, in clear_task_instances
ti.max_tries = ti.try_number + task_retries - 1
TypeError: unsupported operand type(s) for +: 'int' and 'str'
```
**What you expected to happen**:
I expected to clear the Task Instance so that the task could be scheduled again.
**How to reproduce it**:
1) Clone the repo link shared above
2) Follow instructions to setup cluster.
3) Change code to enforce error in Task 2
4) Execute and try to clear task instance after three attempts.
![Error pops up when clicked on Clear](https://user-images.githubusercontent.com/10644132/107998258-8e1ee180-6f99-11eb-8442-0c0be5b23478.png)
I have the same problem with Airflow 2.0.1. I upgraded from version 1.10.14 and the clear function is not working anymore. Can we try to fix this issue? I think it's quite an important function.
We also have the same issue on Airflow 1.10.10, Python 3.6. This is happening only on a few DAGs; the clear function is not working anymore. Can someone help us try to fix this? It is super important for our production environments.
Isn’t the `retries` value supposed to be an int? The repro above has
```python
@dag.task(default_args={'retries': '2', 'retry_delay': timedelta(minutes=30)})
```
Which is the cause of the exception, if I’m not mistaken.
That said, Airflow should probably be more resilient against user issues like this. Probably set the value to the default with a warning?
> Isn’t the `retries` value supposed to be an int? The repro above has
>
> ```python
> @dag.task(default_args={'retries': '2', 'retry_delay': timedelta(minutes=30)})
> ```
>
> Which is the cause of the exception, if I'm not mistaken.
>
> That said, Airflow should probably be more resilient against user issue like this. Probably set the value to default with a warning?
Excellent, it is working for us after changing the str to an int for retries. Thanks a lot for the help!!
@uranusjr And if retries is a string, I think we can try to parse it as an integer if we can. | 2021-06-12T15:21:00Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/airflow/.local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/airflow/.local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/views.py", line 1547, in clear
return self._clear_dag_tis(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/www/views.py", line 1475, in _clear_dag_tis
count = dag.clear(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/session.py", line 65, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 1324, in clear
clear_task_instances(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 160, in clear_task_instances
ti.max_tries = ti.try_number + task_retries - 1
TypeError: unsupported operand type(s) for +: 'int' and 'str'
| 2,392 |
|||
apache/airflow | apache__airflow-16491 | e72e5295fd5e710599bc0ecc9a70b0b3b5728f38 | diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py
--- a/airflow/utils/log/secrets_masker.py
+++ b/airflow/utils/log/secrets_masker.py
@@ -111,6 +111,7 @@ class SecretsMasker(logging.Filter):
patterns: Set[str]
ALREADY_FILTERED_FLAG = "__SecretsMasker_filtered"
+ MAX_RECURSION_DEPTH = 5
def __init__(self):
super().__init__()
@@ -156,35 +157,34 @@ def filter(self, record) -> bool:
return True
- def _redact_all(self, item: "RedactableItem") -> "RedactableItem":
- if isinstance(item, dict):
- return {dict_key: self._redact_all(subval) for dict_key, subval in item.items()}
- elif isinstance(item, str):
+ def _redact_all(self, item: "RedactableItem", depth: int) -> "RedactableItem":
+ if depth > self.MAX_RECURSION_DEPTH or isinstance(item, str):
return '***'
+ if isinstance(item, dict):
+ return {dict_key: self._redact_all(subval, depth + 1) for dict_key, subval in item.items()}
elif isinstance(item, (tuple, set)):
# Turn set in to tuple!
- return tuple(self._redact_all(subval) for subval in item)
+ return tuple(self._redact_all(subval, depth + 1) for subval in item)
elif isinstance(item, list):
- return list(self._redact_all(subval) for subval in item)
+ return list(self._redact_all(subval, depth + 1) for subval in item)
else:
return item
# pylint: disable=too-many-return-statements
- def redact(self, item: "RedactableItem", name: str = None) -> "RedactableItem":
- """
- Redact an any secrets found in ``item``, if it is a string.
-
- If ``name`` is given, and it's a "sensitive" name (see
- :func:`should_hide_value_for_key`) then all string values in the item
- is redacted.
-
- """
+ def _redact(self, item: "RedactableItem", name: Optional[str], depth: int) -> "RedactableItem":
+ # Avoid spending too much effort on redacting on deeply nested
+ # structures. This also avoid infinite recursion if a structure has
+ # reference to self.
+ if depth > self.MAX_RECURSION_DEPTH:
+ return item
try:
if name and should_hide_value_for_key(name):
- return self._redact_all(item)
-
+ return self._redact_all(item, depth)
if isinstance(item, dict):
- return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}
+ return {
+ dict_key: self._redact(subval, name=dict_key, depth=(depth + 1))
+ for dict_key, subval in item.items()
+ }
elif isinstance(item, str):
if self.replacer:
# We can't replace specific values, but the key-based redacting
@@ -194,9 +194,9 @@ def redact(self, item: "RedactableItem", name: str = None) -> "RedactableItem":
return item
elif isinstance(item, (tuple, set)):
# Turn set in to tuple!
- return tuple(self.redact(subval) for subval in item)
+ return tuple(self._redact(subval, name=None, depth=(depth + 1)) for subval in item)
elif isinstance(item, list):
- return list(self.redact(subval) for subval in item)
+ return [self._redact(subval, name=None, depth=(depth + 1)) for subval in item]
else:
return item
# I think this should never happen, but it does not hurt to leave it just in case
@@ -210,8 +210,16 @@ def redact(self, item: "RedactableItem", name: str = None) -> "RedactableItem":
)
return item
- # pylint: enable=too-many-return-statements
+ def redact(self, item: "RedactableItem", name: Optional[str] = None) -> "RedactableItem":
+ """Redact an any secrets found in ``item``, if it is a string.
+ If ``name`` is given, and it's a "sensitive" name (see
+ :func:`should_hide_value_for_key`) then all string values in the item
+ is redacted.
+ """
+ return self._redact(item, name, depth=0)
+
+ # pylint: enable=too-many-return-statements
def add_mask(self, secret: Union[str, dict, Iterable], name: str = None):
"""Add a new secret to be masked to this filter instance."""
if isinstance(secret, dict):
| secrets_masker RecursionError with nested TriggerDagRunOperators
**Apache Airflow version**: 2.1.0
**Environment**: tested on a Windows docker-compose environment and on k8s (both with the Celery executor).
**What happened**:
```
[2021-06-16 07:56:32,682] {taskinstance.py:1481} ERROR - Task failed with exception
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/operators/trigger_dagrun.py", line 134, in execute
replace_microseconds=False,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/api/common/experimental/trigger_dag.py", line 123, in trigger_dag
replace_microseconds=replace_microseconds,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/api/common/experimental/trigger_dag.py", line 48, in _trigger_dag
dag = dag_bag.get_dag(dag_id) # prefetch dag if it is stored serialized
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagbag.py", line 186, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagbag.py", line 252, in _add_dag_from_db
dag = row.dag
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/serialized_dag.py", line 175, in dag
dag = SerializedDAG.from_dict(self.data) # type: Any
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 792, in from_dict
return cls.deserialize_dag(serialized_obj['dag'])
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 716, in deserialize_dag
v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 716, in <dictcomp>
v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 493, in deserialize_operator
op_predefined_extra_links = cls._deserialize_operator_extra_links(v)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 600, in _deserialize_operator_extra_links
if _operator_link_class_path in get_operator_extra_links():
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 86, in get_operator_extra_links
_OPERATOR_EXTRA_LINKS.update(ProvidersManager().extra_links_class_names)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers_manager.py", line 400, in extra_links_class_names
self.initialize_providers_manager()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers_manager.py", line 129, in initialize_providers_manager
self._discover_all_providers_from_packages()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers_manager.py", line 151, in _discover_all_providers_from_packages
log.debug("Loading %s from package %s", entry_point, package_name)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1366, in debug
self._log(DEBUG, msg, args, **kwargs)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1514, in _log
self.handle(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1524, in handle
self.callHandlers(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1586, in callHandlers
hdlr.handle(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 890, in handle
rv = self.filter(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 751, in filter
result = f.filter(record)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 157, in filter
record.__dict__[k] = self.redact(v)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
....
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 201, in redact
elif isinstance(item, (tuple, set)):
RecursionError: maximum recursion depth exceeded in __instancecheck__
```
**What you expected to happen**:
I think the new masker is not able to handle a TriggerDagRunOperator triggering a DAG that itself contains a TriggerDagRunOperator.
**How to reproduce it**:
```python
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
def pprint(**kwargs):
print(1)
with DAG("test",
catchup=False,
max_active_runs=1,
start_date=datetime(2021, 1, 1),
is_paused_upon_creation=False,
schedule_interval=None) as dag:
task_observe_pea_data = PythonOperator(
task_id="test_task",
python_callable=pprint,
provide_context=True
)
with DAG("test_1",
catchup=False,
max_active_runs=1,
start_date=datetime(2021, 1, 1),
is_paused_upon_creation=False,
schedule_interval=None) as dag:
task_observe_pea_data = TriggerDagRunOperator(
task_id="test_trigger_1",
trigger_dag_id="test"
)
with DAG("test_2",
catchup=False,
max_active_runs=1,
start_date=datetime(2021, 1, 1),
is_paused_upon_creation=False,
schedule_interval=None) as dag:
task_observe_pea_data = TriggerDagRunOperator(
task_id="test_trigger_2",
trigger_dag_id="test_1"
)
```
**Anything else we need to know**:
How often does this problem occur? Every time
I have tried `hide_sensitive_var_conn_fields=False` but the error still occurs.
| Thanks for opening your first issue here! Be sure to follow the issue template!
We should implement some kind of cycle detection in the redaction logic.
Maybe a simple max recursion depth. Somewhat arbitrary, but trying to solve it 'properly' might be, firstly, unnecessary and, secondly, quite a bit too costly for the logging case. Setting a max recursion depth of around 2 should cover the most common cases (a dict of lists, for example).
(However, this particular problem should be fixed by the just-merged https://github.com/apache/airflow/pull/16424, so this is more of an 'in case' protection.)
UPDATE: or maybe not - it seems that iterating over a tuple is what causes this particular problem. | 2021-06-16T22:02:07Z | [] | [] |
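A standalone sketch of that depth-capped recursion, mirroring the shape of `_redact_all` in the patch at the top of this record (the function name and cap below are illustrative, not the library code):

```python
MAX_RECURSION_DEPTH = 5  # the cap the patch above settles on


def redact_all(item, depth=0):
    """Mask every string in a nested structure, bailing out once it is nested too deeply."""
    if depth > MAX_RECURSION_DEPTH or isinstance(item, str):
        return "***"
    if isinstance(item, dict):
        return {key: redact_all(value, depth + 1) for key, value in item.items()}
    if isinstance(item, (tuple, set)):
        return tuple(redact_all(value, depth + 1) for value in item)
    if isinstance(item, list):
        return [redact_all(value, depth + 1) for value in item]
    return item


# Even a self-referential structure terminates now instead of blowing the stack:
loop = []
loop.append(loop)
print(redact_all(loop))
```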
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/operators/trigger_dagrun.py", line 134, in execute
replace_microseconds=False,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/api/common/experimental/trigger_dag.py", line 123, in trigger_dag
replace_microseconds=replace_microseconds,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/api/common/experimental/trigger_dag.py", line 48, in _trigger_dag
dag = dag_bag.get_dag(dag_id) # prefetch dag if it is stored serialized
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagbag.py", line 186, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagbag.py", line 252, in _add_dag_from_db
dag = row.dag
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/serialized_dag.py", line 175, in dag
dag = SerializedDAG.from_dict(self.data) # type: Any
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 792, in from_dict
return cls.deserialize_dag(serialized_obj['dag'])
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 716, in deserialize_dag
v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 716, in <dictcomp>
v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 493, in deserialize_operator
op_predefined_extra_links = cls._deserialize_operator_extra_links(v)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 600, in _deserialize_operator_extra_links
if _operator_link_class_path in get_operator_extra_links():
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/serialization/serialized_objects.py", line 86, in get_operator_extra_links
_OPERATOR_EXTRA_LINKS.update(ProvidersManager().extra_links_class_names)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers_manager.py", line 400, in extra_links_class_names
self.initialize_providers_manager()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers_manager.py", line 129, in initialize_providers_manager
self._discover_all_providers_from_packages()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers_manager.py", line 151, in _discover_all_providers_from_packages
log.debug("Loading %s from package %s", entry_point, package_name)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1366, in debug
self._log(DEBUG, msg, args, **kwargs)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1514, in _log
self.handle(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1524, in handle
self.callHandlers(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 1586, in callHandlers
hdlr.handle(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 890, in handle
rv = self.filter(record)
File "/usr/local/lib/python3.7/logging/__init__.py", line 751, in filter
result = f.filter(record)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 157, in filter
record.__dict__[k] = self.redact(v)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
....
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in redact
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 203, in <genexpr>
return tuple(self.redact(subval) for subval in item)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/log/secrets_masker.py", line 201, in redact
elif isinstance(item, (tuple, set)):
RecursionError: maximum recursion depth exceeded in __instancecheck__
| 2,394 |
|||
apache/airflow | apache__airflow-17105 | 3a2e162387b5d73f1badda8fcf027fbc2caa0f28 | diff --git a/airflow/models/dag.py b/airflow/models/dag.py
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -75,6 +75,7 @@
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.typing_compat import Literal, RePatternType
from airflow.utils import timezone
+from airflow.utils.dag_cycle_tester import check_cycle
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.file import correct_maybe_zipped
from airflow.utils.helpers import validate_key
@@ -1959,6 +1960,8 @@ def run(
def cli(self):
"""Exposes a CLI specific to this DAG"""
+ check_cycle(self)
+
from airflow.cli import cli_parser
parser = cli_parser.get_parser(dag_parser=True)
diff --git a/airflow/utils/dag_cycle_tester.py b/airflow/utils/dag_cycle_tester.py
--- a/airflow/utils/dag_cycle_tester.py
+++ b/airflow/utils/dag_cycle_tester.py
@@ -16,15 +16,19 @@
# under the License.
"""DAG Cycle tester"""
from collections import defaultdict, deque
+from typing import TYPE_CHECKING
from airflow.exceptions import AirflowDagCycleException
+if TYPE_CHECKING:
+ from airflow.models import DAG
+
CYCLE_NEW = 0
CYCLE_IN_PROGRESS = 1
CYCLE_DONE = 2
-def test_cycle(dag):
+def test_cycle(dag: "DAG") -> None:
"""
A wrapper function of `check_cycle` for backward compatibility purpose.
New code should use `check_cycle` instead since this function name `test_cycle` starts with 'test_' and
@@ -40,10 +44,10 @@ def test_cycle(dag):
return check_cycle(dag)
-def check_cycle(dag):
- """
- Check to see if there are any cycles in the DAG. Returns False if no cycle found,
- otherwise raises exception.
+def check_cycle(dag: "DAG") -> None:
+ """Check to see if there are any cycles in the DAG.
+
+ :raises AirflowDagCycleException: If cycle is found in the DAG.
"""
# default of int is 0 which corresponds to CYCLE_NEW
visited = defaultdict(int)
| dag.cli should detect DAG cycles
**Description**
I wish `dag.cli()` reported cycles in a task graph.
**Use case / motivation**
We use Airflow (now 2.1.1), with about 40 DAGs authored by many people, with daily changes, and put our DAGs into custom docker image that we deploy with flux.
However, I noticed that a lot of commits from our developers, are a lot of small fixes, because it is tricky to test DAGs locally (especially if one uses plugins, which we don't anymore).
So I wrote a script that does import every dag file, runs it, and calls `dag.cli()`, and I then list all tasks, and run a test --dry_run on each task. That proved to be a super useful script, that can detect a lot of issues (malformed imports, syntax errors, typos in jinja2 templates, uses of unitialized variables, task id name collisions, and so on), before the change is even commited to our git repo, docker image is build, and deployed. Thus making iteration speed faster.
However, I noticed that `dag.cli()` does not detect cycles in a task graph.
Example:
```python3
from pprint import pprint
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago
def print_context(ds, **kwargs):
"""Print the Airflow context and ds variable from the context."""
pprint(kwargs)
print(ds)
return 'Whatever you return gets printed in the logs'
with DAG(
dag_id="test_dag1",
description="Testing",
schedule_interval="@daily",
catchup=False,
start_date=days_ago(2),
) as dag:
a = PythonOperator(
task_id='print_the_context1',
python_callable=print_context,
)
b = PythonOperator(
task_id='print_the_context2',
python_callable=print_context,
)
a >> b
b >> a
if __name__ == '__main__':
dag.cli()
```
Now running:
```
$ python3 dags/primary/examples/tutorial_cycles.py tasks list
print_the_context1
print_the_context2
$
```
```
$ python3 dags/primary/examples/tutorial_cycles.py tasks test --dry-run print_the_context2 '2021-07-19T00:00:00+0000'
[2021-07-19 10:37:27,513] {baseoperator.py:1263} INFO - Dry run
$
```
No warnings.
When running a dag using a scheduler, it eventually detects a cycle (not sure if on load, or only when executing it, or reaching a specific task), but that is a bit too late.
I wonder if it is possible to make `dag.cli()` detect cycles? It might also be possible to detect cycles even earlier, when adding DAG edges, but that might be too slow to do on every call. However, I am pretty sure `dag.cli()` could do it efficiently, as it does have the full graph available. (There are well-known linear-time algorithms based on DFS that detect cycles.)
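For illustration, a minimal recursive DFS over a DAG's task graph (a sketch only; Airflow's own `check_cycle` in `airflow.utils.dag_cycle_tester` does essentially the same walk iteratively):

```python
def has_cycle(dag) -> bool:
    """Depth-first search over the task graph; True if a back edge (a cycle) is found."""
    NEW, IN_PROGRESS, DONE = 0, 1, 2
    state = {task_id: NEW for task_id in dag.task_dict}

    def visit(task_id) -> bool:
        if state[task_id] == IN_PROGRESS:
            return True          # reached a task that is still on the current DFS path
        if state[task_id] == DONE:
            return False
        state[task_id] = IN_PROGRESS
        if any(visit(child) for child in dag.task_dict[task_id].downstream_task_ids):
            return True
        state[task_id] = DONE
        return False

    return any(state[task_id] == NEW and visit(task_id) for task_id in dag.task_dict)


# With the example above (a >> b, b >> a), has_cycle(dag) returns True.
```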
Just now, I noticed that there is a method `dag.topological_sort()`, which is quite handy and will detect cycles, so if I add:
```python3
if __name__ == '__main__':
dag.topological_sort()
dag.cli()
```
It does detect a cycle:
```
Traceback (most recent call last):
File "/home/witek/code/airflow/dags/primary/examples/tutorial_cycles.py", line 33, in <module>
print(dag.topological_sort())
File "/home/witek/airflow-testing/venv/lib/python3.9/site-packages/airflow/models/dag.py", line 1119, in topological_sort
raise AirflowException(f"A cyclic dependency occurred in dag: {self.dag_id}")
airflow.exceptions.AirflowException: A cyclic dependency occurred in dag: test_dag1
```
I think it might be useful to make `topological_sort` (and `tree_view`) accessible via `dag.cli()`, so an external script can easily detect cycles this way.
I also noticed that calling `dag.treeview()` does not detect the cycle. In fact, it does not print anything when there is a cycle.
| I think this begs a more general question, should Airflow actively check whether a DAG is actually a DAG (directed acyclic graph)? Currently (from what I know) we don’t actually check this, and the cyclic error happens only when the DAG is actually run.
That's a fair point (regarding what Uranus pointed out), but it does seem like you'd want the user to know that their "DAG" isn't truly valid, right? That way, they don't proceed with something assuming it's correct until it actually hits that error.
Yeah. I think it might be worth checking, and yeah - I do not think it is checked (unless I just could not find it).
Cycles are checked by `DagBag`.
https://github.com/apache/airflow/blob/3234527284ce01db67ba22c544f71ddaf28fa27e/airflow/models/dagbag.py#L429
In the case of `dag.cli()`, we don't use `DagBag`, so cycles are not checked. I think it makes sense to check this in the `dag.cli()` method. +1 | 2021-07-20T09:27:11Z | [] | [] |
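For reference, the merged patch at the top of this record does exactly that by calling `check_cycle(self)` at the start of `DAG.cli()`. The same helper can also be called directly from a DAG file; a usage sketch:

```python
from airflow.utils.dag_cycle_tester import check_cycle

if __name__ == "__main__":
    check_cycle(dag)   # raises AirflowDagCycleException if the task graph has a cycle
    dag.cli()
```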
Traceback (most recent call last):
File "/home/witek/code/airflow/dags/primary/examples/tutorial_cycles.py", line 33, in <module>
print(dag.topological_sort())
File "/home/witek/airflow-testing/venv/lib/python3.9/site-packages/airflow/models/dag.py", line 1119, in topological_sort
raise AirflowException(f"A cyclic dependency occurred in dag: {self.dag_id}")
airflow.exceptions.AirflowException: A cyclic dependency occurred in dag: test_dag1
| 2,403 |
|||
apache/airflow | apache__airflow-17210 | 87f408b1e78968580c760acb275ae5bb042161db | diff --git a/airflow/providers/amazon/aws/hooks/base_aws.py b/airflow/providers/amazon/aws/hooks/base_aws.py
--- a/airflow/providers/amazon/aws/hooks/base_aws.py
+++ b/airflow/providers/amazon/aws/hooks/base_aws.py
@@ -37,6 +37,7 @@
import tenacity
from botocore.config import Config
from botocore.credentials import ReadOnlyCredentials
+from slugify import slugify
try:
from functools import cached_property
@@ -188,11 +189,14 @@ def _read_credentials_from_connection(self) -> Tuple[Optional[str], Optional[str
self.log.info("No credentials retrieved from Connection")
return aws_access_key_id, aws_secret_access_key
+ def _strip_invalid_session_name_characters(self, role_session_name: str) -> str:
+ return slugify(role_session_name, regex_pattern=r'[^\w+=,.@-]+')
+
def _assume_role(self, sts_client: boto3.client) -> Dict:
assume_role_kwargs = self.extra_config.get("assume_role_kwargs", {})
if "external_id" in self.extra_config: # Backwards compatibility
assume_role_kwargs["ExternalId"] = self.extra_config.get("external_id")
- role_session_name = f"Airflow_{self.conn.conn_id}"
+ role_session_name = self._strip_invalid_session_name_characters(f"Airflow_{self.conn.conn_id}")
self.log.info(
"Doing sts_client.assume_role to role_arn=%s (role_session_name=%s)",
self.role_arn,
| AWS Hooks fail when assuming role and connection id contains forward slashes
**Apache Airflow version**: 2.1.0
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.3", GitCommit:"1e11e4a2108024935ecfcb2912226cedeafd99df", GitTreeState:"clean", BuildDate:"2020-10-14T18:49:28Z", GoVersion:"go1.15.2", Compiler:"gc", Platform:"darwin/amd64"}
Server Version: version.Info{Major:"1", Minor:"18+", GitVersion:"v1.18.16-eks-7737de", GitCommit:"7737de131e58a68dda49cdd0ad821b4cb3665ae8", GitTreeState:"clean", BuildDate:"2021-03-10T21:33:25Z", GoVersion:"go1.13.15", Compiler:"gc", Platform:"linux/amd64"}
**Environment**: Local/Development
- **Cloud provider or hardware configuration**: Docker container
- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)
- **Kernel** (e.g. `uname -a`): Linux 243e98509628 5.10.25-linuxkit #1 SMP Tue Mar 23 09:27:39 UTC 2021 x86_64 GNU/Linux
- **Install tools**:
- **Others**:
**What happened**:
* Using AWS Secrets Manager secrets backend
* Using S3Hook with aws_conn_id="foo/bar/baz" (example, but the slashes are important)
* Secret value is: `aws://?role_arn=arn%3Aaws%3Aiam%3A%3A<account_id>%3Arole%2F<role_name>®ion_name=us-east-1`
* Get the following error: `botocore.exceptions.ClientError: An error occurred (ValidationError) when calling the AssumeRole operation: 1 validation error detected: Value 'Airflow_data/foo/bar/baz' at 'roleSessionName' failed to satisfy constraint: Member must satisfy regular expression pattern: [\w+=,.@-]*`
**What you expected to happen**:
No error and for boto to attempt to assume the role in the connection URI.
The `_SessionFactory._assume_role` method sets the role session name to `f"Airflow_{self.conn.conn_id}"` with no encoding or sanitization.
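A quick way to see why this fails is to check the generated session name against the pattern quoted in the error (the conn_id below is just the example from this issue):

```python
import re

# Constraint quoted in the AssumeRole ValidationError: [\w+=,.@-]*
ALLOWED = re.compile(r"[\w+=,.@-]*")

session_name = "Airflow_" + "foo/bar/baz"            # conn_id containing forward slashes
print(ALLOWED.fullmatch(session_name) is not None)   # False -> STS rejects the request
```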
**How to reproduce it**:
* Create an AWS connection with forward slashes in the name/id
  * Use a role_arn in the connection string (e.g. `aws://?role_arn=...`)
* Create a test DAG using an AWS hook. Example below:
```python
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from datetime import datetime, timedelta
with DAG(
dag_id='test_assume_role',
start_date=datetime(2021, 6, 1),
schedule_interval=None, # no schedule, triggered manually/ad-hoc
tags=['test'],
) as dag:
def write_to_s3(**kwargs):
s3_hook = S3Hook(aws_conn_id='aws/test')
s3_hook.load_string(string_data='test', bucket_name='test_bucket', key='test/{{ execution_date }}')
write_test_object = PythonOperator(task_id='write_test_object', python_callable=write_to_s3)
```
**Anything else we need to know**:
This is a redacted log from my actual test while using AWS Secrets Manager. Should get a similar result *without* Secrets Manager though.
<details>
<summary>1.log</summary>
[2021-07-13 12:38:10,271] {taskinstance.py:876} INFO - Dependencies all met for <TaskInstance: test_assume_role.write_test_object 2021-07-13T12:35:02.576772+00:00 [queued]>
[2021-07-13 12:38:10,288] {taskinstance.py:876} INFO - Dependencies all met for <TaskInstance: test_assume_role.write_test_object 2021-07-13T12:35:02.576772+00:00 [queued]>
[2021-07-13 12:38:10,288] {taskinstance.py:1067} INFO -
--------------------------------------------------------------------------------
[2021-07-13 12:38:10,289] {taskinstance.py:1068} INFO - Starting attempt 1 of 1
[2021-07-13 12:38:10,289] {taskinstance.py:1069} INFO -
--------------------------------------------------------------------------------
[2021-07-13 12:38:10,299] {taskinstance.py:1087} INFO - Executing <Task(PythonOperator): write_test_object> on 2021-07-13T12:35:02.576772+00:00
[2021-07-13 12:38:10,305] {standard_task_runner.py:52} INFO - Started process 38974 to run task
[2021-07-13 12:38:10,309] {standard_task_runner.py:76} INFO - Running: ['airflow', 'tasks', 'run', 'test_assume_role', 'write_test_object', '2021-07-13T12:35:02.576772+00:00', '--job-id', '2376', '--pool', 'default_pool', '--raw', '--subdir', 'DAGS_FOLDER/test_assume_role.py', '--cfg-path', '/tmp/tmprusuo0ys', '--error-file', '/tmp/tmp8ytd9bk8']
[2021-07-13 12:38:10,311] {standard_task_runner.py:77} INFO - Job 2376: Subtask write_test_object
[2021-07-13 12:38:10,331] {logging_mixin.py:104} INFO - Running <TaskInstance: test_assume_role.write_test_object 2021-07-13T12:35:02.576772+00:00 [running]> on host 243e98509628
[2021-07-13 12:38:10,392] {taskinstance.py:1282} INFO - Exporting the following env vars:
AIRFLOW_CTX_DAG_OWNER=airflow
AIRFLOW_CTX_DAG_ID=test_assume_role
AIRFLOW_CTX_TASK_ID=write_test_object
AIRFLOW_CTX_EXECUTION_DATE=2021-07-13T12:35:02.576772+00:00
AIRFLOW_CTX_DAG_RUN_ID=manual__2021-07-13T12:35:02.576772+00:00
[2021-07-13 12:38:10,419] {base_aws.py:362} INFO - Airflow Connection: aws_conn_id=foo/bar/baz
[2021-07-13 12:38:10,444] {credentials.py:1087} INFO - Found credentials in environment variables.
[2021-07-13 12:38:11,079] {base_aws.py:173} INFO - No credentials retrieved from Connection
[2021-07-13 12:38:11,079] {base_aws.py:76} INFO - Retrieving region_name from Connection.extra_config['region_name']
[2021-07-13 12:38:11,079] {base_aws.py:81} INFO - Creating session with aws_access_key_id=None region_name=us-east-1
[2021-07-13 12:38:11,096] {base_aws.py:151} INFO - role_arn is arn:aws:iam::<account_id>:role/<role_name>
[2021-07-13 12:38:11,096] {base_aws.py:97} INFO - assume_role_method=None
[2021-07-13 12:38:11,098] {credentials.py:1087} INFO - Found credentials in environment variables.
[2021-07-13 12:38:11,119] {base_aws.py:185} INFO - Doing sts_client.assume_role to role_arn=arn:aws:iam::<account_id>:role/<role_name> (role_session_name=Airflow_data/foo/bar/baz)
[2021-07-13 12:38:11,407] {taskinstance.py:1481} ERROR - Task failed with exception
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 150, in execute
return_value = self.execute_callable()
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 161, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/usr/local/airflow/dags/test_assume_role.py", line 49, in write_to_s3
key='test/{{ execution_date }}'
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 571, in load_string
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 652, in _upload_file_obj
if not replace and self.check_for_key(key, bucket_name):
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 328, in check_for_key
raise e
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 322, in check_for_key
self.get_conn().head_object(Bucket=bucket_name, Key=key)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 455, in get_conn
return self.conn
File "/usr/local/lib/python3.7/site-packages/cached_property.py", line 36, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 437, in conn
return self.get_client_type(self.client_type, region_name=self.region_name)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 403, in get_client_type
session, endpoint_url = self._get_credentials(region_name)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 379, in _get_credentials
conn=connection_object, region_name=region_name, config=self.config
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 69, in create_session
return self._impersonate_to_role(role_arn=role_arn, session=session, session_kwargs=session_kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 101, in _impersonate_to_role
sts_client=sts_client, role_arn=role_arn, assume_role_kwargs=assume_role_kwargs
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 188, in _assume_role
RoleArn=role_arn, RoleSessionName=role_session_name, **assume_role_kwargs
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ValidationError) when calling the AssumeRole operation: 1 validation error detected: Value 'Airflow_data/foo/bar/baz' at 'roleSessionName' failed to satisfy constraint: Member must satisfy regular expression pattern: [\w+=,.@-]*
[2021-07-13 12:38:11,417] {taskinstance.py:1531} INFO - Marking task as FAILED. dag_id=test_assume_role, task_id=write_test_object, execution_date=20210713T123502, start_date=20210713T123810, end_date=20210713T123811
[2021-07-13 12:38:11,486] {local_task_job.py:151} INFO - Task exited with return code 1
</details>
| Thanks for opening your first issue here! Be sure to follow the issue template!
Are you willing to submit a PR? I'm happy to help with review.
> Are you willing to submit a PR? I'm happy to help with review.
That may be something I can start on next week.
I'd appreciate input on solutions.
My initial thought is there are 2 possible solutions, either implementing only 1 of them or implementing both.
1. Remove all characters that don't match the regular expression pattern (`[\w+=,.@-]*`) OR replace them with hyphens (see the sketch after this list)
2. Add an option that allows you to specify a custom role session name in the AWS connection URI (e.g. `role_session_name=foo`) -- but I'm not terribly familiar with that code, so I don't know if that's the best way to handle it or how complex the change would be
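A minimal sketch of option 1 with a plain regex (the merged patch at the top of this record does the same thing via `slugify` with a custom `regex_pattern`):

```python
import re


def strip_invalid_session_name_characters(name: str) -> str:
    """Replace runs of characters outside AWS's allowed set with a single hyphen."""
    return re.sub(r"[^\w+=,.@-]+", "-", name)


print(strip_invalid_session_name_characters("Airflow_foo/bar/baz"))
# -> Airflow_foo-bar-baz
```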
> replace them with hyphens
It sounds like the simplest and most user-friendly solution. The value will still be readable and the user will not configure additional options. | 2021-07-25T17:35:57Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 150, in execute
return_value = self.execute_callable()
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 161, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/usr/local/airflow/dags/test_assume_role.py", line 49, in write_to_s3
key='test/{{ execution_date }}'
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 571, in load_string
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 652, in _upload_file_obj
if not replace and self.check_for_key(key, bucket_name):
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 328, in check_for_key
raise e
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 322, in check_for_key
self.get_conn().head_object(Bucket=bucket_name, Key=key)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 455, in get_conn
return self.conn
File "/usr/local/lib/python3.7/site-packages/cached_property.py", line 36, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 437, in conn
return self.get_client_type(self.client_type, region_name=self.region_name)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 403, in get_client_type
session, endpoint_url = self._get_credentials(region_name)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 379, in _get_credentials
conn=connection_object, region_name=region_name, config=self.config
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 69, in create_session
return self._impersonate_to_role(role_arn=role_arn, session=session, session_kwargs=session_kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 101, in _impersonate_to_role
sts_client=sts_client, role_arn=role_arn, assume_role_kwargs=assume_role_kwargs
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/base_aws.py", line 188, in _assume_role
RoleArn=role_arn, RoleSessionName=role_session_name, **assume_role_kwargs
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ValidationError) when calling the AssumeRole operation: 1 validation error detected: Value 'Airflow_data/foo/bar/baz' at 'roleSessionName' failed to satisfy constraint: Member must satisfy regular expression pattern: [\w+=,.@-]*
| 2,404 |
|||
apache/airflow | apache__airflow-17539 | 719709b6e994a99ad2cb8f90042a19a7924acb8e | diff --git a/airflow/providers/google/cloud/secrets/secret_manager.py b/airflow/providers/google/cloud/secrets/secret_manager.py
--- a/airflow/providers/google/cloud/secrets/secret_manager.py
+++ b/airflow/providers/google/cloud/secrets/secret_manager.py
@@ -19,11 +19,6 @@
import logging
from typing import Optional
-try:
- from functools import cached_property
-except ImportError:
- from cached_property import cached_property
-
from google.auth.exceptions import DefaultCredentialsError
from airflow.exceptions import AirflowException
@@ -121,10 +116,10 @@ def __init__(
if project_id:
self.project_id = project_id
- @cached_property
+ @property
def client(self) -> _SecretManagerClient:
"""
- Cached property returning secret client.
+ Property returning secret client.
:return: Secrets client
"""
| google.api_core.exceptions.Unknown: None Stream removed (Snowflake and GCP Secret Manager)
**Apache Airflow version**: 2.1.0
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): N/A
**Environment**:
- **Cloud provider or hardware configuration**: Astronomer-based local setup using Docker `quay.io/astronomer/ap-airflow:2.1.0-2-buster-onbuild`
- **OS** (e.g. from /etc/os-release): `Debian GNU/Linux 10 (buster)`
- **Kernel** (e.g. `uname -a`): `Linux 7a92d1fd4406 5.10.25-linuxkit #1 SMP Tue Mar 23 09:27:39 UTC 2021 x86_64 GNU/Linux`
- **Install tools**: apache-airflow-providers-snowflake
- **Others**:
**What happened**:
Having configured a Snowflake connection and pointed to the GCP Secret Manager backend (`AIRFLOW__SECRETS__BACKEND=airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend`), I am getting a pretty consistent error traced all the way down to gRPC:
```
File "/usr/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
return callable_(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/grpc/_channel.py", line 946, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/usr/local/lib/python3.7/site-packages/grpc/_channel.py", line 849, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Stream removed"
debug_error_string = "{"created":"@1624370913.481874500","description":"Error received from peer ipv4:172.xxx.xx.xxx:443","file":"src/core/lib/surface/call.cc","file_line":1067,"grpc_message":"Stream removed","grpc_status":2}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 150, in execute
return_value = self.execute_callable()
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 161, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/usr/local/airflow/dags/qe/weekly.py", line 63, in snfk_hook
df = hook.get_pandas_df(sql)
File "/usr/local/lib/python3.7/site-packages/airflow/hooks/dbapi.py", line 116, in get_pandas_df
with closing(self.get_conn()) as conn:
File "/usr/local/lib/python3.7/site-packages/airflow/providers/snowflake/hooks/snowflake.py", line 220, in get_conn
conn_config = self._get_conn_params()
File "/usr/local/lib/python3.7/site-packages/airflow/providers/snowflake/hooks/snowflake.py", line 152, in _get_conn_params
self.snowflake_conn_id # type: ignore[attr-defined] # pylint: disable=no-member
File "/usr/local/lib/python3.7/site-packages/airflow/hooks/base.py", line 67, in get_connection
conn = Connection.get_connection_from_secrets(conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/models/connection.py", line 376, in get_connection_from_secrets
conn = secrets_backend.get_connection(conn_id=conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/secrets/base_secrets.py", line 64, in get_connection
conn_uri = self.get_conn_uri(conn_id=conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/google/cloud/secrets/secret_manager.py", line 134, in get_conn_uri
return self._get_secret(self.connections_prefix, conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/google/cloud/secrets/secret_manager.py", line 170, in _get_secret
return self.client.get_secret(secret_id=secret_id, project_id=self.project_id)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/google/cloud/_internal_client/secret_manager_client.py", line 86, in get_secret
response = self.client.access_secret_version(name)
File "/usr/local/lib/python3.7/site-packages/google/cloud/secretmanager_v1/gapic/secret_manager_service_client.py", line 968, in access_secret_version
request, retry=retry, timeout=timeout, metadata=metadata
File "/usr/local/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 145, in __call__
return wrapped_func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/api_core/retry.py", line 286, in retry_wrapped_func
on_error=on_error,
File "/usr/local/lib/python3.7/site-packages/google/api_core/retry.py", line 184, in retry_target
return target()
File "/usr/local/lib/python3.7/site-packages/google/api_core/timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.Unknown: None Stream removed
```
**What you expected to happen**:
DAG successfully retrieves a configured connection for Snowflake from GCP Secret Manager and executes a query returning back a result.
**How to reproduce it**:
1. Configure Google Cloud Platform as secrets backend
`AIRFLOW__SECRETS__BACKEND=airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend`
2. Configure a Snowflake connection (`requirements.txt` has `apache-airflow-providers-snowflake`)
3. Create a DAG which uses SnowflakeHook similar to this:
```python
import logging
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.snowflake_hook import SnowflakeHook
from airflow.contrib.operators.snowflake_operator import SnowflakeOperator
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
args = {"owner": "Airflow", "start_date": airflow.utils.dates.days_ago(2)}
dag = DAG(
dag_id="snowflake_automation", default_args=args, schedule_interval=None
)
snowflake_query = [
"""create table public.test_employee (id number, name string);""",
"""insert into public.test_employee values(1, “Sam”),(2, “Andy”),(3, “Gill”);""",
]
def get_row_count(**context):
dwh_hook = SnowflakeHook(snowflake_conn_id="snowflake_conn")
result = dwh_hook.get_first("select count(*) from public.test_employee")
logging.info("Number of rows in `public.test_employee` - %s", result[0])
with dag:
create_insert = SnowflakeOperator(
task_id="snowfalke_create",
sql=snowflake_query ,
snowflake_conn_id="snowflake_conn",
)
get_count = PythonOperator(task_id="get_count", python_callable=get_row_count)
create_insert >> get_count
```
**Anything else we need to know**:
I looked around to see if this is an issue with Google's `api-core`, and it seems somebody has done research pointing out that it might be a downstream implementation issue and not an `api-core` issue: https://stackoverflow.com/questions/67374613/why-does-accessing-this-variable-fail-after-it-is-used-in-a-thread
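For what it's worth, the merged patch at the top of this record drops the `cached_property` on `client`, so a fresh Secret Manager client (and gRPC channel) is built on every access instead of one instance being cached and later reused from a different thread or process. A minimal sketch of the two patterns follows; the factory below is a stand-in, not the real client constructor:

```python
from functools import cached_property


def make_client():
    """Stand-in for constructing a new gRPC-backed Secret Manager client."""
    return object()


class CachedBackend:
    @cached_property
    def client(self):
        return make_client()   # built once, then reused for the life of the process


class FreshBackend:
    @property
    def client(self):
        return make_client()   # rebuilt on every access, as the patch above does


backend = FreshBackend()
print(backend.client is backend.client)   # False: no stale client/channel is reused
```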
| Thanks for opening your first issue here! Be sure to follow the issue template!
I am afraid there isn't much we can do in Airflow to fix it. Closing it. | 2021-08-10T17:47:49Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1137, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 150, in execute
return_value = self.execute_callable()
File "/usr/local/lib/python3.7/site-packages/airflow/operators/python.py", line 161, in execute_callable
return self.python_callable(*self.op_args, **self.op_kwargs)
File "/usr/local/airflow/dags/qe/weekly.py", line 63, in snfk_hook
df = hook.get_pandas_df(sql)
File "/usr/local/lib/python3.7/site-packages/airflow/hooks/dbapi.py", line 116, in get_pandas_df
with closing(self.get_conn()) as conn:
File "/usr/local/lib/python3.7/site-packages/airflow/providers/snowflake/hooks/snowflake.py", line 220, in get_conn
conn_config = self._get_conn_params()
File "/usr/local/lib/python3.7/site-packages/airflow/providers/snowflake/hooks/snowflake.py", line 152, in _get_conn_params
self.snowflake_conn_id # type: ignore[attr-defined] # pylint: disable=no-member
File "/usr/local/lib/python3.7/site-packages/airflow/hooks/base.py", line 67, in get_connection
conn = Connection.get_connection_from_secrets(conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/models/connection.py", line 376, in get_connection_from_secrets
conn = secrets_backend.get_connection(conn_id=conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/secrets/base_secrets.py", line 64, in get_connection
conn_uri = self.get_conn_uri(conn_id=conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/google/cloud/secrets/secret_manager.py", line 134, in get_conn_uri
return self._get_secret(self.connections_prefix, conn_id)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/google/cloud/secrets/secret_manager.py", line 170, in _get_secret
return self.client.get_secret(secret_id=secret_id, project_id=self.project_id)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/google/cloud/_internal_client/secret_manager_client.py", line 86, in get_secret
response = self.client.access_secret_version(name)
File "/usr/local/lib/python3.7/site-packages/google/cloud/secretmanager_v1/gapic/secret_manager_service_client.py", line 968, in access_secret_version
request, retry=retry, timeout=timeout, metadata=metadata
File "/usr/local/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 145, in __call__
return wrapped_func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/api_core/retry.py", line 286, in retry_wrapped_func
on_error=on_error,
File "/usr/local/lib/python3.7/site-packages/google/api_core/retry.py", line 184, in retry_target
return target()
File "/usr/local/lib/python3.7/site-packages/google/api_core/timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.Unknown: None Stream removed
| 2,408 |
|||
apache/airflow | apache__airflow-18224 | 27144bd36794d3450a337786c84c4ddde9c79da3 | diff --git a/airflow/api_connexion/endpoints/user_endpoint.py b/airflow/api_connexion/endpoints/user_endpoint.py
--- a/airflow/api_connexion/endpoints/user_endpoint.py
+++ b/airflow/api_connexion/endpoints/user_endpoint.py
@@ -21,7 +21,7 @@
from werkzeug.security import generate_password_hash
from airflow.api_connexion import security
-from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound
+from airflow.api_connexion.exceptions import AlreadyExists, BadRequest, NotFound, Unknown
from airflow.api_connexion.parameters import apply_sorting, check_limit, format_parameters
from airflow.api_connexion.schemas.user_schema import (
UserCollection,
@@ -76,10 +76,14 @@ def post_user():
raise BadRequest(detail=str(e.messages))
security_manager = current_app.appbuilder.sm
+ username = data["username"]
+ email = data["email"]
- user = security_manager.find_user(username=data["username"])
- if user is not None:
- detail = f"Username `{user.username}` already exists. Use PATCH to update."
+ if security_manager.find_user(username=username):
+ detail = f"Username `{username}` already exists. Use PATCH to update."
+ raise AlreadyExists(detail=detail)
+ if security_manager.find_user(email=email):
+ detail = f"The email `{email}` is already taken."
raise AlreadyExists(detail=detail)
roles_to_add = []
@@ -101,6 +105,10 @@ def post_user():
default_role = security_manager.find_role(security_manager.auth_user_registration_role)
user = security_manager.add_user(role=default_role, **data)
+ if not user:
+ detail = f"Failed to add user `{username}`."
+ return Unknown(detail=detail)
+
if roles_to_add:
user.roles.extend(roles_to_add)
security_manager.update_user(user)
| POST /api/v1/users fails with exception
### Apache Airflow version
main (development)
### Operating System
From Astronomer’s QA team
### Versions of Apache Airflow Providers
_No response_
### Deployment
Astronomer
### Deployment details
_No response_
### What happened
When adding a new user, The following exception is emitted:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/decorator.py", line 48, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/uri_parsing.py", line 144, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/validation.py", line 184, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/response.py", line 103, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/parameter.py", line 121, in wrapper
return function(**kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/security.py", line 47, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/endpoints/user_endpoint.py", line 105, in post_user
user.roles.extend(roles_to_add)
AttributeError: 'bool' object has no attribute 'roles'
```
The immediate cause of this exception is that F.A.B. returns `False` when it fails to add a new user. The problem, however, is _why_ exactly it failed. This is the payload used:
```json
{
"username": "username6",
"password": "password1",
"email": "username5@example.com",
"first_name": "user2",
"last_name": "test1",
"roles":[{"name":"Admin"},{"name":"Viewer"}]
}
```
This went through validation, therefore we know
1. The POST-ing user has permission to create a new user.
2. The format is correct (including the nested roles).
3. There is not already an existing `username6` in the database.
4. All listed roles exist.
(All these are already covered by unit tests.)
Further complicating the issue, F.A.B.’s security manager swallows the exception when this happens and only logs it on the server side, and we’re having trouble locating that log line. It’s quite difficult to diagnose further, so I’m posting this hoping someone has better luck reproducing it.
I will submit a fix to correct the immediate issue, making the API emit 500 with something like “Failed to create user for unknown reason” to make the failure _slightly_ less confusing.
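For illustration only, here is a hedged sketch of the kind of guard the eventual fix adds around F.A.B.'s security manager (see the diff above): check for an already-used email up front, and treat a falsy return from `add_user` as a failure instead of assuming a user object came back. `data`, `default_role`, `roles_to_add` and the exception classes are the surrounding endpoint's names; this sketch raises on every failure path and simplifies the rest of the endpoint.

```python
# Sketch (not the literal patch): guard against F.A.B.'s silent add_user() failure.
username = data["username"]
email = data["email"]

if security_manager.find_user(username=username):
    raise AlreadyExists(detail=f"Username `{username}` already exists. Use PATCH to update.")
if security_manager.find_user(email=email):
    # F.A.B. has a UNIQUE key on email; without this check add_user() hits the
    # constraint, swallows the exception and returns False.
    raise AlreadyExists(detail=f"The email `{email}` is already taken.")

user = security_manager.add_user(role=default_role, **data)
if not user:
    # add_user() returns False on failure, so never call .roles on its result blindly.
    raise Unknown(detail=f"Failed to add user `{username}`.")

user.roles.extend(roles_to_add)
security_manager.update_user(user)
```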
### What you expected to happen
_No response_
### How to reproduce
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
On further investigation, the problem seems to be the email field; F.A.B. appears to have a UNIQUE key on it. I’ll do a PR. | 2021-09-14T04:46:08Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/decorator.py", line 48, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/uri_parsing.py", line 144, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/validation.py", line 184, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/response.py", line 103, in wrapper
response = function(request)
File "/usr/local/lib/python3.9/site-packages/airflow/_vendor/connexion/decorators/parameter.py", line 121, in wrapper
return function(**kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/security.py", line 47, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/api_connexion/endpoints/user_endpoint.py", line 105, in post_user
user.roles.extend(roles_to_add)
AttributeError: 'bool' object has no attribute 'roles'
| 2,424 |
|||
apache/airflow | apache__airflow-18602 | 324aca410bbbddb22336746338ea8a60ad0b3989 | diff --git a/airflow/www/widgets.py b/airflow/www/widgets.py
--- a/airflow/www/widgets.py
+++ b/airflow/www/widgets.py
@@ -42,10 +42,12 @@ def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
if not field.data:
- field.data = ""
+ field.data = ''
template = self.data_template
- return Markup(template % {"text": html_params(type="text", value=field.data, **kwargs)})
+ return Markup(
+ template % {"text": html_params(type="text", value=field.data, required=True, **kwargs)}
+ )
class AirflowDateTimePickerROWidget(AirflowDateTimePickerWidget):
| Error when querying on the Browse view with empty date picker
**Apache Airflow version**: 2.0.2
**What happened**:
Under Browse, when querying with any empty datetime field, I received the "mushroom cloud" error page.
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/views.py", line 551, in list
widgets = self._list()
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/baseviews.py", line 1127, in _list
page_size=page_size,
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/baseviews.py", line 1026, in _get_list_widget
page_size=page_size,
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 425, in query
count = self.query_count(query, filters, select_columns)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 347, in query_count
query, filters, select_columns=select_columns, aliases_mapping={}
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 332, in _apply_inner_all
query = self.apply_filters(query, inner_filters)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 187, in apply_filters
return filters.apply_all(query)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/filters.py", line 298, in apply_all
query = flt.apply(query, value)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 373, in apply
value = timezone.parse(value, timezone=timezone.utc)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/timezone.py", line 173, in parse
return pendulum.parse(string, tz=timezone or TIMEZONE, strict=False) # type: ignore
File "/usr/local/lib/python3.7/site-packages/pendulum/parser.py", line 29, in parse
return _parse(text, **options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parser.py", line 45, in _parse
parsed = base_parse(text, **options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parsing/__init__.py", line 74, in parse
return _normalize(_parse(text, **_options), **_options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parsing/__init__.py", line 120, in _parse
return _parse_common(text, **options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parsing/__init__.py", line 177, in _parse_common
return date(year, month, day)
ValueError: year 0 is out of range
```
**What you expected to happen**:
Perhaps give a warning/error banner that indicates Airflow cannot perform the search with bad input. I think it would also work if the datetime picker defaulted the timestamp to the current time.
It looks like some fields are equipped to do that, but not all.
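The fix that was merged (patch above) takes the frontend route, rendering the date-time input with a `required` attribute so the browser blocks an empty submission. Purely to illustrate the backend-side alternative discussed in the comments below, here is a hedged sketch of skipping the filter when the picker submits an empty string; it is not code from Airflow, only the `pendulum.parse` call taken from the traceback further down.

```python
import pendulum


def parse_filter_value(value):
    """Treat an empty datetime filter value as 'no filter' (illustrative sketch)."""
    if not value:
        # An empty picker submits "", and pendulum.parse("") is what ends up
        # raising "ValueError: year 0 is out of range" in the traceback below.
        return None
    # Same call Airflow's timezone.parse() makes (see the traceback).
    return pendulum.parse(value, tz="UTC", strict=False)


for raw in ("", "2021-05-20 17:12:54"):
    dttm = parse_filter_value(raw)
    print("skip filter" if dttm is None else f"filter on {dttm}")
```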
**How to reproduce it**:
1. Go under Browse
2. Try to query with an empty datetime picker
**Anything else we need to know**:
![Screen Shot 2021-05-20 at 5 12 54 PM](https://user-images.githubusercontent.com/5952735/119063940-12b13f80-b98f-11eb-9b6f-a4d5c396e971.png)
![Screen Shot 2021-05-20 at 5 13 36 PM](https://user-images.githubusercontent.com/5952735/119063945-1349d600-b98f-11eb-91cd-92d813414eba.png)
![Screen Shot 2021-05-20 at 5 12 35 PM](https://user-images.githubusercontent.com/5952735/119063948-13e26c80-b98f-11eb-945f-1439a263fc58.png)
![Screen Shot 2021-05-20 at 5 14 17 PM](https://user-images.githubusercontent.com/5952735/119063949-147b0300-b98f-11eb-8e8c-d5ee1e23bfc1.png)
![Screen Shot 2021-05-20 at 5 14 37 PM](https://user-images.githubusercontent.com/5952735/119063950-147b0300-b98f-11eb-9055-c89518bf8524.png)
![Screen Shot 2021-05-20 at 5 15 01 PM](https://user-images.githubusercontent.com/5952735/119063951-147b0300-b98f-11eb-8323-7602bf673205.png)
| I wonder what the best solution is. We can ignore empty fields in the backend (and probably should no matter what), but I would want the frontend to block form submission when this happens since leaving a field empty is likely a user error.
These pages and forms are created in Flask so we'd need to disable it there if a field is blank. Probably in `forms.py`? | 2021-09-29T08:08:31Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/views.py", line 551, in list
widgets = self._list()
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/baseviews.py", line 1127, in _list
page_size=page_size,
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/baseviews.py", line 1026, in _get_list_widget
page_size=page_size,
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 425, in query
count = self.query_count(query, filters, select_columns)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 347, in query_count
query, filters, select_columns=select_columns, aliases_mapping={}
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 332, in _apply_inner_all
query = self.apply_filters(query, inner_filters)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 187, in apply_filters
return filters.apply_all(query)
File "/usr/local/lib/python3.7/site-packages/flask_appbuilder/models/filters.py", line 298, in apply_all
query = flt.apply(query, value)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 373, in apply
value = timezone.parse(value, timezone=timezone.utc)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/timezone.py", line 173, in parse
return pendulum.parse(string, tz=timezone or TIMEZONE, strict=False) # type: ignore
File "/usr/local/lib/python3.7/site-packages/pendulum/parser.py", line 29, in parse
return _parse(text, **options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parser.py", line 45, in _parse
parsed = base_parse(text, **options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parsing/__init__.py", line 74, in parse
return _normalize(_parse(text, **_options), **_options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parsing/__init__.py", line 120, in _parse
return _parse_common(text, **options)
File "/usr/local/lib/python3.7/site-packages/pendulum/parsing/__init__.py", line 177, in _parse_common
return date(year, month, day)
ValueError: year 0 is out of range
| 2,429 |
|||
apache/airflow | apache__airflow-18733 | 181ac36db3749050a60fc1f08ceace005c5cb58b | diff --git a/airflow/providers/amazon/aws/operators/ecs.py b/airflow/providers/amazon/aws/operators/ecs.py
--- a/airflow/providers/amazon/aws/operators/ecs.py
+++ b/airflow/providers/amazon/aws/operators/ecs.py
@@ -453,6 +453,10 @@ def _check_success_task(self) -> None:
raise AirflowException(response)
for task in response['tasks']:
+
+ if task.get('stopCode', '') == 'TaskFailedToStart':
+ raise AirflowException(f"The task failed to start due to: {task.get('stoppedReason', '')}")
+
# This is a `stoppedReason` that indicates a task has not
# successfully finished, but there is no other indication of failure
# in the response.
@@ -466,13 +470,16 @@ def _check_success_task(self) -> None:
containers = task['containers']
for container in containers:
if container.get('lastStatus') == 'STOPPED' and container['exitCode'] != 0:
- last_logs = "\n".join(
- self.task_log_fetcher.get_last_log_messages(self.number_logs_exception)
- )
- raise AirflowException(
- f"This task is not in success state - last {self.number_logs_exception} "
- f"logs from Cloudwatch:\n{last_logs}"
- )
+ if self.task_log_fetcher:
+ last_logs = "\n".join(
+ self.task_log_fetcher.get_last_log_messages(self.number_logs_exception)
+ )
+ raise AirflowException(
+ f"This task is not in success state - last {self.number_logs_exception} "
+ f"logs from Cloudwatch:\n{last_logs}"
+ )
+ else:
+ raise AirflowException(f'This task is not in success state {task}')
elif container.get('lastStatus') == 'PENDING':
raise AirflowException(f'This task is still pending {task}')
elif 'error' in container.get('reason', '').lower():
| When an ECS Task fails to start, ECS Operator raises a CloudWatch exception
**Apache Airflow version**: 1.10.13
**Environment**:
- **Cloud provider or hardware configuration**:AWS
- **OS** (e.g. from /etc/os-release): Amazon Linux 2
- **Kernel** (e.g. `uname -a`): 4.14.209-160.339.amzn2.x86_64
- **Install tools**: pip
- **Others**:
**What happened**:
When an ECS Task exits with `stopCode: TaskFailedToStart`, the ECS Operator fails with a ResourceNotFoundException from the GetLogEvents operation. This is because the task never started, so no CloudWatch log stream was created.
```
[2021-03-14 02:32:49,792] {ecs_operator.py:147} INFO - ECS Task started: {'tasks': [{'attachments': [], 'availabilityZone': 'ap-northeast-1c', 'clusterArn': 'arn:aws:ecs:ap-northeast-1:xxxx:cluster/ecs-cluster', 'containerInstanceArn': 'arn:aws:ecs:ap-northeast-1:xxxx:container-instance/ecs-cluster/xxxx', 'containers': [{'containerArn': 'arn:aws:ecs:ap-northeast-1:xxxx:container/xxxx', 'taskArn': 'arn:aws:ecs:ap-northeast-1:xxxx:task/ecs-cluster/xxxx', 'name': 'container_image', 'image': 'xxxx.dkr.ecr.ap-northeast-1.amazonaws.com/ecr/container_image:latest', 'lastStatus': 'PENDING', 'networkInterfaces': [], 'cpu': '128', 'memoryReservation': '128'}], 'cpu': '128', 'createdAt': datetime.datetime(2021, 3, 14, 2, 32, 49, 770000, tzinfo=tzlocal()), 'desiredStatus': 'RUNNING', 'group': 'family:task', 'lastStatus': 'PENDING', 'launchType': 'EC2', 'memory': '128', 'overrides': {'containerOverrides': [{'name': 'container_image', 'command': ['/bin/bash', '-c', 'xxxx']}], 'inferenceAcceleratorOverrides': []}, 'startedBy': 'airflow', 'tags': [], 'taskArn': 'arn:aws:ecs:ap-northeast-1:xxxx:task/ecs-cluster/xxxx', 'taskDefinitionArn': 'arn:aws:ecs:ap-northeast-1:xxxx:task-definition/task:1', 'version': 1}], 'failures': [], 'ResponseMetadata': {'RequestId': 'xxxx', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': 'xxxx', 'content-type': 'application/x-amz-json-1.1', 'content-length': '1471', 'date': 'Sun, 14 Mar 2021 02:32:48 GMT'}, 'RetryAttempts': 0}}
[2021-03-14 02:34:15,022] {ecs_operator.py:168} INFO - ECS Task stopped, check status: {'tasks': [{'attachments': [], 'availabilityZone': 'ap-northeast-1c', 'clusterArn': 'arn:aws:ecs:ap-northeast-1:xxxx:cluster/ecs-cluster', 'connectivity': 'CONNECTED', 'connectivityAt': datetime.datetime(2021, 3, 14, 2, 32, 49, 770000, tzinfo=tzlocal()), 'containerInstanceArn': 'arn:aws:ecs:ap-northeast-1:xxxx:container-instance/ecs-cluster/xxxx', 'containers': [{'containerArn': 'arn:aws:ecs:ap-northeast-1:xxxx:container/xxxx', 'taskArn': 'arn:aws:ecs:ap-northeast-1:xxxx:task/ecs-cluster/xxxx', 'name': 'container_image', 'image': 'xxxx.dkr.ecr.ap-northeast-1.amazonaws.com/ecr/container_image:latest', 'lastStatus': 'STOPPED', 'reason': 'CannotPullContainerError: failed to register layer: Error processing tar file(exit status 1): write /var/lib/xxxx: no space left on device', 'networkInterfaces': [], 'healthStatus': 'UNKNOWN', 'cpu': '128', 'memoryReservation': '128'}], 'cpu': '128', 'createdAt': datetime.datetime(2021, 3, 14, 2, 32, 49, 770000, tzinfo=tzlocal()), 'desiredStatus': 'STOPPED', 'executionStoppedAt': datetime.datetime(2021, 3, 14, 2, 34, 12, 810000, tzinfo=tzlocal()), 'group': 'family:task', 'healthStatus': 'UNKNOWN', 'lastStatus': 'STOPPED', 'launchType': 'EC2', 'memory': '128', 'overrides': {'containerOverrides': [{'name': 'container_image', 'command': ['/bin/bash', '-c', 'xxxx']}], 'inferenceAcceleratorOverrides': []}, 'pullStartedAt': datetime.datetime(2021, 3, 14, 2, 32, 51, 68000, tzinfo=tzlocal()), 'pullStoppedAt': datetime.datetime(2021, 3, 14, 2, 34, 13, 584000, tzinfo=tzlocal()), 'startedBy': 'airflow', 'stopCode': 'TaskFailedToStart', 'stoppedAt': datetime.datetime(2021, 3, 14, 2, 34, 12, 821000, tzinfo=tzlocal()), 'stoppedReason': 'Task failed to start', 'stoppingAt': datetime.datetime(2021, 3, 14, 2, 34, 12, 821000, tzinfo=tzlocal()), 'tags': [], 'taskArn': 'arn:aws:ecs:ap-northeast-1:xxxx:task/ecs-cluster/xxxx', 'taskDefinitionArn': 'arn:aws:ecs:ap-northeast-1:xxxx:task-definition/task:1', 'version': 2}], 'failures': [], 'ResponseMetadata': {'RequestId': 'xxxx', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': 'xxxx', 'content-type': 'application/x-amz-json-1.1', 'content-length': '1988', 'date': 'Sun, 14 Mar 2021 02:34:14 GMT'}, 'RetryAttempts': 0}}
[2021-03-14 02:34:15,024] {ecs_operator.py:172} INFO - ECS Task logs output:
[2021-03-14 02:34:15,111] {credentials.py:1094} INFO - Found credentials in environment variables.
[2021-03-14 02:34:15,416] {taskinstance.py:1150} ERROR - An error occurred (ResourceNotFoundException) when calling the GetLogEvents operation: The specified log stream does not exist.
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 984, in _run_raw_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/contrib/operators/ecs_operator.py", line 152, in execute
self._check_success_task()
File "/usr/local/lib/python3.7/site-packages/airflow/contrib/operators/ecs_operator.py", line 175, in _check_success_task
for event in self.get_logs_hook().get_log_events(self.awslogs_group, stream_name):
File "/usr/local/lib/python3.7/site-packages/airflow/contrib/hooks/aws_logs_hook.py", line 85, in get_log_events
**token_arg)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.ResourceNotFoundException: An error occurred (ResourceNotFoundException) when calling the GetLogEvents operation: The specified log stream does not exist.
```
**What you expected to happen**:
The ResourceNotFoundException is misleading because it looks like a problem with CloudWatch Logs. I would expect an AirflowException indicating that the task failed to start.
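For reference, a minimal sketch of the kind of guard the patch above adds to the operator's `_check_success_task`: detect a task that never started and raise `AirflowException` with the ECS-provided `stoppedReason`, instead of going on to read a CloudWatch log stream that was never created. The field names come from the `describe_tasks` response shown in the log output above; the helper name is made up.

```python
from airflow.exceptions import AirflowException


def ensure_task_started(task: dict) -> None:
    """Fail fast when ECS reports the task never started (sketch mirroring the patch)."""
    if task.get("stopCode") == "TaskFailedToStart":
        # No container ran, so no log stream exists; surface the ECS reason
        # rather than a ResourceNotFoundException from GetLogEvents.
        raise AirflowException(
            f"The task failed to start due to: {task.get('stoppedReason', '')}"
        )


# e.g. for each task in the describe_tasks response:
# for task in response["tasks"]:
#     ensure_task_started(task)
```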
**How to reproduce it**:
This can be reproduced by running an ECS Task that fails to start, for example by specifying a non-existent entry_point.
**Anything else we need to know**:
I suspect Issue #11663 has the same problem, i.e. it's not a CloudWatch issue, but a failure to start an ECS Task.
| Thanks for opening your first issue here! Be sure to follow the issue template!
Yes. And Fargate tasks are more likely to experience this `failed-to-start` issue than the conventional EC2 tasks. I've been back and forth with AWS support on this one for a long long time. The AWS's diagnostic is this:
* Randomly AWS could fail to provision the ENI (elastic network interface) when running your Fargate task, and when this happens, the Fargate task fails to start **silently**. The only way to avoid this is to write your own "baby-sitting" routine to poll the `eni_status` in your ecs task after you trigger the task.
But luckily we only see around 1% Fargate tasks failed to start due to this random issue.
I didn't close https://github.com/apache/airflow/issues/11663 just to remind me to create a new issue here but apparently @kanga333 beat me to it :grin:
@zachliu @kanga333 did remove retry for now #16150 solved the problem?
> @zachliu @kanga333 did remove retry for now #16150 solved the problem?
@eladkal unfortunately no, #16150 was for another issue that's only somewhat related to this one | 2021-10-05T09:59:20Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 984, in _run_raw_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/contrib/operators/ecs_operator.py", line 152, in execute
self._check_success_task()
File "/usr/local/lib/python3.7/site-packages/airflow/contrib/operators/ecs_operator.py", line 175, in _check_success_task
for event in self.get_logs_hook().get_log_events(self.awslogs_group, stream_name):
File "/usr/local/lib/python3.7/site-packages/airflow/contrib/hooks/aws_logs_hook.py", line 85, in get_log_events
**token_arg)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.ResourceNotFoundException: An error occurred (ResourceNotFoundException) when calling the GetLogEvents operation: The specified log stream does not exist.
| 2,430 |
|||
apache/airflow | apache__airflow-19258 | 61d009305478e76e53aaf43ce07a181ebbd259d3 | diff --git a/airflow/www/views.py b/airflow/www/views.py
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -2863,6 +2863,7 @@ def gantt(self, session=None):
task_dict['end_date'] = task_dict['end_date'] or timezone.utcnow()
task_dict['extraLinks'] = dag.get_task(ti.task_id).extra_links
task_dict['try_number'] = try_count
+ task_dict['execution_date'] = dttm
tasks.append(task_dict)
tf_count = 0
@@ -2884,6 +2885,7 @@ def gantt(self, session=None):
task_dict['operator'] = task.task_type
task_dict['try_number'] = try_count
task_dict['extraLinks'] = task.extra_links
+ task_dict['execution_date'] = dttm
tasks.append(task_dict)
task_names = [ti.task_id for ti in tis]
| Task modal links are broken in the dag gantt view
### Apache Airflow version
2.2.0 (latest released)
### Operating System
Debian GNU/Linux 11 (bullseye)
### Versions of Apache Airflow Providers
N/A
### Deployment
Other Docker-based deployment
### Deployment details
CeleryExecutor / ECS / Postgres
### What happened
![image](https://user-images.githubusercontent.com/160865/139075540-25fca98f-a858-49ce-8f3f-1b9a145a6853.png)
Clicking on logs / instance details in the task modal shown above causes an exception:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/www/auth.py", line 51, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/decorators.py", line 63, in wrapper
log.execution_date = pendulum.parse(execution_date_value, strict=False)
File "/usr/local/lib/python3.9/site-packages/pendulum/parser.py", line 29, in parse
return _parse(text, **options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parser.py", line 45, in _parse
parsed = base_parse(text, **options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parsing/__init__.py", line 74, in parse
return _normalize(_parse(text, **_options), **_options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parsing/__init__.py", line 120, in _parse
return _parse_common(text, **options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parsing/__init__.py", line 177, in _parse_common
return date(year, month, day)
ValueError: year 0 is out of range
```
This is because the `execution_date` query parameter in the URL is empty, i.e.:
`http://localhost:50008/log?dag_id=test_logging&task_id=check_exception_to_sentry&execution_date=`
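The merged change (patch above) simply adds `execution_date` to each task dict the gantt template receives, so the modal has a value to put into its links. As a small illustration of why the empty parameter matters, here is a hedged sketch of how such a link is assembled; the endpoint and parameter names are taken from the broken URL above, while the helper itself is invented.

```python
from urllib.parse import urlencode


def log_url(dag_id: str, task_id: str, execution_date: str) -> str:
    """Build the /log link the task modal points at (illustrative helper only)."""
    return "/log?" + urlencode(
        {"dag_id": dag_id, "task_id": task_id, "execution_date": execution_date}
    )


# Before the fix the gantt task dict carried no execution_date, so the template
# rendered an empty parameter and the view crashed parsing "":
print(log_url("test_logging", "check_exception_to_sentry", ""))
# After the fix the run's logical date is passed through, e.g.:
print(log_url("test_logging", "check_exception_to_sentry", "2021-10-27T00:00:00+00:00"))
```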
### What you expected to happen
The logs to load / task instance detail page to load
### How to reproduce
See above
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2021-10-27T14:33:56Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/www/auth.py", line 51, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/decorators.py", line 63, in wrapper
log.execution_date = pendulum.parse(execution_date_value, strict=False)
File "/usr/local/lib/python3.9/site-packages/pendulum/parser.py", line 29, in parse
return _parse(text, **options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parser.py", line 45, in _parse
parsed = base_parse(text, **options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parsing/__init__.py", line 74, in parse
return _normalize(_parse(text, **_options), **_options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parsing/__init__.py", line 120, in _parse
return _parse_common(text, **options)
File "/usr/local/lib/python3.9/site-packages/pendulum/parsing/__init__.py", line 177, in _parse_common
return date(year, month, day)
ValueError: year 0 is out of range
| 2,445 |
||||
apache/airflow | apache__airflow-19307 | 98d906743689c4e0068db7a8b0d10f2486638a3b | diff --git a/airflow/models/dag.py b/airflow/models/dag.py
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -607,12 +607,12 @@ def previous_schedule(self, dttm):
return None
return self.timetable._get_prev(timezone.coerce_datetime(dttm))
- def get_next_data_interval(self, dag_model: "DagModel") -> DataInterval:
+ def get_next_data_interval(self, dag_model: "DagModel") -> Optional[DataInterval]:
"""Get the data interval of the next scheduled run.
For compatibility, this method infers the data interval from the DAG's
- schedule if the run does not have an explicit one set, which is possible for
- runs created prior to AIP-39.
+ schedule if the run does not have an explicit one set, which is possible
+ for runs created prior to AIP-39.
This function is private to Airflow core and should not be depended as a
part of the Python API.
@@ -621,11 +621,14 @@ def get_next_data_interval(self, dag_model: "DagModel") -> DataInterval:
"""
if self.dag_id != dag_model.dag_id:
raise ValueError(f"Arguments refer to different DAGs: {self.dag_id} != {dag_model.dag_id}")
+ if dag_model.next_dagrun is None: # Next run not scheduled.
+ return None
data_interval = dag_model.next_dagrun_data_interval
if data_interval is not None:
return data_interval
- # Compatibility: runs scheduled before AIP-39 implementation don't have an
- # explicit data interval. Try to infer from the logical date.
+ # Compatibility: A run was scheduled without an explicit data interval.
+ # This means the run was scheduled before AIP-39 implementation. Try to
+ # infer from the logical date.
return self.infer_automated_data_interval(dag_model.next_dagrun)
def get_run_data_interval(self, run: DagRun) -> DataInterval:
| "Not a valid timetable" when returning None from next_dagrun_info in a custom timetable
### Apache Airflow version
2.2.0 (latest released)
### Operating System
Mac
### Versions of Apache Airflow Providers
_No response_
### Deployment
Other Docker-based deployment
### Deployment details
_No response_
### What happened
Getting an exception when returning None from `next_dagrun_info` in a custom timetable. The timetable protocol says that when None is returned, no DagRun should happen, but right now the scheduler throws an exception.
Exception:
```
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 623, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 704, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 787, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 1039, in _schedule_dag_run
self._update_dag_next_dagruns(dag, dag_model, active_runs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 930, in _update_dag_next_dagruns
data_interval = dag.get_next_data_interval(dag_model)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 629, in get_next_data_interval
return self.infer_automated_data_interval(dag_model.next_dagrun)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 671, in infer_automated_data_interval
raise ValueError(f"Not a valid timetable: {self.timetable!r}")
ValueError: Not a valid timetable: <my_timetables.workday_timetable.WorkdayTimetable object at 0x7f42b1f02430>
```
### What you expected to happen
No DagRun to be created; the scheduler should simply stop scheduling new runs for this DAG.
### How to reproduce
Create a custom timetable and return None from `next_dagrun_info` after a few DagRuns have been created by that timetable.
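A hedged, minimal reproduction sketch of such a timetable is below. The method signatures follow the Airflow 2.2 custom-timetable how-to; the class name and cut-off logic are invented for illustration, and the serialization hooks a real timetable plugin needs are omitted. Returning `None` is exactly what the protocol allows, and it is what currently makes the scheduler raise, as the traceback further down shows.

```python
from typing import Optional

from pendulum import DateTime, Duration

from airflow.timetables.base import DagRunInfo, DataInterval, Timetable, TimeRestriction


class StopAfterCutoffTimetable(Timetable):
    """Schedules daily runs until a cut-off date, then returns None (sketch only)."""

    def __init__(self, cutoff: DateTime):
        self.cutoff = cutoff

    def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
        return DataInterval(start=run_after - Duration(days=1), end=run_after)

    def next_dagrun_info(
        self,
        *,
        last_automated_data_interval: Optional[DataInterval],
        restriction: TimeRestriction,
    ) -> Optional[DagRunInfo]:
        if last_automated_data_interval is None:
            start = restriction.earliest
        else:
            start = last_automated_data_interval.end
        if start is None or start >= self.cutoff:
            # Per the protocol this should just mean "no more runs",
            # but today it crashes the scheduler as shown below.
            return None
        return DagRunInfo.interval(start=start, end=start + Duration(days=1))
```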
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2021-10-29T10:18:39Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 623, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 704, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 787, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 1039, in _schedule_dag_run
self._update_dag_next_dagruns(dag, dag_model, active_runs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 930, in _update_dag_next_dagruns
data_interval = dag.get_next_data_interval(dag_model)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 629, in get_next_data_interval
return self.infer_automated_data_interval(dag_model.next_dagrun)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 671, in infer_automated_data_interval
raise ValueError(f"Not a valid timetable: {self.timetable!r}")
ValueError: Not a valid timetable: <my_timetables.workday_timetable.WorkdayTimetable object at 0x7f42b1f02430>
| 2,446 |
||||
apache/airflow | apache__airflow-19418 | 5a113f302769f0ecad3a54bad3027d459cb276a4 | diff --git a/airflow/timetables/interval.py b/airflow/timetables/interval.py
--- a/airflow/timetables/interval.py
+++ b/airflow/timetables/interval.py
@@ -271,8 +271,9 @@ def serialize(self) -> Dict[str, Any]:
return {"delta": delta}
def validate(self) -> None:
- if self._delta.total_seconds() <= 0:
- raise AirflowTimetableInvalid("schedule interval must be positive")
+ now = datetime.datetime.now()
+ if (now + self._delta) <= now:
+ raise AirflowTimetableInvalid(f"schedule interval must be positive, not {self._delta!r}")
def _get_next(self, current: DateTime) -> DateTime:
return convert_to_utc(current + self._delta)
| A dag's schedule interval can no longer be an instance of dateutils.relativedelta
### Apache Airflow version
2.2.1 (latest released)
### Operating System
debian
### Versions of Apache Airflow Providers
apache-airflow==2.2.1
apache-airflow-providers-amazon==2.3.0
apache-airflow-providers-ftp==2.0.1
apache-airflow-providers-google==6.0.0
apache-airflow-providers-http==2.0.1
apache-airflow-providers-imap==2.0.1
apache-airflow-providers-jira==2.0.1
apache-airflow-providers-mysql==2.1.1
apache-airflow-providers-postgres==2.3.0
apache-airflow-providers-redis==2.0.1
apache-airflow-providers-sqlite==2.0.1
apache-airflow-providers-ssh==2.2.0
### Deployment
Other Docker-based deployment
### Deployment details
Dask executor, custom-built Docker images, postgres 12.7 backend
### What happened
I upgraded Airflow from 2.0.2 to 2.2.1, and some DAGs I have that use `dateutil.relativedelta` objects as schedule intervals stopped running
### What you expected to happen
The [code](https://github.com/apache/airflow/blob/2.2.1/airflow/models/dag.py#L101) for the schedule_interval parameter of the DAG constructor indicates that a relativedelta object is allowed, so I expected the DAG to be correctly parsed and scheduled.
### How to reproduce
Create a DAG that has a relativedelta object as its schedule interval, and it will not appear in the UI or be scheduled.
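A minimal reproduction sketch follows, under the assumption of an otherwise ordinary DAG file (the dag_id and task are made up). On 2.2.1 this fails at parse time in `DeltaDataIntervalTimetable.validate()` because `relativedelta` has no `total_seconds()`, which is what the dagbag error below shows.

```python
from datetime import datetime

from dateutil.relativedelta import relativedelta

from airflow import DAG
from airflow.operators.dummy import DummyOperator

with DAG(
    dag_id="monthly_relativedelta_dag",
    start_date=datetime(2021, 1, 1),
    schedule_interval=relativedelta(months=1),  # documented as allowed, but breaks on 2.2.1
    catchup=False,
) as dag:
    DummyOperator(task_id="noop")
```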
### Anything else
Here is the code that causes the failure within the PR where it was introduced: [link](https://github.com/apache/airflow/pull/17414/files#diff-ed37fe966e8247e0bfd8aa28bc2698febeec3807df5f5a00545ca80744f8aff6R267)
Here are the logs for the exception, found in the scheduler logs for the file that contains the offending DAG
<details><pre>
ERROR | {dagbag.py:528} - 'relativedelta' object has no attribute 'total_seconds'
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 515, in collect_dags
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 298, in process_file
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 401, in _process_modules
dag.timetable.validate()
File "/usr/local/lib/python3.9/site-packages/airflow/timetables/interval.py", line 274, in validate
if self._delta.total_seconds() <= 0:
AttributeError: 'relativedelta' object has no attribute 'total_seconds'
</pre></details>
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2021-11-05T02:37:03Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 515, in collect_dags
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 298, in process_file
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 401, in _process_modules
dag.timetable.validate()
File "/usr/local/lib/python3.9/site-packages/airflow/timetables/interval.py", line 274, in validate
if self._delta.total_seconds() <= 0:
AttributeError: 'relativedelta' object has no attribute 'total_seconds'
| 2,448 |
||||
apache/airflow | apache__airflow-19481 | 360474fff3738f70e95580a91e778250afa7ce82 | diff --git a/airflow/api/common/experimental/trigger_dag.py b/airflow/api/common/experimental/trigger_dag.py
--- a/airflow/api/common/experimental/trigger_dag.py
+++ b/airflow/api/common/experimental/trigger_dag.py
@@ -68,10 +68,12 @@ def _trigger_dag(
)
run_id = run_id or DagRun.generate_run_id(DagRunType.MANUAL, execution_date)
- dag_run = DagRun.find(dag_id=dag_id, run_id=run_id)
+ dag_run = DagRun.find_duplicate(dag_id=dag_id, execution_date=execution_date, run_id=run_id)
if dag_run:
- raise DagRunAlreadyExists(f"Run id {run_id} already exists for dag id {dag_id}")
+ raise DagRunAlreadyExists(
+ f"A Dag Run already exists for dag id {dag_id} at {execution_date} with run id {run_id}"
+ )
run_conf = None
if conf:
diff --git a/airflow/cli/commands/standalone_command.py b/airflow/cli/commands/standalone_command.py
--- a/airflow/cli/commands/standalone_command.py
+++ b/airflow/cli/commands/standalone_command.py
@@ -228,7 +228,10 @@ def job_running(self, job):
Checks if the given job name is running and heartbeating correctly
(used to tell if scheduler is alive)
"""
- return job.most_recent_job().is_alive()
+ recent = job.most_recent_job()
+ if not recent:
+ return False
+ return recent.is_alive()
def print_ready(self):
"""
diff --git a/airflow/dag_processing/processor.py b/airflow/dag_processing/processor.py
--- a/airflow/dag_processing/processor.py
+++ b/airflow/dag_processing/processor.py
@@ -414,16 +414,21 @@ def manage_slas(self, dag: DAG, session: Session = None) -> None:
sla_misses = []
next_info = dag.next_dagrun_info(dag.get_run_data_interval(ti.dag_run), restricted=False)
- while next_info.logical_date < ts:
- next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)
- if next_info.logical_date + task.sla < ts:
- sla_miss = SlaMiss(
- task_id=ti.task_id,
- dag_id=ti.dag_id,
- execution_date=next_info.logical_date,
- timestamp=ts,
- )
- sla_misses.append(sla_miss)
+ if next_info is None:
+ self.log.info("Skipping SLA check for %s because task does not have scheduled date", ti)
+ else:
+ while next_info.logical_date < ts:
+ next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)
+ if next_info is None:
+ break
+ if next_info.logical_date + task.sla < ts:
+ sla_miss = SlaMiss(
+ task_id=ti.task_id,
+ dag_id=ti.dag_id,
+ execution_date=next_info.logical_date,
+ timestamp=ts,
+ )
+ sla_misses.append(sla_miss)
if sla_misses:
session.add_all(sla_misses)
session.commit()
diff --git a/airflow/executors/kubernetes_executor.py b/airflow/executors/kubernetes_executor.py
--- a/airflow/executors/kubernetes_executor.py
+++ b/airflow/executors/kubernetes_executor.py
@@ -598,13 +598,17 @@ def sync(self) -> None:
try:
self.kube_scheduler.run_next(task)
except ApiException as e:
- if e.reason == "BadRequest":
- self.log.error("Request was invalid. Failing task")
+
+ # These codes indicate something is wrong with pod definition; otherwise we assume pod
+ # definition is ok, and that retrying may work
+ if e.status in (400, 422):
+ self.log.error("Pod creation failed with reason %r. Failing task", e.reason)
key, _, _, _ = task
self.change_state(key, State.FAILED, e)
else:
self.log.warning(
- 'ApiException when attempting to run task, re-queueing. Message: %s',
+ 'ApiException when attempting to run task, re-queueing. Reason: %r. Message: %s',
+ e.reason,
json.loads(e.body)['message'],
)
self.task_queue.put(task)
diff --git a/airflow/jobs/scheduler_job.py b/airflow/jobs/scheduler_job.py
--- a/airflow/jobs/scheduler_job.py
+++ b/airflow/jobs/scheduler_job.py
@@ -1127,6 +1127,7 @@ def adopt_or_reset_orphaned_tasks(self, session: Session = None):
num_failed = (
session.query(SchedulerJob)
.filter(
+ SchedulerJob.job_type == "SchedulerJob",
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
diff --git a/airflow/migrations/versions/7b2661a43ba3_taskinstance_keyed_to_dagrun.py b/airflow/migrations/versions/7b2661a43ba3_taskinstance_keyed_to_dagrun.py
--- a/airflow/migrations/versions/7b2661a43ba3_taskinstance_keyed_to_dagrun.py
+++ b/airflow/migrations/versions/7b2661a43ba3_taskinstance_keyed_to_dagrun.py
@@ -198,6 +198,9 @@ def upgrade():
if dialect_name == "mysql":
# Mysql creates an index and a constraint -- we have to drop both
batch_op.drop_index('task_reschedule_dag_task_date_fkey')
+ batch_op.alter_column(
+ 'dag_id', existing_type=sa.String(length=ID_LEN), type_=string_id_col_type, nullable=False
+ )
batch_op.drop_index('idx_task_reschedule_dag_task_date')
# Then update the new column by selecting the right value from DagRun
@@ -346,12 +349,12 @@ def downgrade():
batch_op.drop_index('idx_task_reschedule_dag_task_run')
with op.batch_alter_table('task_instance', schema=None) as batch_op:
+ batch_op.drop_constraint('task_instance_pkey', type_='primary')
batch_op.alter_column('execution_date', existing_type=dt_type, existing_nullable=True, nullable=False)
batch_op.alter_column(
'dag_id', existing_type=string_id_col_type, existing_nullable=True, nullable=True
)
- batch_op.drop_constraint('task_instance_pkey', type_='primary')
batch_op.create_primary_key('task_instance_pkey', ['dag_id', 'task_id', 'execution_date'])
batch_op.drop_constraint('task_instance_dag_run_fkey', type_='foreignkey')
@@ -415,11 +418,11 @@ def downgrade():
)
else:
with op.batch_alter_table('dag_run', schema=None) as batch_op:
- batch_op.drop_index('dag_id_state', table_name='dag_run')
+ batch_op.drop_index('dag_id_state')
batch_op.alter_column('run_id', existing_type=sa.VARCHAR(length=250), nullable=True)
batch_op.alter_column('execution_date', existing_type=dt_type, nullable=True)
batch_op.alter_column('dag_id', existing_type=sa.VARCHAR(length=250), nullable=True)
- batch_op.create_index('dag_id_state', 'dag_run', ['dag_id', 'state'], unique=False)
+ batch_op.create_index('dag_id_state', ['dag_id', 'state'], unique=False)
def _multi_table_update(dialect_name, target, column):
diff --git a/airflow/models/dag.py b/airflow/models/dag.py
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -198,6 +198,9 @@ class DAG(LoggingMixin):
:type schedule_interval: datetime.timedelta or
dateutil.relativedelta.relativedelta or str that acts as a cron
expression
+ :param timetable: Specify which timetable to use (in which case schedule_interval
+ must not be set). See :doc:`/howto/timetable` for more information
+ :type timetable: airflow.timetables.base.Timetable
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
@@ -607,12 +610,12 @@ def previous_schedule(self, dttm):
return None
return self.timetable._get_prev(timezone.coerce_datetime(dttm))
- def get_next_data_interval(self, dag_model: "DagModel") -> DataInterval:
+ def get_next_data_interval(self, dag_model: "DagModel") -> Optional[DataInterval]:
"""Get the data interval of the next scheduled run.
For compatibility, this method infers the data interval from the DAG's
- schedule if the run does not have an explicit one set, which is possible for
- runs created prior to AIP-39.
+ schedule if the run does not have an explicit one set, which is possible
+ for runs created prior to AIP-39.
This function is private to Airflow core and should not be depended as a
part of the Python API.
@@ -621,11 +624,14 @@ def get_next_data_interval(self, dag_model: "DagModel") -> DataInterval:
"""
if self.dag_id != dag_model.dag_id:
raise ValueError(f"Arguments refer to different DAGs: {self.dag_id} != {dag_model.dag_id}")
+ if dag_model.next_dagrun is None: # Next run not scheduled.
+ return None
data_interval = dag_model.next_dagrun_data_interval
if data_interval is not None:
return data_interval
- # Compatibility: runs scheduled before AIP-39 implementation don't have an
- # explicit data interval. Try to infer from the logical date.
+ # Compatibility: A run was scheduled without an explicit data interval.
+ # This means the run was scheduled before AIP-39 implementation. Try to
+ # infer from the logical date.
return self.infer_automated_data_interval(dag_model.next_dagrun)
def get_run_data_interval(self, run: DagRun) -> DataInterval:
diff --git a/airflow/models/dagrun.py b/airflow/models/dagrun.py
--- a/airflow/models/dagrun.py
+++ b/airflow/models/dagrun.py
@@ -285,12 +285,13 @@ def next_dagruns_to_examine(
query.limit(max_number), of=cls, session=session, **skip_locked(session=session)
)
- @staticmethod
+ @classmethod
@provide_session
def find(
+ cls,
dag_id: Optional[Union[str, List[str]]] = None,
run_id: Optional[str] = None,
- execution_date: Optional[datetime] = None,
+ execution_date: Optional[Union[datetime, List[datetime]]] = None,
state: Optional[DagRunState] = None,
external_trigger: Optional[bool] = None,
no_backfills: bool = False,
@@ -324,35 +325,65 @@ def find(
:param execution_end_date: dag run that was executed until this date
:type execution_end_date: datetime.datetime
"""
- DR = DagRun
-
- qry = session.query(DR)
+ qry = session.query(cls)
dag_ids = [dag_id] if isinstance(dag_id, str) else dag_id
if dag_ids:
- qry = qry.filter(DR.dag_id.in_(dag_ids))
+ qry = qry.filter(cls.dag_id.in_(dag_ids))
if run_id:
- qry = qry.filter(DR.run_id == run_id)
+ qry = qry.filter(cls.run_id == run_id)
if execution_date:
if isinstance(execution_date, list):
- qry = qry.filter(DR.execution_date.in_(execution_date))
+ qry = qry.filter(cls.execution_date.in_(execution_date))
else:
- qry = qry.filter(DR.execution_date == execution_date)
+ qry = qry.filter(cls.execution_date == execution_date)
if execution_start_date and execution_end_date:
- qry = qry.filter(DR.execution_date.between(execution_start_date, execution_end_date))
+ qry = qry.filter(cls.execution_date.between(execution_start_date, execution_end_date))
elif execution_start_date:
- qry = qry.filter(DR.execution_date >= execution_start_date)
+ qry = qry.filter(cls.execution_date >= execution_start_date)
elif execution_end_date:
- qry = qry.filter(DR.execution_date <= execution_end_date)
+ qry = qry.filter(cls.execution_date <= execution_end_date)
if state:
- qry = qry.filter(DR.state == state)
+ qry = qry.filter(cls.state == state)
if external_trigger is not None:
- qry = qry.filter(DR.external_trigger == external_trigger)
+ qry = qry.filter(cls.external_trigger == external_trigger)
if run_type:
- qry = qry.filter(DR.run_type == run_type)
+ qry = qry.filter(cls.run_type == run_type)
if no_backfills:
- qry = qry.filter(DR.run_type != DagRunType.BACKFILL_JOB)
+ qry = qry.filter(cls.run_type != DagRunType.BACKFILL_JOB)
+
+ return qry.order_by(cls.execution_date).all()
+
+ @classmethod
+ @provide_session
+ def find_duplicate(
+ cls,
+ dag_id: str,
+ run_id: str,
+ execution_date: datetime,
+ session: Session = None,
+ ) -> Optional['DagRun']:
+ """
+ Return an existing run for the DAG with a specific run_id or execution_date.
- return qry.order_by(DR.execution_date).all()
+ *None* is returned if no such DAG run is found.
+
+ :param dag_id: the dag_id to find duplicates for
+ :type dag_id: str
+ :param run_id: defines the run id for this dag run
+ :type run_id: str
+ :param execution_date: the execution date
+ :type execution_date: datetime.datetime
+ :param session: database session
+ :type session: sqlalchemy.orm.session.Session
+ """
+ return (
+ session.query(cls)
+ .filter(
+ cls.dag_id == dag_id,
+ or_(cls.run_id == run_id, cls.execution_date == execution_date),
+ )
+ .one_or_none()
+ )
@staticmethod
def generate_run_id(run_type: DagRunType, execution_date: datetime) -> str:
diff --git a/airflow/models/param.py b/airflow/models/param.py
--- a/airflow/models/param.py
+++ b/airflow/models/param.py
@@ -14,7 +14,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-
from typing import Any, Dict, Optional
import jsonschema
@@ -49,6 +48,7 @@ class Param:
"""
__NO_VALUE_SENTINEL = NoValueSentinel()
+ CLASS_IDENTIFIER = '__class'
def __init__(self, default: Any = __NO_VALUE_SENTINEL, description: str = None, **kwargs):
self.value = default
@@ -90,7 +90,7 @@ def resolve(self, value: Optional[Any] = __NO_VALUE_SENTINEL, suppress_exception
def dump(self) -> dict:
"""Dump the Param as a dictionary"""
- out_dict = {'__class': f'{self.__module__}.{self.__class__.__name__}'}
+ out_dict = {self.CLASS_IDENTIFIER: f'{self.__module__}.{self.__class__.__name__}'}
out_dict.update(self.__dict__)
return out_dict
diff --git a/airflow/models/variable.py b/airflow/models/variable.py
--- a/airflow/models/variable.py
+++ b/airflow/models/variable.py
@@ -197,7 +197,7 @@ def update(
"""
cls.check_for_write_conflict(key)
- if cls.get_variable_from_secrets(key) is None:
+ if cls.get_variable_from_secrets(key=key) is None:
raise KeyError(f'Variable {key} does not exist')
obj = session.query(cls).filter(cls.key == key).first()
@@ -223,6 +223,7 @@ def rotate_fernet_key(self):
if self._val and self.is_encrypted:
self._val = fernet.rotate(self._val.encode('utf-8')).decode()
+ @staticmethod
def check_for_write_conflict(key: str) -> None:
"""
Logs a warning if a variable exists outside of the metastore.
diff --git a/airflow/serialization/serialized_objects.py b/airflow/serialization/serialized_objects.py
--- a/airflow/serialization/serialized_objects.py
+++ b/airflow/serialization/serialized_objects.py
@@ -55,7 +55,6 @@
except ImportError:
HAS_KUBERNETES = False
-
if TYPE_CHECKING:
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
@@ -325,7 +324,7 @@ def _serialize(cls, var: Any) -> Any: # Unfortunately there is no support for r
elif isinstance(var, TaskGroup):
return SerializedTaskGroup.serialize_task_group(var)
elif isinstance(var, Param):
- return cls._encode(var.dump(), type_=DAT.PARAM)
+ return cls._encode(cls._serialize_param(var), type_=DAT.PARAM)
else:
log.debug('Cast type %s to str in serialization.', type(var))
return str(var)
@@ -368,9 +367,7 @@ def _deserialize(cls, encoded_var: Any) -> Any:
elif type_ == DAT.TUPLE:
return tuple(cls._deserialize(v) for v in var)
elif type_ == DAT.PARAM:
- param_class = import_string(var['_type'])
- del var['_type']
- return param_class(**var)
+ return cls._deserialize_param(var)
else:
raise TypeError(f'Invalid type {type_!s} in deserialization.')
@@ -409,6 +406,38 @@ def _value_is_hardcoded_default(cls, attrname: str, value: Any, instance: Any) -
return True
return False
+ @classmethod
+ def _serialize_param(cls, param: Param):
+ return dict(
+ __class=f"{param.__module__}.{param.__class__.__name__}",
+ default=cls._serialize(param.value),
+ description=cls._serialize(param.description),
+ schema=cls._serialize(param.schema),
+ )
+
+ @classmethod
+ def _deserialize_param(cls, param_dict: Dict):
+ """
+ In 2.2.0, Param attrs were assumed to be json-serializable and were not run through
+ this class's ``_serialize`` method. So before running through ``_deserialize``,
+ we first verify that it's necessary to do.
+ """
+ class_name = param_dict['__class']
+ class_ = import_string(class_name) # type: Type[Param]
+ attrs = ('default', 'description', 'schema')
+ kwargs = {}
+ for attr in attrs:
+ if attr not in param_dict:
+ continue
+ val = param_dict[attr]
+ is_serialized = isinstance(val, dict) and '__type' in val
+ if is_serialized:
+ deserialized_val = cls._deserialize(param_dict[attr])
+ kwargs[attr] = deserialized_val
+ else:
+ kwargs[attr] = val
+ return class_(**kwargs)
+
@classmethod
def _serialize_params_dict(cls, params: ParamsDict):
"""Serialize Params dict for a DAG/Task"""
@@ -416,23 +445,20 @@ def _serialize_params_dict(cls, params: ParamsDict):
for k, v in params.items():
# TODO: As of now, we would allow serialization of params which are of type Param only
if f'{v.__module__}.{v.__class__.__name__}' == 'airflow.models.param.Param':
- kwargs = v.dump()
- kwargs['default'] = kwargs.pop('value')
- serialized_params[k] = kwargs
+ serialized_params[k] = cls._serialize_param(v)
else:
raise ValueError('Params to a DAG or a Task can be only of type airflow.models.param.Param')
return serialized_params
@classmethod
def _deserialize_params_dict(cls, encoded_params: Dict) -> ParamsDict:
- """Deserialize a DAGs Params dict"""
+ """Deserialize a DAG's Params dict"""
op_params = {}
for k, v in encoded_params.items():
if isinstance(v, dict) and "__class" in v:
- param_class = import_string(v['__class'])
- op_params[k] = param_class(**v)
+ op_params[k] = cls._deserialize_param(v)
else:
- # Old style params, upgrade it
+ # Old style params, convert it
op_params[k] = Param(v)
return ParamsDict(op_params)
diff --git a/airflow/ti_deps/deps/pool_slots_available_dep.py b/airflow/ti_deps/deps/pool_slots_available_dep.py
--- a/airflow/ti_deps/deps/pool_slots_available_dep.py
+++ b/airflow/ti_deps/deps/pool_slots_available_dep.py
@@ -55,7 +55,7 @@ def _get_dep_statuses(self, ti, session, dep_context=None):
else:
# Controlled by UNIQUE key in slot_pool table,
# only one result can be returned.
- open_slots = pools[0].open_slots()
+ open_slots = pools[0].open_slots(session=session)
if ti.state in EXECUTION_STATES:
open_slots += ti.pool_slots
diff --git a/airflow/timetables/interval.py b/airflow/timetables/interval.py
--- a/airflow/timetables/interval.py
+++ b/airflow/timetables/interval.py
@@ -271,8 +271,9 @@ def serialize(self) -> Dict[str, Any]:
return {"delta": delta}
def validate(self) -> None:
- if self._delta.total_seconds() <= 0:
- raise AirflowTimetableInvalid("schedule interval must be positive")
+ now = datetime.datetime.now()
+ if (now + self._delta) <= now:
+ raise AirflowTimetableInvalid(f"schedule interval must be positive, not {self._delta!r}")
def _get_next(self, current: DateTime) -> DateTime:
return convert_to_utc(current + self._delta)
diff --git a/airflow/utils/cli.py b/airflow/utils/cli.py
--- a/airflow/utils/cli.py
+++ b/airflow/utils/cli.py
@@ -177,8 +177,7 @@ def get_dag_by_file_location(dag_id: str):
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
- 'dag_id could not be found: {}. Either the dag did not exist or it failed to '
- 'parse.'.format(dag_id)
+ f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id]
@@ -191,8 +190,7 @@ def get_dag(subdir: Optional[str], dag_id: str) -> "DAG":
dagbag = DagBag(process_subdir(subdir))
if dag_id not in dagbag.dags:
raise AirflowException(
- 'dag_id could not be found: {}. Either the dag did not exist or it failed to '
- 'parse.'.format(dag_id)
+ f"Dag {dag_id!r} could not be found; either it does not exist or it failed to parse."
)
return dagbag.dags[dag_id]
diff --git a/airflow/utils/db.py b/airflow/utils/db.py
--- a/airflow/utils/db.py
+++ b/airflow/utils/db.py
@@ -18,6 +18,7 @@
import logging
import os
import time
+from tempfile import gettempdir
from typing import Iterable
from sqlalchemy import Table, exc, func, inspect, or_, text
@@ -508,7 +509,7 @@ def create_default_connections(session=None):
Connection(
conn_id="sqlite_default",
conn_type="sqlite",
- host="/tmp/sqlite_default.db",
+ host=os.path.join(gettempdir(), "sqlite_default.db"),
),
session,
)
@@ -708,10 +709,55 @@ def _format_dangling_error(source_table, target_table, invalid_count, reason):
)
-def _move_dangling_run_data_to_new_table(session, source_table, target_table):
+def _move_dangling_run_data_to_new_table(session, source_table: "Table", target_table_name: str):
where_clause = "where dag_id is null or run_id is null or execution_date is null"
- session.execute(text(f"create table {target_table} as select * from {source_table} {where_clause}"))
- session.execute(text(f"delete from {source_table} {where_clause}"))
+ _move_dangling_table(session, source_table, target_table_name, where_clause)
+
+
+def _move_dangling_table(session, source_table: "Table", target_table_name: str, where_clause: str):
+ dialect_name = session.get_bind().dialect.name
+
+ delete_where = " AND ".join(
+ f"{source_table.name}.{c.name} = d.{c.name}" for c in source_table.primary_key.columns
+ )
+ if dialect_name == "mssql":
+ session.execute(
+ text(f"select source.* into {target_table_name} from {source_table} as source {where_clause}")
+ )
+ session.execute(
+ text(
+ f"delete from {source_table} from {source_table} join {target_table_name} AS d ON "
+ + delete_where
+ )
+ )
+ else:
+ # Postgres, MySQL and SQLite all have the same CREATE TABLE a AS SELECT ... syntax
+ session.execute(
+ text(
+ f"create table {target_table_name} as select source.* from {source_table} as source "
+ + where_clause
+ )
+ )
+
+ # But different join-delete syntax.
+ if dialect_name == "mysql":
+ session.execute(
+ text(
+ f"delete {source_table} from {source_table} join {target_table_name} as d on "
+ + delete_where
+ )
+ )
+ elif dialect_name == "sqlite":
+ session.execute(
+ text(
+ f"delete from {source_table} where ROWID in (select {source_table}.ROWID from "
+ f"{source_table} as source join {target_table_name} as d on {delete_where})"
+ )
+ )
+ else:
+ session.execute(
+ text(f"delete from {source_table} using {target_table_name} as d where {delete_where}")
+ )
def check_run_id_null(session) -> Iterable[str]:
@@ -719,7 +765,7 @@ def check_run_id_null(session) -> Iterable[str]:
metadata = sqlalchemy.schema.MetaData(session.bind)
try:
- metadata.reflect(only=[DagRun.__tablename__])
+ metadata.reflect(only=[DagRun.__tablename__], extend_existing=True, resolve_fks=False)
except exc.InvalidRequestError:
# Table doesn't exist -- empty db
return
@@ -744,21 +790,16 @@ def check_run_id_null(session) -> Iterable[str]:
reason="with a NULL dag_id, run_id, or execution_date",
)
return
- _move_dangling_run_data_to_new_table(session, dagrun_table.name, dagrun_dangling_table_name)
+ _move_dangling_run_data_to_new_table(session, dagrun_table, dagrun_dangling_table_name)
-def _move_dangling_task_data_to_new_table(session, source_table, target_table):
- where_clause = f"""
- where (task_id, dag_id, execution_date) IN (
- select source.task_id, source.dag_id, source.execution_date
- from {source_table} as source
- left join dag_run as dr
- on (source.dag_id = dr.dag_id and source.execution_date = dr.execution_date)
- where dr.id is null
- )
+def _move_dangling_task_data_to_new_table(session, source_table: "Table", target_table_name: str):
+ where_clause = """
+ left join dag_run as dr
+ on (source.dag_id = dr.dag_id and source.execution_date = dr.execution_date)
+ where dr.id is null
"""
- session.execute(text(f"create table {target_table} as select * from {source_table} {where_clause}"))
- session.execute(text(f"delete from {source_table} {where_clause}"))
+ _move_dangling_table(session, source_table, target_table_name, where_clause)
def check_task_tables_without_matching_dagruns(session) -> Iterable[str]:
@@ -769,7 +810,7 @@ def check_task_tables_without_matching_dagruns(session) -> Iterable[str]:
models_to_dagrun = [TaskInstance, TaskReschedule]
for model in models_to_dagrun + [DagRun]:
try:
- metadata.reflect(only=[model.__tablename__])
+ metadata.reflect(only=[model.__tablename__], extend_existing=True, resolve_fks=False)
except exc.InvalidRequestError:
# Table doesn't exist, but try the other ones incase the user is upgrading from an _old_ DB
# version
@@ -820,7 +861,7 @@ def check_task_tables_without_matching_dagruns(session) -> Iterable[str]:
)
errored = True
continue
- _move_dangling_task_data_to_new_table(session, source_table.name, dangling_table_name)
+ _move_dangling_task_data_to_new_table(session, source_table, dangling_table_name)
if errored:
session.rollback()
@@ -841,6 +882,8 @@ def _check_migration_errors(session=None) -> Iterable[str]:
check_task_tables_without_matching_dagruns,
):
yield from check_fn(session)
+ # Ensure there is no "active" transaction. Seems odd, but without this MSSQL can hang
+ session.commit()
@provide_session
diff --git a/airflow/www/security.py b/airflow/www/security.py
--- a/airflow/www/security.py
+++ b/airflow/www/security.py
@@ -553,7 +553,7 @@ def clean_perms(self):
perms = sesh.query(sqla_models.PermissionView).filter(
or_(
sqla_models.PermissionView.permission == None, # noqa
- sqla_models.PermissionView.view_menu == None,
+ sqla_models.PermissionView.view_menu == None, # noqa
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
diff --git a/airflow/www/views.py b/airflow/www/views.py
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -2864,6 +2864,7 @@ def gantt(self, session=None):
task_dict['end_date'] = task_dict['end_date'] or timezone.utcnow()
task_dict['extraLinks'] = dag.get_task(ti.task_id).extra_links
task_dict['try_number'] = try_count
+ task_dict['execution_date'] = dttm
tasks.append(task_dict)
tf_count = 0
@@ -2885,6 +2886,7 @@ def gantt(self, session=None):
task_dict['operator'] = task.task_type
task_dict['try_number'] = try_count
task_dict['extraLinks'] = task.extra_links
+ task_dict['execution_date'] = dttm
tasks.append(task_dict)
task_names = [ti.task_id for ti in tis]
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
logger = logging.getLogger(__name__)
-version = '2.2.1'
+version = '2.2.2'
my_dir = dirname(__file__)
| A dag's schedule interval can no longer be an instance of dateutils.relativedelta
### Apache Airflow version
2.2.1 (latest released)
### Operating System
debian
### Versions of Apache Airflow Providers
apache-airflow==2.2.1
apache-airflow-providers-amazon==2.3.0
apache-airflow-providers-ftp==2.0.1
apache-airflow-providers-google==6.0.0
apache-airflow-providers-http==2.0.1
apache-airflow-providers-imap==2.0.1
apache-airflow-providers-jira==2.0.1
apache-airflow-providers-mysql==2.1.1
apache-airflow-providers-postgres==2.3.0
apache-airflow-providers-redis==2.0.1
apache-airflow-providers-sqlite==2.0.1
apache-airflow-providers-ssh==2.2.0
### Deployment
Other Docker-based deployment
### Deployment details
Dask executor, custom-built Docker images, postgres 12.7 backend
### What happened
I upgraded Airflow from 2.0.2 to 2.2.1, and some DAGs I have that used dateutils.relativedelta objects as schedule intervals stopped running
### What you expected to happen
The [code](https://github.com/apache/airflow/blob/2.2.1/airflow/models/dag.py#L101) for the schedule_interval parameter of the DAG constructor indicates that a relativedelta object is allowed, so I expected the DAG to be correctly parsed and scheduled.
### How to reproduce
Create a DAG that has a relativedelta object as its schedule interval, and it will not appear in the UI or be scheduled.
### Anything else
Here is the code that causes the failure within the PR where it was introduced: [link](https://github.com/apache/airflow/pull/17414/files#diff-ed37fe966e8247e0bfd8aa28bc2698febeec3807df5f5a00545ca80744f8aff6R267)
Here are the logs for the exception, found in the scheduler logs for the file that contains the offending DAG
<details><pre>
ERROR | {dagbag.py:528} - 'relativedelta' object has no attribute 'total_seconds'
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 515, in collect_dags
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 298, in process_file
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 401, in _process_modules
dag.timetable.validate()
File "/usr/local/lib/python3.9/site-packages/airflow/timetables/interval.py", line 274, in validate
if self._delta.total_seconds() <= 0:
AttributeError: 'relativedelta' object has no attribute 'total_seconds'
</pre></details>
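For illustration, here is a minimal standalone sketch (plain Python plus `python-dateutil`, not Airflow code) of why `total_seconds()` is unavailable on `relativedelta`, and why the comparison-based check in the patch above works for both delta types:
```python
import datetime

from dateutil.relativedelta import relativedelta


def is_positive(delta) -> bool:
    # Mirrors the patched validate(): adding the delta to "now" works for both
    # datetime.timedelta and dateutil.relativedelta, so total_seconds() is not needed.
    now = datetime.datetime.now()
    return (now + delta) > now


print(is_positive(datetime.timedelta(days=1)))  # True
print(is_positive(relativedelta(months=1)))     # True
print(is_positive(relativedelta(months=-1)))    # False
# relativedelta(months=1).total_seconds()       # AttributeError, as in the log above
```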
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2021-11-08T20:22:48Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 515, in collect_dags
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 298, in process_file
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 401, in _process_modules
dag.timetable.validate()
File "/usr/local/lib/python3.9/site-packages/airflow/timetables/interval.py", line 274, in validate
if self._delta.total_seconds() <= 0:
AttributeError: 'relativedelta' object has no attribute 'total_seconds'
| 2,451 |
||||
apache/airflow | apache__airflow-19668 | 9a246d3fa30439fb2240458dbb220c24214b4831 | diff --git a/airflow/providers/microsoft/azure/operators/container_instances.py b/airflow/providers/microsoft/azure/operators/container_instances.py
--- a/airflow/providers/microsoft/azure/operators/container_instances.py
+++ b/airflow/providers/microsoft/azure/operators/container_instances.py
@@ -199,7 +199,7 @@ def execute(self, context: dict) -> int:
# Check name again in case it was templated.
self._check_name(self.name)
- self._ci_hook = AzureContainerInstanceHook(self.ci_conn_id)
+ self._ci_hook = AzureContainerInstanceHook(conn_id=self.ci_conn_id)
if self.fail_if_exists:
self.log.info("Testing if container group already exists")
| AzureContainerInstancesOperator is not working due to argument error
### Apache Airflow Provider(s)
microsoft-azure
### Versions of Apache Airflow Providers
3.1.0
### Apache Airflow version
2.1.3 (latest released)
### Operating System
Ubuntu
### Deployment
Docker-Compose
### Deployment details
_No response_
### What happened
I'm getting this error when trying to run AzureContainerInstancesOperator. I believe it's due to the AzureContainerInstanceHook initialization passing self.ci_conn_id as a positional argument.
https://github.com/apache/airflow/blob/79d85573591f641db4b5f89a12213e799ec6dea1/airflow/providers/microsoft/azure/operators/azure_container_instances.py#L202
Should this be?
```py
self._ci_hook = AzureContainerInstanceHook(conn_id=self.ci_conn_id)
```
Error:
```
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1164, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1282, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1312, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/providers/microsoft/azure/operators/azure_container_instances.py", line 202, in execute
self._ci_hook = AzureContainerInstanceHook(self.ci_conn_id)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/providers/microsoft/azure/hooks/azure_container_instance.py", line 45, in __init__
super().__init__(sdk_client=ContainerInstanceManagementClient, *args, **kwargs)
TypeError: __init__() got multiple values for argument 'sdk_client'
```
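To illustrate the collision (with throwaway classes, not the real Azure hooks), here is a hedged sketch of how a positional `conn_id` ends up filling the same parameter as the `sdk_client` keyword the subclass forwards:
```python
class FakeBaseAzureHook:
    def __init__(self, sdk_client, conn_id="azure_default"):
        self.sdk_client = sdk_client
        self.conn_id = conn_id


class FakeContainerInstanceHook(FakeBaseAzureHook):
    def __init__(self, *args, **kwargs):
        # Anything passed positionally lands in *args and fills "sdk_client",
        # which is also supplied as a keyword below -> TypeError.
        super().__init__(sdk_client=object, *args, **kwargs)


try:
    FakeContainerInstanceHook("azure_container_instance_default")
except TypeError as err:
    print(err)  # ... got multiple values for argument 'sdk_client'

FakeContainerInstanceHook(conn_id="azure_container_instance_default")  # works fine
```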
### What you expected to happen
_No response_
### How to reproduce
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
@binhnefits assigned to you
@binhnefits are you still working on this issue?
I can also help with this one.
We are facing the same issue.
Would be great if this could be fixed.
| 2021-11-18T03:12:42Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1164, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1282, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1312, in _execute_task
result = task_copy.execute(context=context)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/providers/microsoft/azure/operators/azure_container_instances.py", line 202, in execute
self._ci_hook = AzureContainerInstanceHook(self.ci_conn_id)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/providers/microsoft/azure/hooks/azure_container_instance.py", line 45, in __init__
super().__init__(sdk_client=ContainerInstanceManagementClient, *args, **kwargs)
TypeError: __init__() got multiple values for argument 'sdk_client'
| 2,454 |
|||
apache/airflow | apache__airflow-19821 | 314a4fe0050783ebb43b300c4c950667d1ddaa89 | diff --git a/airflow/sensors/base.py b/airflow/sensors/base.py
--- a/airflow/sensors/base.py
+++ b/airflow/sensors/base.py
@@ -17,12 +17,14 @@
# under the License.
import datetime
+import functools
import hashlib
import os
import time
from datetime import timedelta
from typing import Any, Callable, Dict, Iterable
+from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import (
AirflowException,
@@ -41,6 +43,16 @@
# See https://github.com/apache/airflow/issues/16035
from airflow.utils.decorators import apply_defaults
+# As documented in https://dev.mysql.com/doc/refman/5.7/en/datetime.html.
+_MYSQL_TIMESTAMP_MAX = datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc)
+
+
+@functools.lru_cache(maxsize=None)
+def _is_metadatabase_mysql() -> bool:
+ if settings.engine is None:
+ raise AirflowException("Must initialize ORM first")
+ return settings.engine.url.get_backend_name() == "mysql"
+
class BaseSensorOperator(BaseOperator, SkipMixin):
"""
@@ -125,6 +137,17 @@ def _validate_input_values(self) -> None:
f"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''}.{self.task_id}'; received '{self.mode}'."
)
+ # Sanity check for poke_interval isn't immediately over MySQL's TIMESTAMP limit.
+ # This check is only rudimentary to catch trivial user errors, e.g. mistakenly
+ # set the value to milliseconds instead of seconds. There's another check when
+ # we actually try to reschedule to ensure database sanity.
+ if self.reschedule and _is_metadatabase_mysql():
+ if timezone.utcnow() + datetime.timedelta(seconds=self.poke_interval) > _MYSQL_TIMESTAMP_MAX:
+ raise AirflowException(
+ f"Cannot set poke_interval to {self.poke_interval} seconds in reschedule "
+ f"mode since it will take reschedule time over MySQL's TIMESTAMP limit."
+ )
+
def poke(self, context: Dict) -> bool:
"""
Function that the sensors defined while deriving this class should
@@ -233,9 +256,13 @@ def run_duration() -> float:
else:
raise AirflowSensorTimeout(f"Snap. Time is OUT. DAG id: {log_dag_id}")
if self.reschedule:
- reschedule_date = timezone.utcnow() + timedelta(
- seconds=self._get_next_poke_interval(started_at, run_duration, try_number)
- )
+ next_poke_interval = self._get_next_poke_interval(started_at, run_duration, try_number)
+ reschedule_date = timezone.utcnow() + timedelta(seconds=next_poke_interval)
+ if _is_metadatabase_mysql() and reschedule_date > _MYSQL_TIMESTAMP_MAX:
+ raise AirflowSensorTimeout(
+ f"Cannot reschedule DAG {log_dag_id} to {reschedule_date.isoformat()} "
+ f"since it is over MySQL's TIMESTAMP storage limit."
+ )
raise AirflowRescheduleException(reschedule_date)
else:
time.sleep(self._get_next_poke_interval(started_at, run_duration, try_number))
| Airflow scheduler crashed with TypeError: '>=' not supported between instances of 'datetime.datetime' and 'NoneType'
### Apache Airflow version
2.1.4
### Operating System
Ubuntu 20.04.3 LTS
### Versions of Apache Airflow Providers
_No response_
### Deployment
Other Docker-based deployment
### Deployment details
_No response_
### What happened
Airflow scheduler crashed with following exception
```
[2021-11-23 22:41:16,528] {scheduler_job.py:662} INFO - Starting the scheduler
[2021-11-23 22:41:16,528] {scheduler_job.py:667} INFO - Processing each file at most -1 times
[2021-11-23 22:41:16,639] {manager.py:254} INFO - Launched DagFileProcessorManager with pid: 19
[2021-11-23 22:41:16,641] {scheduler_job.py:1217} INFO - Resetting orphaned tasks for active dag runs
[2021-11-23 22:41:16,644] {settings.py:51} INFO - Configured default timezone Timezone('Etc/GMT-7')
[2021-11-23 22:41:19,016] {scheduler_job.py:711} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 695, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 788, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 901, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 1143, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/usr/local/lib/python3.8/dist-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/dagrun.py", line 438, in update_state
info = self.task_instance_scheduling_decisions(session)
File "/usr/local/lib/python3.8/dist-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/dagrun.py", line 539, in task_instance_scheduling_decisions
schedulable_tis, changed_tis = self._get_ready_tis(scheduleable_tasks, finished_tasks, session)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/dagrun.py", line 565, in _get_ready_tis
if st.are_dependencies_met(
File "/usr/local/lib/python3.8/dist-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/taskinstance.py", line 890, in are_dependencies_met
for dep_status in self.get_failed_dep_statuses(dep_context=dep_context, session=session):
File "/usr/local/lib/python3.8/dist-packages/airflow/models/taskinstance.py", line 911, in get_failed_dep_statuses
for dep_status in dep.get_dep_statuses(self, session, dep_context):
File "/usr/local/lib/python3.8/dist-packages/airflow/ti_deps/deps/base_ti_dep.py", line 101, in get_dep_statuses
yield from self._get_dep_statuses(ti, session, dep_context)
File "/usr/local/lib/python3.8/dist-packages/airflow/ti_deps/deps/ready_to_reschedule.py", line 66, in _get_dep_statuses
if now >= next_reschedule_date:
TypeError: '>=' not supported between instances of 'datetime.datetime' and 'NoneType'
[2021-11-23 22:41:20,020] {process_utils.py:100} INFO - Sending Signals.SIGTERM to GPID 19
```
### What you expected to happen
_No response_
### How to reproduce
Define a `BaseSensorOperator` task with a large `poke_interval` in `reschedule` mode:
```
BaseSensorOperator(
task_id='task',
poke_interval=863998946,
mode='reschedule',
dag=dag
)
```
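For illustration, a small standalone sketch (plain Python, reusing the 2038-01-19 03:14:07 UTC limit that the patch above encodes as `_MYSQL_TIMESTAMP_MAX`) showing how this poke_interval pushes the computed reschedule date past MySQL's TIMESTAMP range:
```python
import datetime

MYSQL_TIMESTAMP_MAX = datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=datetime.timezone.utc)

poke_interval = 863998946  # seconds (roughly 27 years)
now = datetime.datetime.now(datetime.timezone.utc)
reschedule_date = now + datetime.timedelta(seconds=poke_interval)

print(reschedule_date)                        # far beyond 2038
print(reschedule_date > MYSQL_TIMESTAMP_MAX)  # True -> MySQL coerces it to a "zero" value
```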
### Anything else
_No response_
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
As per https://dev.mysql.com/doc/refman/5.7/en/datetime.html,
> The TIMESTAMP data type is used for values that contain both date and time parts. TIMESTAMP has a range of '1970-01-01 00:00:01' UTC to '2038-01-19 03:14:07' UTC.
> Invalid DATE, DATETIME, or TIMESTAMP values are converted to the “zero” value of the appropriate type ('0000-00-00' or '0000-00-00 00:00:00'), if the SQL mode permits this conversion
reschedule_date was set to `0000-00-00 00:00:00.000000` since it exceeded the upper limit.
It's a big problem if the scheduler crashes due to a problem with a specific DAG. This means that a single DAG causes problems for all the other DAGs.
I'm also interested in why you would want to set an interval of more than 9999 days. While a crashed scheduler is a big problem, the use case for getting there seems theoretical to me, so I wouldn't say this requires an urgent fix.
@eladkal
I generated the DAG from code, but the interval was mistakenly calculated in milliseconds rather than seconds.
So the value was unintended? In that case I guess we could just invent an arbitrary cap on `poke_interval` and make the DAG error out on parse. It does not need to be that precise either.
Ugh, MySQLs date "handling" makes me sad.
> Ugh, MySQLs date "handling" makes me sad.
Very much so. | 2021-11-25T09:07:07Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 695, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 788, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 901, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/usr/local/lib/python3.8/dist-packages/airflow/jobs/scheduler_job.py", line 1143, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/usr/local/lib/python3.8/dist-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/dagrun.py", line 438, in update_state
info = self.task_instance_scheduling_decisions(session)
File "/usr/local/lib/python3.8/dist-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/dagrun.py", line 539, in task_instance_scheduling_decisions
schedulable_tis, changed_tis = self._get_ready_tis(scheduleable_tasks, finished_tasks, session)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/dagrun.py", line 565, in _get_ready_tis
if st.are_dependencies_met(
File "/usr/local/lib/python3.8/dist-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/airflow/models/taskinstance.py", line 890, in are_dependencies_met
for dep_status in self.get_failed_dep_statuses(dep_context=dep_context, session=session):
File "/usr/local/lib/python3.8/dist-packages/airflow/models/taskinstance.py", line 911, in get_failed_dep_statuses
for dep_status in dep.get_dep_statuses(self, session, dep_context):
File "/usr/local/lib/python3.8/dist-packages/airflow/ti_deps/deps/base_ti_dep.py", line 101, in get_dep_statuses
yield from self._get_dep_statuses(ti, session, dep_context)
File "/usr/local/lib/python3.8/dist-packages/airflow/ti_deps/deps/ready_to_reschedule.py", line 66, in _get_dep_statuses
if now >= next_reschedule_date:
TypeError: '>=' not supported between instances of 'datetime.datetime' and 'NoneType'
| 2,458 |
|||
apache/airflow | apache__airflow-19933 | 5b50d610d4f1288347392fac4a6eaaed78d1bc41 | diff --git a/airflow/task/task_runner/standard_task_runner.py b/airflow/task/task_runner/standard_task_runner.py
--- a/airflow/task/task_runner/standard_task_runner.py
+++ b/airflow/task/task_runner/standard_task_runner.py
@@ -85,12 +85,12 @@ def _start_by_fork(self):
args.func(args, dag=self.dag)
return_code = 0
except Exception:
+ return_code = 1
self.log.exception(
"Failed to execute job %s for task %s",
self._task_instance.job_id,
self._task_instance.task_id,
)
- return_code = 1
finally:
# Explicitly flush any pending exception to Sentry if enabled
Sentry.flush()
| Reference to undeclared variable: "local variable 'return_code' referenced before assignment"
### Apache Airflow version
2.2.1
### Operating System
Ubuntu 20.04 LTS
### Versions of Apache Airflow Providers
apache-airflow-providers-amazon==2.3.0
apache-airflow-providers-apache-cassandra==2.1.0
apache-airflow-providers-ftp==2.0.1
apache-airflow-providers-google==6.0.0
apache-airflow-providers-http==2.0.1
apache-airflow-providers-imap==2.0.1
apache-airflow-providers-jdbc==2.0.1
apache-airflow-providers-mysql==2.1.1
apache-airflow-providers-postgres==2.3.0
apache-airflow-providers-presto==2.0.1
apache-airflow-providers-slack==4.1.0
apache-airflow-providers-sqlite==2.0.1
### Deployment
Virtualenv installation
### Deployment details
_No response_
### What happened
Incorrect "finally" block invokes "UnboundLocalError: local variable 'return_code' referenced before assignment"
Traceback example:
```python
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/task/task_runner/standard_task_runner.py", line 88, in _start_by_fork
self.log.exception(
File "/usr/lib/python3.8/logging/__init__.py", line 1481, in exception
self.error(msg, *args, exc_info=exc_info, **kwargs)
File "/usr/lib/python3.8/logging/__init__.py", line 1475, in error
self._log(ERROR, msg, args, **kwargs)
File "/usr/lib/python3.8/logging/__init__.py", line 1589, in _log
self.handle(record)
File "/usr/lib/python3.8/logging/__init__.py", line 1599, in handle
self.callHandlers(record)
File "/usr/lib/python3.8/logging/__init__.py", line 1661, in callHandlers
hdlr.handle(record)
File "/usr/lib/python3.8/logging/__init__.py", line 950, in handle
rv = self.filter(record)
File "/usr/lib/python3.8/logging/__init__.py", line 811, in filter
result = f.filter(record)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/utils/log/secrets_masker.py", line 167, in filter
self._redact_exception_with_context(exc)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/utils/log/secrets_masker.py", line 150, in _redact_exception_with_context
self._redact_exception_with_context(exception.__context__)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/utils/log/secrets_masker.py", line 150, in _redact_exception_with_context
self._redact_exception_with_context(exception.__context__)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/utils/log/secrets_masker.py", line 148, in _redact_exception_with_context
exception.args = (self.redact(v) for v in exception.args)
AttributeError: can't set attribute
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/executors/celery_executor.py", line 121, in _execute_in_fork
args.func(args)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 292, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 105, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 163, in _run_task_by_local_task_job
run_job.run()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 245, in run
self._execute()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 103, in _execute
self.task_runner.start()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/task/task_runner/standard_task_runner.py", line 41, in start
self.process = self._start_by_fork()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/task/task_runner/standard_task_runner.py", line 98, in _start_by_fork
os._exit(return_code)
UnboundLocalError: local variable 'return_code' referenced before assignment
```
Bug location:
https://github.com/apache/airflow/blob/2.2.1/airflow/task/task_runner/standard_task_runner.py#L84-L98
Explanation:
A nested exception is raised while we are trying to log the original exception, so return_code remains unassigned.
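A minimal, self-contained sketch of that failure mode (hypothetical helpers, not the Airflow code itself): when the `except` block raises while logging, `return_code` is never bound, and the `finally`/exit path then fails with `UnboundLocalError`:
```python
def flaky_log(msg: str) -> None:
    # Stands in for self.log.exception() blowing up inside the secrets masker.
    raise AttributeError("can't set attribute")


def run_task() -> None:
    raise RuntimeError("task failed")  # the task itself raises first


def start_by_fork_sketch() -> None:
    try:
        run_task()
        return_code = 0
    except Exception:
        flaky_log("Failed to execute job")  # raises, so the next line never runs
        return_code = 1
    finally:
        print("exit code:", return_code)  # UnboundLocalError: 'return_code'


# start_by_fork_sketch()  # raises UnboundLocalError, mirroring the traceback above.
# Binding return_code = 1 *before* the logging call, as the patch does, fixes it.
```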
### What you expected to happen
The return_code variable should always be assigned before it is used.
### How to reproduce
It is probably hard to reproduce because you need an exception during task execution as well as an exception in the logging function.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
@a-pertsev maybe you would like to make a PR fixing it?
@potiuk unfortunately i had some problems with using breeze to run tests (
I will make one more try later, maybe
From the traceback it looks like this could be solved just by moving `return_code = 1` before `self.log.exception(...)` (?)
yeap
@a-pertsev I could prepare a PR unless you'd like to? | 2021-12-01T14:17:46Z | [] | [] |
Traceback (most recent call last):
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/executors/celery_executor.py", line 121, in _execute_in_fork
args.func(args)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 292, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 105, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 163, in _run_task_by_local_task_job
run_job.run()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 245, in run
self._execute()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 103, in _execute
self.task_runner.start()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/task/task_runner/standard_task_runner.py", line 41, in start
self.process = self._start_by_fork()
File "/var/lib/airflow/.venv/lib/python3.8/site-packages/airflow/task/task_runner/standard_task_runner.py", line 98, in _start_by_fork
os._exit(return_code)
UnboundLocalError: local variable 'return_code' referenced before assignment
| 2,463 |
|||
apache/airflow | apache__airflow-20737 | ca4d2a3d088c911861748f281d3009f9b2167591 | diff --git a/airflow/cli/commands/task_command.py b/airflow/cli/commands/task_command.py
--- a/airflow/cli/commands/task_command.py
+++ b/airflow/cli/commands/task_command.py
@@ -16,24 +16,27 @@
# specific language governing permissions and limitations
# under the License.
"""Task sub-commands"""
+import datetime
import importlib
import json
import logging
import os
import textwrap
-from contextlib import contextmanager, redirect_stderr, redirect_stdout, suppress
+from contextlib import contextmanager, redirect_stderr, redirect_stdout
from typing import List, Optional
from pendulum.parsing.exceptions import ParserError
from sqlalchemy.orm.exc import NoResultFound
+from sqlalchemy.orm.session import Session
from airflow import settings
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
-from airflow.exceptions import AirflowException, DagRunNotFound
+from airflow.exceptions import AirflowException, DagRunNotFound, TaskInstanceNotFound
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagPickle, TaskInstance
+from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.models.xcom import IN_MEMORY_DAGRUN_ID
@@ -50,46 +53,82 @@
from airflow.utils.dates import timezone
from airflow.utils.log.logging_mixin import StreamLogWriter
from airflow.utils.net import get_hostname
-from airflow.utils.session import create_session, provide_session
-
-
-def _get_dag_run(dag, exec_date_or_run_id, create_if_necessary, session):
+from airflow.utils.session import NEW_SESSION, create_session, provide_session
+
+
+def _get_dag_run(
+ *,
+ dag: DAG,
+ exec_date_or_run_id: str,
+ create_if_necessary: bool,
+ session: Session,
+) -> DagRun:
+ """Try to retrieve a DAG run from a string representing either a run ID or logical date.
+
+ This checks DAG runs like this:
+
+ 1. If the input ``exec_date_or_run_id`` matches a DAG run ID, return the run.
+ 2. Try to parse the input as a date. If that works, and the resulting
+ date matches a DAG run's logical date, return the run.
+ 3. If ``create_if_necessary`` is *False* and the input works for neither of
+ the above, raise ``DagRunNotFound``.
+ 4. Try to create a new DAG run. If the input looks like a date, use it as
+ the logical date; otherwise use it as a run ID and set the logical date
+ to the current time.
+ """
dag_run = dag.get_dagrun(run_id=exec_date_or_run_id, session=session)
if dag_run:
return dag_run
- execution_date = None
- with suppress(ParserError, TypeError):
- execution_date = timezone.parse(exec_date_or_run_id)
+ try:
+ execution_date: Optional[datetime.datetime] = timezone.parse(exec_date_or_run_id)
+ except (ParserError, TypeError):
+ execution_date = None
- if create_if_necessary and not execution_date:
- return DagRun(dag_id=dag.dag_id, run_id=exec_date_or_run_id)
try:
return (
session.query(DagRun)
- .filter(
- DagRun.dag_id == dag.dag_id,
- DagRun.execution_date == execution_date,
- )
+ .filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date == execution_date)
.one()
)
except NoResultFound:
- if create_if_necessary:
- return DagRun(dag.dag_id, run_id=IN_MEMORY_DAGRUN_ID, execution_date=execution_date)
- raise DagRunNotFound(
- f"DagRun for {dag.dag_id} with run_id or execution_date of {exec_date_or_run_id!r} not found"
- ) from None
+ if not create_if_necessary:
+ raise DagRunNotFound(
+ f"DagRun for {dag.dag_id} with run_id or execution_date of {exec_date_or_run_id!r} not found"
+ ) from None
+
+ if execution_date is not None:
+ return DagRun(dag.dag_id, run_id=IN_MEMORY_DAGRUN_ID, execution_date=execution_date)
+ return DagRun(dag.dag_id, run_id=exec_date_or_run_id, execution_date=timezone.utcnow())
@provide_session
-def _get_ti(task, exec_date_or_run_id, create_if_necessary=False, session=None):
+def _get_ti(
+ task: BaseOperator,
+ exec_date_or_run_id: str,
+ *,
+ create_if_necessary: bool = False,
+ session: Session = NEW_SESSION,
+) -> TaskInstance:
"""Get the task instance through DagRun.run_id, if that fails, get the TI the old way"""
- dag_run = _get_dag_run(task.dag, exec_date_or_run_id, create_if_necessary, session)
+ dag_run = _get_dag_run(
+ dag=task.dag,
+ exec_date_or_run_id=exec_date_or_run_id,
+ create_if_necessary=create_if_necessary,
+ session=session,
+ )
- ti = dag_run.get_task_instance(task.task_id)
- if not ti and create_if_necessary:
+ ti_or_none = dag_run.get_task_instance(task.task_id)
+ if ti_or_none is None:
+ if not create_if_necessary:
+ raise TaskInstanceNotFound(
+ f"TaskInstance for {task.dag.dag_id}, {task.task_id} with "
+ f"run_id or execution_date of {exec_date_or_run_id!r} not found"
+ )
ti = TaskInstance(task, run_id=dag_run.run_id)
ti.dag_run = dag_run
+ else:
+ ti = ti_or_none
ti.refresh_from_task(task)
return ti
| DagRun for <FOO> with run_id or execution_date of 'manual__XXXXX' not found
### Apache Airflow version
2.2.2 (latest released)
### What happened
After upgrading from Airflow 2.1.4 to 2.2.2, every DAG gives this error upon execution:
> [2021-12-17, 15:01:12 UTC] {taskinstance.py:1259} INFO - Executing <Task(_PythonDecoratedOperator): print_the_context> on 2021-12-17 15:01:08.943254+00:00
[2021-12-17, 15:01:12 UTC] {standard_task_runner.py:52} INFO - Started process 873 to run task
[2021-12-17, 15:01:12 UTC] {standard_task_runner.py:76} INFO - Running: ['airflow', 'tasks', 'run', 'example_python_operator', 'print_the_context', 'manual__2021-12-17T15:01:08.943254+00:00', '--job-id', '326', '--raw', '--subdir', 'DAGS_FOLDER/test.py', '--cfg-path', '/tmp/tmpej1imvkr', '--error-file', '/tmp/tmpqn9ad7em']
[2021-12-17, 15:01:12 UTC] {standard_task_runner.py:77} INFO - Job 326: Subtask print_the_context
[2021-12-17, 15:01:12 UTC] {standard_task_runner.py:92} ERROR - Failed to execute job 326 for task print_the_context
Traceback (most recent call last):
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/task/task_runner/standard_task_runner.py", line 85, in _start_by_fork
args.func(args, dag=self.dag)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 287, in task_run
ti = _get_ti(task, args.execution_date_or_run_id)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 86, in _get_ti
dag_run = _get_dag_run(task.dag, exec_date_or_run_id, create_if_necssary, session)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 80, in _get_dag_run
) from None
airflow.exceptions.DagRunNotFound: DagRun for example_python_operator with run_id or execution_date of 'manual__2021-12-17T15:01:08.943254+00:00' not found
Both tables `airflowdb.task_instance` and `airflowdb.dag_run` have rows with `run_id` equal to "manual__2021-12-17T15:01:08.943254+00:00".
The issue seems to arise in the `_get_dag_run()` function from [airflow/cli/commands/task_command.py](https://github.com/apache/airflow/blob/bb82cc0fbb7a6630eac1155d0c3b445dff13ceb6/airflow/cli/commands/task_command.py#L61-L72):
```
execution_date = None
with suppress(ParserError, TypeError):
execution_date = timezone.parse(exec_date_or_run_id)
if create_if_necessary and not execution_date:
return DagRun(dag_id=dag.dag_id, run_id=exec_date_or_run_id)
try:
return (
session.query(DagRun)
.filter(
DagRun.dag_id == dag.dag_id,
DagRun.execution_date == execution_date,
)
.one()
)
```
Here, `exec_date_or_run_id == 'manual__2021-12-17T15:01:08.943254+00:00'` and `timezone.parse(exec_date_or_run_id)` fails, meaning `execution_date` stays as `None` and the session query returns no results.
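A simplified, standalone sketch of that code path (using `pendulum` directly, which is what Airflow's `timezone.parse` wraps as far as I can tell): a run-id-style string fails to parse, so the date-based fallback query can never match the existing run:
```python
from contextlib import suppress

import pendulum


def parse_execution_date(exec_date_or_run_id: str):
    execution_date = None
    with suppress(Exception):  # the real code suppresses ParserError and TypeError
        execution_date = pendulum.parse(exec_date_or_run_id)
    return execution_date


print(parse_execution_date("2021-12-17T15:01:08.943254+00:00"))          # a DateTime
print(parse_execution_date("manual__2021-12-17T15:01:08.943254+00:00"))  # None
# With None, the fallback query filters on execution_date == None and finds nothing,
# hence the DagRunNotFound above even though rows with that run_id exist.
```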
### What you expected to happen
Expect DAGs to run without the above error.
### How to reproduce
Upgraded from 2.1.4 to 2.2.2 and manually ran a few DAGs. The above log is from the [example_python_operator](https://github.com/apache/airflow/blob/main/airflow/example_dags/example_python_operator.py) DAG provided on the Airflow repo.
### Operating System
Amazon Linux 2
### Versions of Apache Airflow Providers
_No response_
### Deployment
Virtualenv installation
### Deployment details
_No response_
### Anything else
Tried `airflow db upgrade` and `airflow db reset` without any luck. The same issue appears on 2.2.3rc2.
Using MySQL 8.0.23.
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| cc @uranusjr Can you take a look when you have time, please? | 2022-01-07T04:43:23Z | [] | [] |
Traceback (most recent call last):
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/task/task_runner/standard_task_runner.py", line 85, in _start_by_fork
args.func(args, dag=self.dag)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 287, in task_run
ti = _get_ti(task, args.execution_date_or_run_id)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 86, in _get_ti
dag_run = _get_dag_run(task.dag, exec_date_or_run_id, create_if_necssary, session)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 80, in _get_dag_run
) from None
airflow.exceptions.DagRunNotFound: DagRun for example_python_operator with run_id or execution_date of 'manual__2021-12-17T15:01:08.943254+00:00' not found
| 2,477 |
|||
apache/airflow | apache__airflow-20902 | c59001d79facf7e472e0581ac8a538c25eebfda7 | diff --git a/airflow/migrations/versions/e655c0453f75_add_taskmap_and_map_id_on_taskinstance.py b/airflow/migrations/versions/e655c0453f75_add_taskmap_and_map_id_on_taskinstance.py
--- a/airflow/migrations/versions/e655c0453f75_add_taskmap_and_map_id_on_taskinstance.py
+++ b/airflow/migrations/versions/e655c0453f75_add_taskmap_and_map_id_on_taskinstance.py
@@ -24,7 +24,7 @@
"""
from alembic import op
-from sqlalchemy import Column, ForeignKeyConstraint, Integer
+from sqlalchemy import Column, ForeignKeyConstraint, Integer, text
from airflow.models.base import StringID
from airflow.utils.sqlalchemy import ExtendedJSON
@@ -47,12 +47,12 @@ def upgrade():
with op.batch_alter_table("task_instance") as batch_op:
# I think we always use this name for TaskInstance after 7b2661a43ba3?
batch_op.drop_constraint("task_instance_pkey", type_="primary")
- batch_op.add_column(Column("map_index", Integer, nullable=False, default=-1))
+ batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
batch_op.create_primary_key("task_instance_pkey", ["dag_id", "task_id", "run_id", "map_index"])
# Re-create task_reschedule's constraints.
with op.batch_alter_table("task_reschedule") as batch_op:
- batch_op.add_column(Column("map_index", Integer, nullable=False, default=-1))
+ batch_op.add_column(Column("map_index", Integer, nullable=False, server_default=text("-1")))
batch_op.create_foreign_key(
"task_reschedule_ti_fkey",
"task_instance",
diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -60,6 +60,7 @@
func,
inspect,
or_,
+ text,
tuple_,
)
from sqlalchemy.ext.associationproxy import association_proxy
@@ -343,7 +344,7 @@ class TaskInstance(Base, LoggingMixin):
task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True, nullable=False)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True, nullable=False)
run_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True, nullable=False)
- map_index = Column(Integer, primary_key=True, nullable=False, default=-1)
+ map_index = Column(Integer, primary_key=True, nullable=False, server_default=text("-1"))
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
diff --git a/airflow/models/taskreschedule.py b/airflow/models/taskreschedule.py
--- a/airflow/models/taskreschedule.py
+++ b/airflow/models/taskreschedule.py
@@ -20,7 +20,7 @@
import datetime
from typing import TYPE_CHECKING
-from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, String, asc, desc
+from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, String, asc, desc, text
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
@@ -41,7 +41,7 @@ class TaskReschedule(Base):
task_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
run_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
- map_index = Column(Integer, nullable=False, default=-1)
+ map_index = Column(Integer, nullable=False, server_default=text("-1"))
try_number = Column(Integer, nullable=False)
start_date = Column(UtcDateTime, nullable=False)
end_date = Column(UtcDateTime, nullable=False)
| Airflow database upgrade fails with "psycopg2.errors.NotNullViolation: column "map_index" of relation "task_instance" contains null value"s
### Apache Airflow version
main (development)
### What happened
I currently have Airflow 2.2.3 and due to this [issue](https://github.com/apache/airflow/issues/19699) I have tried to upgrade Airflow to this [commit](https://github.com/apache/airflow/commit/14ee831c7ad767e31a3aeccf3edbc519b3b8c923).
When I run `airflow db upgrade` I get the following error:
```
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
INFO [alembic.runtime.migration] Running upgrade 587bdf053233 -> e655c0453f75, Add TaskMap and map_index on TaskInstance.
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.7/dist-packages/sqlalchemy/engine/default.py", line 608, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.NotNullViolation: column "map_index" of relation "task_instance" contains null values
```
The map_index column was introduced with this [PR](https://github.com/apache/airflow/pull/20286).
Could you please advise?
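As background, a hedged sketch with a throwaway SQLite table (not the Airflow migration itself): SQLAlchemy's `default=` is only applied client-side on new inserts, so adding a NOT NULL column to a table that already has rows needs a server-side default in the DDL, which is what `server_default=text("-1")` in the patch above provides:
```python
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE ti (task_id TEXT PRIMARY KEY)"))
    conn.execute(sa.text("INSERT INTO ti VALUES ('print_the_context')"))

    # Fails on a populated table: existing rows would get NULL in a NOT NULL column.
    # conn.execute(sa.text("ALTER TABLE ti ADD COLUMN map_index INTEGER NOT NULL"))

    # Works: the server-side DEFAULT backfills the existing rows.
    conn.execute(sa.text("ALTER TABLE ti ADD COLUMN map_index INTEGER NOT NULL DEFAULT -1"))
    print(conn.execute(sa.text("SELECT * FROM ti")).fetchall())  # [('print_the_context', -1)]
```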
### What you expected to happen
_No response_
### How to reproduce
_No response_
### Operating System
Ubuntu 18.04.6 LTS
### Versions of Apache Airflow Providers
_No response_
### Deployment
Other
### Deployment details
Kubernetes
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
Map Index hasn't been introduced in Airflow 2.2.3, nor was it intended to be; it is currently available in the main branch and will be released in Airflow 2.3. So it is very likely you somehow used some code from the main branch.
Check https://airflow.apache.org/docs/apache-airflow/stable/migrations-ref.html -- that migration isn't available in 2.2.3
Double verify by running the following:
```
AIRFLOW_VERSION=2.2.3
PYTHON_VERSION="$(python --version | cut -d " " -f 2 | cut -d "." -f 1-2)"
CONSTRAINT_URL="https://raw.githubusercontent.com/apache/airflow/constraints-${AIRFLOW_VERSION}/constraints-${PYTHON_VERSION}.txt"
pip install "apache-airflow[async,postgres,google]==${AIRFLOW_VERSION}" --constraint "${CONSTRAINT_URL}"
```
Thank you for your answer.
Yes, I was using the code from the main branch.
Is there an estimate as to when Airflow 2.3 will be released?
@dcardinha Nothing firm, but our aim is "in about a month" | 2022-01-17T10:51:24Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.7/dist-packages/sqlalchemy/engine/default.py", line 608, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.NotNullViolation: column "map_index" of relation "task_instance" contains null values
| 2,480 |
|||
apache/airflow | apache__airflow-21116 | dff536e9409c6fe885aa61402772b946e33dda08 | diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -112,7 +112,7 @@
from airflow.utils.retries import run_with_db_retries
from airflow.utils.session import NEW_SESSION, create_session, provide_session
from airflow.utils.sqlalchemy import ExtendedJSON, UtcDateTime
-from airflow.utils.state import DagRunState, State
+from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.timeout import timeout
try:
@@ -201,11 +201,11 @@ def clear_task_instances(
lambda: defaultdict(lambda: defaultdict(set))
)
for ti in tis:
- if ti.state == State.RUNNING:
+ if ti.state == TaskInstanceState.RUNNING:
if ti.job_id:
# If a task is cleared when running, set its state to RESTARTING so that
# the task is terminated and becomes eligible for retry.
- ti.state = State.RESTARTING
+ ti.state = TaskInstanceState.RESTARTING
job_ids.append(ti.job_id)
else:
task_id = ti.task_id
@@ -220,7 +220,7 @@ def clear_task_instances(
# outdated. We make max_tries the maximum value of its
# original max_tries or the last attempted try number.
ti.max_tries = max(ti.max_tries, ti.prev_attempted_tries)
- ti.state = State.NONE
+ ti.state = None
ti.external_executor_id = None
session.merge(ti)
@@ -258,7 +258,7 @@ def clear_task_instances(
from airflow.jobs.base_job import BaseJob
for job in session.query(BaseJob).filter(BaseJob.id.in_(job_ids)).all():
- job.state = State.RESTARTING
+ job.state = TaskInstanceState.RESTARTING
if activate_dag_runs is not None:
warnings.warn(
@@ -287,10 +287,11 @@ def clear_task_instances(
)
.all()
)
+ dag_run_state = DagRunState(dag_run_state) # Validate the state value.
for dr in drs:
dr.state = dag_run_state
dr.start_date = timezone.utcnow()
- if dag_run_state == State.QUEUED:
+ if dag_run_state == DagRunState.QUEUED:
dr.last_scheduling_decision = None
dr.start_date = None
diff --git a/airflow/utils/state.py b/airflow/utils/state.py
--- a/airflow/utils/state.py
+++ b/airflow/utils/state.py
@@ -30,9 +30,11 @@ class TaskInstanceState(str, Enum):
Note that None is also allowed, so always use this in a type hint with Optional.
"""
+ # The scheduler sets a TaskInstance state to None when it's created but not
+ # yet run, but we don't list it here since TaskInstance is a string enum.
+ # Use None instead if need this state.
+
# Set by the scheduler
- # None - Task is created but should not run yet
- NONE = None
REMOVED = "removed" # Task vanished from DAG before it ran
SCHEDULED = "scheduled" # Task should run and will be handed to executor soon
| Running airflow dags backfill --reset-dagruns <dag_id> -s <execution_start_dt> -e <execution_end_dt> results in error when run twice.
### Apache Airflow version
2.2.3 (latest released)
### What happened
It's the same situation as https://github.com/apache/airflow/issues/21023.
The only change is using `airflow dags backfill` instead of `airflow dags test`.
``` bash
(airflow) [www@np-data-eng-airflow-sync001-lde-jp2v-prod ~]$ airflow dags backfill tutorial --reset-dagruns -s 2022-01-20 -e 2022-01-23
You are about to delete these 9 tasks:
<TaskInstance: tutorial.print_date scheduled__2022-01-20T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.print_date scheduled__2022-01-21T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.print_date scheduled__2022-01-22T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.sleep scheduled__2022-01-20T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.sleep scheduled__2022-01-21T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.sleep scheduled__2022-01-22T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.templated scheduled__2022-01-20T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.templated scheduled__2022-01-21T07:48:54.720148+00:00 [success]>
<TaskInstance: tutorial.templated scheduled__2022-01-22T07:48:54.720148+00:00 [success]>
Are you sure? (yes/no):
y
Traceback (most recent call last):
File "/home1/www/venv3/airflow/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/__main__.py", line 48, in main
args.func(args)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/cli/commands/dag_command.py", line 108, in dag_backfill
dag_run_state=State.NONE,
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/dag.py", line 1948, in clear_dags
dry_run=False,
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/dag.py", line 1887, in clear
dag_run_state=dag_run_state,
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 270, in clear_task_instances
dr.state = dag_run_state
File "<string>", line 1, in __set__
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/dagrun.py", line 194, in set_state
raise ValueError(f"invalid DagRun state: {state}")
ValueError: invalid DagRun state: None
```
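For context, a minimal hedged sketch of the early-validation idea applied in the patch above (the helper name is illustrative; the real change simply coerces the value inside `clear_task_instances`):
```python
# Illustrative only: coercing the requested state through the DagRunState enum
# (airflow.utils.state, as used in the patch above) rejects values such as None
# up front, instead of failing later inside DagRun.set_state.
from airflow.utils.state import DagRunState

def validate_dag_run_state(value):
    # Enum construction raises ValueError for anything that is not a member.
    return DagRunState(value)

validate_dag_run_state("queued")  # -> DagRunState.QUEUED
# validate_dag_run_state(None)    # -> ValueError: None is not a valid DagRunState
```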
### What you expected to happen
_No response_
### How to reproduce
1. Set up Airflow 2.2.3.
2. Run any dag (in my case, the [tutorial dag file](https://airflow.apache.org/docs/apache-airflow/stable/tutorial.html)).
3. Run the same dag again with the `airflow dags backfill` command.
### Operating System
CentOS Linux 7.9
### Versions of Apache Airflow Providers
Providers info
apache-airflow-providers-celery | 2.1.0
apache-airflow-providers-cncf-kubernetes | 3.0.1
apache-airflow-providers-ftp | 2.0.1
apache-airflow-providers-http | 2.0.2
apache-airflow-providers-imap | 2.1.0
apache-airflow-providers-mysql | 2.1.1
apache-airflow-providers-redis | 2.0.1
apache-airflow-providers-slack | 4.1.0
apache-airflow-providers-sqlite | 2.0.1
apache-airflow-providers-ssh | 2.3.0
### Deployment
Virtualenv installation
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Feel free to fix! If you are fast, we can even cherry-pick to 2.2.4 :)
We should also add a check in `clear_task_instances` to catch this mistake, otherwise future regressions are bound to happen.
> We should also add a check in `clear_task_instances` to catch this mistake, otherwise future regressions are bound to happen.
Good point! | 2022-01-26T07:42:40Z | [] | [] |
Traceback (most recent call last):
File "/home1/www/venv3/airflow/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/__main__.py", line 48, in main
args.func(args)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/cli/commands/dag_command.py", line 108, in dag_backfill
dag_run_state=State.NONE,
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/dag.py", line 1948, in clear_dags
dry_run=False,
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, session=session, **kwargs)
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/dag.py", line 1887, in clear
dag_run_state=dag_run_state,
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 270, in clear_task_instances
dr.state = dag_run_state
File "<string>", line 1, in __set__
File "/home1/www/venv3/airflow/lib/python3.7/site-packages/airflow/models/dagrun.py", line 194, in set_state
raise ValueError(f"invalid DagRun state: {state}")
ValueError: invalid DagRun state: None
| 2,483 |
|||
apache/airflow | apache__airflow-21289 | dc3c47dacd2a7058358cc5874b0064a064d4c51e | diff --git a/airflow/providers/elasticsearch/log/es_task_handler.py b/airflow/providers/elasticsearch/log/es_task_handler.py
--- a/airflow/providers/elasticsearch/log/es_task_handler.py
+++ b/airflow/providers/elasticsearch/log/es_task_handler.py
@@ -103,15 +103,25 @@ def __init__(
self.handler: Union[logging.FileHandler, logging.StreamHandler] # type: ignore[assignment]
def _render_log_id(self, ti: TaskInstance, try_number: int) -> str:
- dag_run = ti.dag_run
+ dag_run = ti.get_dagrun()
+ try:
+ data_interval: Tuple[datetime, datetime] = ti.task.dag.get_run_data_interval(dag_run)
+ except AttributeError: # ti.task is not always set.
+ data_interval = (dag_run.data_interval_start, dag_run.data_interval_end)
if self.json_format:
- data_interval_start = self._clean_date(dag_run.data_interval_start)
- data_interval_end = self._clean_date(dag_run.data_interval_end)
+ data_interval_start = self._clean_date(data_interval[0])
+ data_interval_end = self._clean_date(data_interval[1])
execution_date = self._clean_date(dag_run.execution_date)
else:
- data_interval_start = dag_run.data_interval_start.isoformat()
- data_interval_end = dag_run.data_interval_end.isoformat()
+ if data_interval[0]:
+ data_interval_start = data_interval[0].isoformat()
+ else:
+ data_interval_start = ""
+ if data_interval[1]:
+ data_interval_end = data_interval[1].isoformat()
+ else:
+ data_interval_end = ""
execution_date = dag_run.execution_date.isoformat()
return self.log_id_template.format(
@@ -125,14 +135,15 @@ def _render_log_id(self, ti: TaskInstance, try_number: int) -> str:
)
@staticmethod
- def _clean_date(value: datetime) -> str:
+ def _clean_date(value: Optional[datetime]) -> str:
"""
Clean up a date value so that it is safe to query in elasticsearch
by removing reserved characters.
- # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
- :param execution_date: execution date of the dag run.
+ https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters
"""
+ if value is None:
+ return ""
return value.strftime("%Y_%m_%dT%H_%M_%S_%f")
def _group_logs_by_host(self, logs):
diff --git a/airflow/utils/log/file_task_handler.py b/airflow/utils/log/file_task_handler.py
--- a/airflow/utils/log/file_task_handler.py
+++ b/airflow/utils/log/file_task_handler.py
@@ -18,8 +18,9 @@
"""File logging handler for tasks."""
import logging
import os
+from datetime import datetime
from pathlib import Path
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Tuple
import httpx
from itsdangerous import TimedJSONWebSignatureSerializer
@@ -83,9 +84,25 @@ def _render_filename(self, ti: "TaskInstance", try_number: int) -> str:
context["try_number"] = try_number
return render_template_to_string(self.filename_jinja_template, context)
elif self.filename_template:
+ dag_run = ti.get_dagrun()
+ try:
+ data_interval: Tuple[datetime, datetime] = ti.task.dag.get_run_data_interval(dag_run)
+ except AttributeError: # ti.task is not always set.
+ data_interval = (dag_run.data_interval_start, dag_run.data_interval_end)
+ if data_interval[0]:
+ data_interval_start = data_interval[0].isoformat()
+ else:
+ data_interval_start = ""
+ if data_interval[1]:
+ data_interval_end = data_interval[1].isoformat()
+ else:
+ data_interval_end = ""
return self.filename_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
+ run_id=ti.run_id,
+ data_interval_start=data_interval_start,
+ data_interval_end=data_interval_end,
execution_date=ti.get_dagrun().logical_date.isoformat(),
try_number=try_number,
)
| Elasticsearch remote log will not fetch task logs from manual dagruns before 2.2 upgrade
### Apache Airflow Provider(s)
elasticsearch
### Versions of Apache Airflow Providers
```
apache-airflow-providers-amazon==1!2.5.0
apache-airflow-providers-cncf-kubernetes==1!2.1.0
apache-airflow-providers-datadog==1!2.0.1
apache-airflow-providers-elasticsearch==1!2.1.0
apache-airflow-providers-ftp==1!2.0.1
apache-airflow-providers-google==1!6.1.0
apache-airflow-providers-http==1!2.0.1
apache-airflow-providers-imap==1!2.0.1
apache-airflow-providers-microsoft-azure==1!3.3.0
apache-airflow-providers-mysql==1!2.1.1
apache-airflow-providers-postgres==1!2.3.0
apache-airflow-providers-redis==1!2.0.1
apache-airflow-providers-slack==1!4.1.0
apache-airflow-providers-sqlite==1!2.0.1
apache-airflow-providers-ssh==1!2.3.0
```
### Apache Airflow version
2.2.2
### Operating System
Debian Bullseye
### Deployment
Astronomer
### Deployment details
_No response_
### What happened
After upgrading to 2.2, task logs from manual dagruns performed before the upgrade could no longer be retrieved, even though they can still be seen in Kibana. Scheduled dagruns' tasks and tasks for dagruns begun after the upgrade are retrieved without issue.
The issue appears to be that these tasks with missing logs all belong to dagruns that do not have the data_interval_start or data_interval_end attributes set.
### What you expected to happen
Task logs continue to be fetched after upgrade.
### How to reproduce
Below is how I verified the log fetching process.
I ran the code snippet in a python interpreter in the scheduler to test log fetching.
```py
from airflow.models import TaskInstance, DagBag, DagRun
from airflow.settings import Session, DAGS_FOLDER
from airflow.configuration import conf
import logging
from dateutil import parser
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('logging', 'task_log_reader')
handler = next((handler for handler in logger.handlers if handler.name == task_log_reader), None)
dag_id = 'pipeline_nile_reconciliation'
task_id = 'nile_overcount_spend_resolution_task'
execution_date = parser.parse('2022-01-10T11:49:57.197933+00:00')
try_number=1
session = Session()
ti = session.query(TaskInstance).filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id == task_id,
TaskInstance.execution_date == execution_date).first()
dagrun = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date).first()
dagbag = DagBag(DAGS_FOLDER, read_dags_from_db=True)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(ti.task_id)
ti.dagrun = dagrun
handler.read(ti, try_number, {})
```
The following error log indicates errors in the log reading.
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.9/site-packages/airflow/utils/log/file_task_handler.py", line 239, in read
log, metadata = self._read(task_instance, try_number_element, metadata)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/elasticsearch/log/es_task_handler.py", line 168, in _read
log_id = self._render_log_id(ti, try_number)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/elasticsearch/log/es_task_handler.py", line 107, in _render_log_id
data_interval_start = self._clean_date(dag_run.data_interval_start)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/elasticsearch/log/es_task_handler.py", line 134, in _clean_date
return value.strftime("%Y_%m_%dT%H_%M_%S_%f")
AttributeError: 'NoneType' object has no attribute 'strftime'
```
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| All credit to @wolfier for finding this bug.
Seems like the problem in the code is that [this section](https://github.com/apache/airflow/blob/main/airflow/providers/elasticsearch/log/es_task_handler.py#L109-L114) requires resolving the `data_interval_start` and `data_interval_end` even if the format you specify does not use them, and they don't exist on manual dagruns from before 2.2
It should use `dag.get_run_data_interval(dag_run)` instead. How to get the correct DAG object, however, is the problem; `dag_run.get_dag()` *might* work, but if it doesn’t, we need to do something else, perhaps `ti.task.dag` or something else.
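A minimal sketch of the fallback the merged patch above ends up using (hedged; shown here as a free function rather than the actual handler methods):
```python
def resolve_data_interval(ti):
    # Prefer the timetable-derived interval from the task's DAG; fall back to
    # the columns stored on the DagRun, which are None for manual runs created
    # before the 2.2 upgrade.
    dag_run = ti.get_dagrun()
    try:
        return ti.task.dag.get_run_data_interval(dag_run)
    except AttributeError:  # ti.task is not always set
        return (dag_run.data_interval_start, dag_run.data_interval_end)
```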
Also whoever works on this should also fix the rendering function in `FileTaskHandler` to add those same data-interval-related context variables. | 2022-02-03T10:11:25Z | [] | [] |
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.9/site-packages/airflow/utils/log/file_task_handler.py", line 239, in read
log, metadata = self._read(task_instance, try_number_element, metadata)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/elasticsearch/log/es_task_handler.py", line 168, in _read
log_id = self._render_log_id(ti, try_number)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/elasticsearch/log/es_task_handler.py", line 107, in _render_log_id
data_interval_start = self._clean_date(dag_run.data_interval_start)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/elasticsearch/log/es_task_handler.py", line 134, in _clean_date
return value.strftime("%Y_%m_%dT%H_%M_%S_%f")
AttributeError: 'NoneType' object has no attribute 'strftime'
| 2,486 |
|||
apache/airflow | apache__airflow-21307 | 2c5f636e5cfac7cc246d6ed93660bf0f8e968982 | diff --git a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
--- a/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
+++ b/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
@@ -59,7 +59,8 @@ def description(self):
"""Fetch first row to initialize cursor description when using server side cursor."""
if not self.initialized:
element = self.cursor.fetchone()
- self.rows.append(element)
+ if element is not None:
+ self.rows.append(element)
self.initialized = True
return self.cursor.description
| PostgresToGCSOperator fail on empty table and use_server_side_cursor=True
### Apache Airflow Provider(s)
google
### Versions of Apache Airflow Providers
apache-airflow-providers-google==6.1.0
### Apache Airflow version
2.2.2 (latest released)
### Operating System
Debian GNU/Linux 10 (buster)
### Deployment
Other Docker-based deployment
### Deployment details
_No response_
### What happened
When I execute `PostgresToGCSOperator` on an empty table with `use_server_side_cursor=True`, the operator fails with this error:
```
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1332, in _run_raw_task
self._execute_task_with_callbacks(context)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1458, in _execute_task_with_callbacks
result = self._execute_task(context, self.task)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1514, in _execute_task
result = execute_callable(context=context)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/providers/google/cloud/transfers/sql_to_gcs.py", line 154, in execute
files_to_upload = self._write_local_data_files(cursor)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/providers/google/cloud/transfers/sql_to_gcs.py", line 213, in _write_local_data_files
row = self.convert_types(schema, col_type_dict, row)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/providers/google/cloud/transfers/sql_to_gcs.py", line 174, in convert_types
return [self.convert_type(value, col_type_dict.get(name)) for name, value in zip(schema, row)]
TypeError: 'NoneType' object is not iterable
```
The operator invocation I'm using:
```python
task_send = PostgresToGCSOperator(
task_id=f'send_{table}',
postgres_conn_id='postgres_raw',
gcp_conn_id=gcp_conn_id,
sql=f'SELECT * FROM public.{table}',
use_server_side_cursor=True,
bucket=bucket,
filename=f'{table}.csv',
export_format='csv',
)
```
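(For reference, a hedged sketch of the guard the patch above applies when the server-side-cursor wrapper peeks the first row to populate the cursor description; the class name here is illustrative, not the real Airflow wrapper.)
```python
class PeekingServerSideCursor:  # illustrative name, not the actual Airflow wrapper
    """Wrap a server-side cursor, fetching one row up front so .description is populated."""

    def __init__(self, cursor):
        self.cursor = cursor
        self.rows = []          # rows peeked here are replayed before further fetches
        self.initialized = False

    @property
    def description(self):
        if not self.initialized:
            element = self.cursor.fetchone()
            if element is not None:  # empty table: fetchone() returns None, nothing to replay
                self.rows.append(element)
            self.initialized = True
        return self.cursor.description
```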
### What you expected to happen
I expected that for an empty table the operator would not create a file or upload anything to Google Cloud.
### How to reproduce
- Create an empty PostgreSQL table.
- Create a dag with a PostgresToGCSOperator task that uploads this table to Google Cloud.
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
Maybe you would like to contribute a PR to fix it @PikaYellow35 - we have > 1800 contributors, you can become one of them :)
Hi @potiuk - I am new to Airflow and would like to try and take a pass at this.
I have breeze set up and have created one [PR](https://github.com/apache/airflow/pull/19727), but nothing on the operators side. I will go through the corresponding code and contributing page again. Do let me know if you have any pointers.
@rsg17 feel free to join Airflow slack. There are channels for support like: `airflow-how-to-pr` | 2022-02-03T21:12:47Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1332, in _run_raw_task
self._execute_task_with_callbacks(context)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1458, in _execute_task_with_callbacks
result = self._execute_task(context, self.task)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1514, in _execute_task
result = execute_callable(context=context)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/providers/google/cloud/transfers/sql_to_gcs.py", line 154, in execute
files_to_upload = self._write_local_data_files(cursor)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/providers/google/cloud/transfers/sql_to_gcs.py", line 213, in _write_local_data_files
row = self.convert_types(schema, col_type_dict, row)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/providers/google/cloud/transfers/sql_to_gcs.py", line 174, in convert_types
return [self.convert_type(value, col_type_dict.get(name)) for name, value in zip(schema, row)]
TypeError: 'NoneType' object is not iterable
| 2,487 |
|||
apache/airflow | apache__airflow-22685 | 2cf1ae30538e109627417e8f0c1650addac3311b | diff --git a/airflow/dag_processing/manager.py b/airflow/dag_processing/manager.py
--- a/airflow/dag_processing/manager.py
+++ b/airflow/dag_processing/manager.py
@@ -1065,6 +1065,7 @@ def prepare_file_path_queue(self):
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
+ processors_to_remove = []
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
@@ -1080,6 +1081,14 @@ def _kill_timed_out_processors(self):
Stats.incr('dag_file_processor_timeouts')
processor.kill()
+ # Clean up processor references
+ self.waitables.pop(processor.waitable_handle)
+ processors_to_remove.append(file_path)
+
+ # Clean up `self._processors` after iterating over it
+ for proc in processors_to_remove:
+ self._processors.pop(proc)
+
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
diff --git a/airflow/dag_processing/processor.py b/airflow/dag_processing/processor.py
--- a/airflow/dag_processing/processor.py
+++ b/airflow/dag_processing/processor.py
@@ -21,6 +21,7 @@
import os
import signal
import threading
+import time
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
@@ -231,6 +232,12 @@ def _kill_process(self) -> None:
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
+
+ # Reap the spawned zombie. We active wait, because in Python 3.9 `waitpid` might lead to an
+ # exception, due to change in Python standard library and possibility of race condition
+ # see https://bugs.python.org/issue42558
+ while self._process._popen.poll() is None: # type: ignore
+ time.sleep(0.001)
if self._parent_channel:
self._parent_channel.close()
| dag_processing code needs to handle OSError("handle is closed") in poll() and recv() calls
### Apache Airflow version
2.1.4
### What happened
The problem also exists in the latest version of the Airflow code, but I experienced it in 2.1.4.
This is the root cause of problems experienced in [issue#13542](https://github.com/apache/airflow/issues/13542).
I'll provide a stack trace below. The problem is in the code of airflow/dag_processing/processor.py (and manager.py): all poll() and recv() calls on the multiprocessing communication channels need to be wrapped in exception handlers that handle OSError("handle is closed") exceptions. If one looks at the Python multiprocessing source code, it throws this exception when the channel's handle has been closed.
This occurs in Airflow when a DAG File Processor has been killed or terminated; the Airflow code closes the communication channel when it is killing or terminating a DAG File Processor process (for example, when a dag_file_processor_timeout occurs). This killing or terminating happens asynchronously (in another process) from the process calling poll() or recv() on the communication channel, which is why an exception needs to be handled. A pre-check of the handle being open is not good enough, because the other process doing the kill or terminate may close the handle between your pre-check and the actual poll() or recv() call (a race condition).
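A hedged sketch of the defensive pattern described above (note that the patch shown at the top of this record takes a different route, cleaning up the killed processor's references and reaping the child rather than wrapping every call):
```python
def safe_poll(channel, timeout=0):
    # poll() can race with another process killing the DAG file processor and
    # closing this channel; multiprocessing then raises OSError("handle is
    # closed"), so treat that the same as "nothing to read" instead of trying
    # to pre-check whether the handle is still open.
    try:
        return channel.poll(timeout)
    except OSError:
        return False
```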
### What you expected to happen
Here is the stack trace of the occurrence I saw:
```
[2022-03-08 17:41:06,101] {taskinstance.py:914} DEBUG - <TaskInstance: staq_report_daily.gs.wait_staq_csv_file 2022-03-06 17:15:00+00:00 [running]> dependency 'Not In Retry Period' PASSED: True, The context specified that being in a retry period was permitted.
[2022-03-08 17:41:06,101] {taskinstance.py:904} DEBUG - Dependencies all met for <TaskInstance: staq_report_daily.gs.wait_staq_csv_file 2022-03-06 17:15:00+00:00 [running]>
[2022-03-08 17:41:06,119] {scheduler_job.py:1196} DEBUG - Skipping SLA check for <DAG: gdai_gcs_sync> because no tasks in DAG have SLAs
[2022-03-08 17:41:06,119] {scheduler_job.py:1196} DEBUG - Skipping SLA check for <DAG: unity_creative_import_process> because no tasks in DAG have SLAs
[2022-03-08 17:41:06,119] {scheduler_job.py:1196} DEBUG - Skipping SLA check for <DAG: sales_dm_to_bq> because no tasks in DAG have SLAs
[2022-03-08 17:44:50,454] {settings.py:302} DEBUG - Disposing DB connection pool (PID 1902)
Process ForkProcess-1:
Traceback (most recent call last):
File "/opt/python3.8/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/opt/python3.8/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 370, in _run_processor_manager
processor_manager.start()
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 610, in start
return self._run_parsing_loop()
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 671, in _run_parsing_loop
self._collect_results_from_processor(processor)
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 981, in _collect_results_from_processor
if processor.result is not None:
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/processor.py", line 321, in result
if not self.done:
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/processor.py", line 286, in done
if self._parent_channel.poll():
File "/opt/python3.8/lib/python3.8/multiprocessing/connection.py", line 255, in poll
self._check_closed()
File "/opt/python3.8/lib/python3.8/multiprocessing/connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
```
This corresponded in time to the following log entries:
```
% kubectl logs airflow-scheduler-58c997dd98-n8xr8 -c airflow-scheduler --previous | egrep 'Ran scheduling loop in|[[]heartbeat[]]'
[2022-03-08 17:40:47,586] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.56 seconds
[2022-03-08 17:40:49,146] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.56 seconds
[2022-03-08 17:40:50,675] {base_job.py:227} DEBUG - [heartbeat]
[2022-03-08 17:40:50,687] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.54 seconds
[2022-03-08 17:40:52,144] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.46 seconds
[2022-03-08 17:40:53,620] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.47 seconds
[2022-03-08 17:40:55,085] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.46 seconds
[2022-03-08 17:40:56,169] {base_job.py:227} DEBUG - [heartbeat]
[2022-03-08 17:40:56,180] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.49 seconds
[2022-03-08 17:40:57,667] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.49 seconds
[2022-03-08 17:40:59,148] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.48 seconds
[2022-03-08 17:41:00,618] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.47 seconds
[2022-03-08 17:41:01,742] {base_job.py:227} DEBUG - [heartbeat]
[2022-03-08 17:41:01,757] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.58 seconds
[2022-03-08 17:41:03,133] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.55 seconds
[2022-03-08 17:41:04,664] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 0.53 seconds
[2022-03-08 17:44:50,649] {base_job.py:227} DEBUG - [heartbeat]
[2022-03-08 17:44:50,814] {scheduler_job.py:813} DEBUG - Ran scheduling loop in 225.15 seconds
```
You can see that when this exception occurred, there was a hang in the scheduler for almost 4 minutes, no scheduling loops, and no scheduler_job heartbeats.
This hang probably also caused stuck queued jobs as issue#13542 describes.
### How to reproduce
This is hard to reproduce because it is a race condition. But you might be able to reproduce it by putting top-level code that calls sleep in a dag file, so that the file takes longer to parse than the core dag_file_processor_timeout setting. That would cause the parsing processes to be terminated, creating the conditions for this bug to occur.
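A hypothetical dag file along those lines (the file name and the 120-second sleep are assumptions; the sleep just needs to exceed `[core] dag_file_processor_timeout`):
```python
# slow_to_parse.py -- hypothetical repro: the top-level sleep makes parsing this
# file exceed the DAG file processor timeout, so the parsing process is killed.
import time
from datetime import datetime

from airflow import DAG

time.sleep(120)  # assumed to be longer than [core] dag_file_processor_timeout

with DAG(dag_id="slow_to_parse", start_date=datetime(2022, 1, 1), schedule_interval=None):
    pass
```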
### Operating System
NAME="Ubuntu" VERSION="18.04.6 LTS (Bionic Beaver)" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 18.04.6 LTS" VERSION_ID="18.04" HOME_URL="https://www.ubuntu.com/" SUPPORT_URL="https://help.ubuntu.com/" BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" VERSION_CODENAME=bionic UBUNTU_CODENAME=bionic
### Versions of Apache Airflow Providers
Not relevant, this is a core dag_processing issue.
### Deployment
Composer
### Deployment details
"composer-1.17.6-airflow-2.1.4"
In order to isolate the scheduler onto a separate machine, so as to avoid interference from other processes such as airflow-workers running on the same node, we created an additional node-pool for the scheduler and ran these k8s patches to move it there.
New node pool definition:
```HCL
{
name = "scheduler-pool"
machine_type = "n1-highcpu-8"
autoscaling = false
node_count = 1
disk_type = "pd-balanced"
disk_size = 64
image_type = "COS"
auto_repair = true
auto_upgrade = true
max_pods_per_node = 32
},
```
patch.sh
```sh
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 namespace"
echo "Description: Isolate airflow-scheduler onto it's own node-pool (scheduler-pool)."
echo "Options:"
echo " namespace: kubernetes namespace used by Composer"
exit 1
fi
namespace=$1
set -eu
set -o pipefail
scheduler_patch="$(cat airflow-scheduler-patch.yaml)"
fluentd_patch="$(cat composer-fluentd-daemon-patch.yaml)"
set -x
kubectl -n default patch daemonset composer-fluentd-daemon -p "${fluentd_patch}"
kubectl -n ${namespace} patch deployment airflow-scheduler -p "${scheduler_patch}"
```
composer-fluentd-daemon-patch.yaml
```yaml
spec:
template:
spec:
nodeSelector: null
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cloud.google.com/gke-nodepool
operator: In
values:
- default-pool
- scheduler-pool
```
airflow-scheduler-patch.yaml
```yaml
spec:
template:
spec:
nodeSelector:
cloud.google.com/gke-nodepool: scheduler-pool
containers:
- name: gcs-syncd
resources:
limits:
memory: 2Gi
```
### Anything else
Regarding the PR checkbox below: I could submit one, but it'd be untested code; I don't really have the environment set up to test the patch.
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
Feel free to submit a pull request to handle the exception! We can figure out how to test the solution in the review process.
BTW I don’t know what your current fix looks like, but `OSError` has an `errno` attribute, checking that in the error handling code may be appropriate as well. (Not sure, I don’t even know what errno this error has right now.)
I plan to submit a PR within the next two weeks. | 2022-04-01T10:45:10Z | [] | [] |
Traceback (most recent call last):
File "/opt/python3.8/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/opt/python3.8/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 370, in _run_processor_manager
processor_manager.start()
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 610, in start
return self._run_parsing_loop()
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 671, in _run_parsing_loop
self._collect_results_from_processor(processor)
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/manager.py", line 981, in _collect_results_from_processor
if processor.result is not None:
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/processor.py", line 321, in result
if not self.done:
File "/opt/python3.8/lib/python3.8/site-packages/airflow/dag_processing/processor.py", line 286, in done
if self._parent_channel.poll():
File "/opt/python3.8/lib/python3.8/multiprocessing/connection.py", line 255, in poll
self._check_closed()
File "/opt/python3.8/lib/python3.8/multiprocessing/connection.py", line 136, in _check_closed
raise OSError("handle is closed")
OSError: handle is closed
| 2,505 |
|||
apache/airflow | apache__airflow-23053 | c3d883a971a8e4e65ccc774891928daaaa0f4442 | diff --git a/airflow/jobs/backfill_job.py b/airflow/jobs/backfill_job.py
--- a/airflow/jobs/backfill_job.py
+++ b/airflow/jobs/backfill_job.py
@@ -266,7 +266,7 @@ def _manage_executor_state(
if ti.state not in self.STATES_COUNT_AS_RUNNING:
# Don't use ti.task; if this task is mapped, that attribute
# would hold the unmapped task. We need to original task here.
- for node in self.dag.get_task(ti.task_id, include_subdags=True).mapped_dependants():
+ for node in self.dag.get_task(ti.task_id, include_subdags=True).iter_mapped_dependants():
new_tis, num_mapped_tis = node.expand_mapped_task(ti.run_id, session=session)
yield node, ti.run_id, new_tis, num_mapped_tis
diff --git a/airflow/models/mappedoperator.py b/airflow/models/mappedoperator.py
--- a/airflow/models/mappedoperator.py
+++ b/airflow/models/mappedoperator.py
@@ -30,6 +30,7 @@
Dict,
FrozenSet,
Iterable,
+ Iterator,
List,
Optional,
Sequence,
@@ -76,6 +77,7 @@
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DAG
+ from airflow.models.operator import Operator
from airflow.models.taskinstance import TaskInstance
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
@@ -775,6 +777,13 @@ def _find_index_for_this_field(index: int) -> int:
return k, v
raise IndexError(f"index {map_index} is over mapped length")
+ def iter_mapped_dependencies(self) -> Iterator["Operator"]:
+ """Upstream dependencies that provide XComs used by this task for task mapping."""
+ from airflow.models.xcom_arg import XComArg
+
+ for ref in XComArg.iter_xcom_args(self._get_expansion_kwargs()):
+ yield ref.operator
+
@cached_property
def parse_time_mapped_ti_count(self) -> Optional[int]:
"""
diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -2333,7 +2333,7 @@ def _record_task_map_for_downstreams(self, task: "Operator", value: Any, *, sess
# currently possible for a downstream to depend on one individual mapped
# task instance, only a task as a whole. This will change in AIP-42
# Phase 2, and we'll need to further analyze the mapped task case.
- if task.is_mapped or not task.has_mapped_dependants():
+ if task.is_mapped or next(task.iter_mapped_dependants(), None) is None:
return
if value is None:
raise XComForMappingNotPushed()
diff --git a/airflow/models/taskmixin.py b/airflow/models/taskmixin.py
--- a/airflow/models/taskmixin.py
+++ b/airflow/models/taskmixin.py
@@ -291,13 +291,20 @@ def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:
"""This is used by SerializedTaskGroup to serialize a task group's content."""
raise NotImplementedError()
- def mapped_dependants(self) -> Iterator["MappedOperator"]:
- """Return any mapped nodes that are direct dependencies of the current task
+ def _iter_all_mapped_downstreams(self) -> Iterator["MappedOperator"]:
+ """Return mapped nodes that are direct dependencies of the current task.
For now, this walks the entire DAG to find mapped nodes that has this
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record an DAG node's all downstream nodes instead.
+
+ Note that this does not guarantee the returned tasks actually use the
+ current task for task mapping, but only checks those task are mapped
+ operators, and are downstreams of the current task.
+
+ To get a list of tasks that uses the current task for task mapping, use
+ :meth:`iter_mapped_dependants` instead.
"""
from airflow.models.mappedoperator import MappedOperator
from airflow.utils.task_group import TaskGroup
@@ -315,7 +322,7 @@ def _walk_group(group: TaskGroup) -> Iterable[Tuple[str, DAGNode]]:
tg = self.task_group
if not tg:
- raise RuntimeError("Cannot check for mapped_dependants when not attached to a DAG")
+ raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
for key, child in _walk_group(tg):
if key == self.node_id:
continue
@@ -324,12 +331,16 @@ def _walk_group(group: TaskGroup) -> Iterable[Tuple[str, DAGNode]]:
if self.node_id in child.upstream_task_ids:
yield child
- def has_mapped_dependants(self) -> bool:
- """Whether any downstream dependencies depend on this task for mapping.
+ def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
+ """Return mapped nodes that depend on the current task the expansion.
For now, this walks the entire DAG to find mapped nodes that has this
current task as an upstream. We cannot use ``downstream_list`` since it
only contains operators, not task groups. In the future, we should
provide a way to record an DAG node's all downstream nodes instead.
"""
- return any(self.mapped_dependants())
+ return (
+ downstream
+ for downstream in self._iter_all_mapped_downstreams()
+ if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
+ )
diff --git a/airflow/models/xcom_arg.py b/airflow/models/xcom_arg.py
--- a/airflow/models/xcom_arg.py
+++ b/airflow/models/xcom_arg.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Union
+from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models.abstractoperator import AbstractOperator
@@ -156,21 +156,31 @@ def resolve(self, context: Context, session: "Session" = NEW_SESSION) -> Any:
return result
@staticmethod
- def apply_upstream_relationship(op: "Operator", arg: Any):
- """
- Set dependency for XComArgs.
+ def iter_xcom_args(arg: Any) -> Iterator["XComArg"]:
+ """Return XComArg instances in an arbitrary value.
- This looks for XComArg objects in ``arg`` "deeply" (looking inside lists, dicts and classes decorated
- with "template_fields") and sets the relationship to ``op`` on any found.
+ This recursively traverse ``arg`` and look for XComArg instances in any
+ collection objects, and instances with ``template_fields`` set.
"""
if isinstance(arg, XComArg):
- op.set_upstream(arg.operator)
+ yield arg
elif isinstance(arg, (tuple, set, list)):
for elem in arg:
- XComArg.apply_upstream_relationship(op, elem)
+ yield from XComArg.iter_xcom_args(elem)
elif isinstance(arg, dict):
for elem in arg.values():
- XComArg.apply_upstream_relationship(op, elem)
+ yield from XComArg.iter_xcom_args(elem)
elif isinstance(arg, AbstractOperator):
for elem in arg.template_fields:
- XComArg.apply_upstream_relationship(op, elem)
+ yield from XComArg.iter_xcom_args(elem)
+
+ @staticmethod
+ def apply_upstream_relationship(op: "Operator", arg: Any):
+ """Set dependency for XComArgs.
+
+ This looks for XComArg objects in ``arg`` "deeply" (looking inside
+ collections objects and classes decorated with ``template_fields``), and
+ sets the relationship to ``op`` on any found.
+ """
+ for ref in XComArg.iter_xcom_args(arg):
+ op.set_upstream(ref.operator)
| A task's returned object should not be checked for mappability if the dag doesn't use it in an expansion.
### Apache Airflow version
main (development)
### What happened
Here's a dag:
```python3
with DAG(...) as dag:
@dag.task
def foo():
return "foo"
@dag.task
def identity(thing):
return thing
foo() >> identity.expand(thing=[1, 2, 3])
```
`foo` fails with these task logs:
```
[2022-04-14, 14:15:26 UTC] {python.py:173} INFO - Done. Returned value was: foo
[2022-04-14, 14:15:26 UTC] {taskinstance.py:1837} WARNING - We expected to get frame set in local storage but it was not. Please report this as an issue with full logs at https://github.com/apache/airflow/issues/new
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1417, in _run_raw_task
self._execute_task_with_callbacks(context, test_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1564, in _execute_task_with_callbacks
result = self._execute_task(context, task_orig)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1634, in _execute_task
self._record_task_map_for_downstreams(task_orig, result, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2314, in _record_task_map_for_downstreams
raise UnmappableXComTypePushed(value)
airflow.exceptions.UnmappableXComTypePushed: unmappable return type 'str'
```
### What you think should happen instead
Airflow shouldn't bother checking `foo`'s return type for mappability because its return value is never used in an expansion.
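A hedged sketch of the check the patch above introduces for exactly that reason (assuming the `iter_mapped_dependants` helper added there):
```python
def should_validate_pushed_xcom(task):
    # Mapped tasks are skipped outright; otherwise only validate when some
    # downstream mapped operator's expansion kwargs actually reference this
    # task (which is what iter_mapped_dependants() now filters on).
    if task.is_mapped:
        return False
    return next(task.iter_mapped_dependants(), None) is not None
```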
### How to reproduce
Run the dag, notice the failure
### Operating System
debian (docker)
### Versions of Apache Airflow Providers
n/a
### Deployment
Astronomer
### Deployment details
using image with ref: e5dd6fdcfd2f53ed90e29070711c121de447b404
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Marking this to 2.3.0, but we _might_ push it to 2.3.1 | 2022-04-18T08:29:58Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1417, in _run_raw_task
self._execute_task_with_callbacks(context, test_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1564, in _execute_task_with_callbacks
result = self._execute_task(context, task_orig)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1634, in _execute_task
self._record_task_map_for_downstreams(task_orig, result, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2314, in _record_task_map_for_downstreams
raise UnmappableXComTypePushed(value)
airflow.exceptions.UnmappableXComTypePushed: unmappable return type 'str'
| 2,516 |
|||
apache/airflow | apache__airflow-23119 | 70eede5dd6924a4eb74b7600cce2c627e51a3b7e | diff --git a/airflow/dag_processing/processor.py b/airflow/dag_processing/processor.py
--- a/airflow/dag_processing/processor.py
+++ b/airflow/dag_processing/processor.py
@@ -604,7 +604,7 @@ def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
if request.is_failure_callback:
- ti = TI(task, run_id=simple_ti.run_id)
+ ti = TI(task, run_id=simple_ti.run_id, map_index=simple_ti.map_index)
# TODO: Use simple_ti to improve performance here in the future
ti.refresh_from_db()
ti.handle_failure_with_callback(error=request.msg, test_mode=self.UNIT_TEST_MODE)
diff --git a/airflow/models/taskfail.py b/airflow/models/taskfail.py
--- a/airflow/models/taskfail.py
+++ b/airflow/models/taskfail.py
@@ -63,13 +63,13 @@ class TaskFail(Base):
viewonly=True,
)
- def __init__(self, task, run_id, start_date, end_date, map_index):
- self.dag_id = task.dag_id
- self.task_id = task.task_id
- self.run_id = run_id
- self.map_index = map_index
- self.start_date = start_date
- self.end_date = end_date
+ def __init__(self, ti):
+ self.dag_id = ti.dag_id
+ self.task_id = ti.task_id
+ self.run_id = ti.run_id
+ self.map_index = ti.map_index
+ self.start_date = ti.start_date
+ self.end_date = ti.end_date
if self.end_date and self.start_date:
self.duration = int((self.end_date - self.start_date).total_seconds())
else:
diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -20,6 +20,7 @@
import hashlib
import logging
import math
+import operator
import os
import pickle
import signal
@@ -133,6 +134,7 @@
if TYPE_CHECKING:
+ from airflow.models.baseoperator import BaseOperator
from airflow.models.dag import DAG, DagModel
from airflow.models.dagrun import DagRun
from airflow.models.operator import Operator
@@ -1901,24 +1903,15 @@ def handle_failure(
if not test_mode:
self.refresh_from_db(session)
- task = self.task.unmap()
self.end_date = timezone.utcnow()
self.set_duration()
- Stats.incr(f'operator_failures_{task.task_type}', 1, 1)
+ Stats.incr(f'operator_failures_{self.task.task_type}')
Stats.incr('ti_failures')
if not test_mode:
session.add(Log(State.FAILED, self))
# Log failure duration
- session.add(
- TaskFail(
- task=task,
- run_id=self.run_id,
- start_date=self.start_date,
- end_date=self.end_date,
- map_index=self.map_index,
- )
- )
+ session.add(TaskFail(ti=self))
self.clear_next_method_args()
@@ -1934,20 +1927,26 @@ def handle_failure(
# only mark task instance as FAILED if the next task instance
# try_number exceeds the max_tries ... or if force_fail is truthy
+ task = None
+ try:
+ task = self.task.unmap()
+ except Exception:
+ self.log.error("Unable to unmap task, can't determine if we need to send an alert email or not")
+
if force_fail or not self.is_eligible_to_retry():
self.state = State.FAILED
- email_for_state = task.email_on_failure
+ email_for_state = operator.attrgetter('email_on_failure')
else:
if self.state == State.QUEUED:
# We increase the try_number so as to fail the task if it fails to start after sometime
self._try_number += 1
self.state = State.UP_FOR_RETRY
- email_for_state = task.email_on_retry
+ email_for_state = operator.attrgetter('email_on_retry')
self._log_state('Immediate failure requested. ' if force_fail else '')
- if email_for_state and task.email:
+ if task and email_for_state(task) and task.email:
try:
- self.email_alert(error)
+ self.email_alert(error, task)
except Exception:
self.log.exception('Failed to send email to: %s', task.email)
@@ -2241,11 +2240,15 @@ def render_k8s_pod_yaml(self) -> Optional[dict]:
sanitized_pod = ApiClient().sanitize_for_serialization(pod)
return sanitized_pod
- def get_email_subject_content(self, exception: BaseException) -> Tuple[str, str, str]:
+ def get_email_subject_content(
+ self, exception: BaseException, task: Optional["BaseOperator"] = None
+ ) -> Tuple[str, str, str]:
"""Get the email subject content for exceptions."""
# For a ti from DB (without ti.task), return the default value
# Reuse it for smart sensor to send default email alert
- use_default = not hasattr(self, 'task')
+ if task is None:
+ task = getattr(self, 'task')
+ use_default = task is None
exception_html = str(exception).replace('\n', '<br>')
default_subject = 'Airflow alert: {{ti}}'
@@ -2312,13 +2315,14 @@ def render(key: str, content: str) -> str:
return subject, html_content, html_content_err
- def email_alert(self, exception):
+ def email_alert(self, exception, task: "BaseOperator"):
"""Send alert email with exception information."""
- subject, html_content, html_content_err = self.get_email_subject_content(exception)
+ subject, html_content, html_content_err = self.get_email_subject_content(exception, task=task)
+ assert task.email
try:
- send_email(self.task.email, subject, html_content)
+ send_email(task.email, subject, html_content)
except Exception:
- send_email(self.task.email, subject, html_content_err)
+ send_email(task.email, subject, html_content_err)
def set_duration(self) -> None:
"""Set TI duration"""
@@ -2573,9 +2577,10 @@ def __init__(
dag_id: str,
task_id: str,
run_id: str,
- start_date: datetime,
- end_date: datetime,
+ start_date: Optional[datetime],
+ end_date: Optional[datetime],
try_number: int,
+ map_index: int,
state: str,
executor_config: Any,
pool: str,
@@ -2584,21 +2589,20 @@ def __init__(
run_as_user: Optional[str] = None,
priority_weight: Optional[int] = None,
):
- self._dag_id: str = dag_id
- self._task_id: str = task_id
- self._run_id: str = run_id
- self._start_date: datetime = start_date
- self._end_date: datetime = end_date
- self._try_number: int = try_number
- self._state: str = state
- self._executor_config: Any = executor_config
- self._run_as_user: Optional[str] = None
- self._run_as_user = run_as_user
- self._pool: str = pool
- self._priority_weight: Optional[int] = None
- self._priority_weight = priority_weight
- self._queue: str = queue
- self._key = key
+ self.dag_id = dag_id
+ self.task_id = task_id
+ self.run_id = run_id
+ self.map_index = map_index
+ self.start_date = start_date
+ self.end_date = end_date
+ self.try_number = try_number
+ self.state = state
+ self.executor_config = executor_config
+ self.run_as_user = run_as_user
+ self.pool = pool
+ self.priority_weight = priority_weight
+ self.queue = queue
+ self.key = key
def __eq__(self, other):
if isinstance(other, self.__class__):
@@ -2611,6 +2615,7 @@ def from_ti(cls, ti: TaskInstance):
dag_id=ti.dag_id,
task_id=ti.task_id,
run_id=ti.run_id,
+ map_index=ti.map_index,
start_date=ti.start_date,
end_date=ti.end_date,
try_number=ti.try_number,
@@ -2625,80 +2630,16 @@ def from_ti(cls, ti: TaskInstance):
@classmethod
def from_dict(cls, obj_dict: dict):
- ti_key = obj_dict.get('_key', [])
- start_date: Union[Any, datetime] = (
- datetime.fromisoformat(str(obj_dict.get('_start_date')))
- if obj_dict.get('_start_date') is not None
- else None
- )
- end_date: Union[Any, datetime] = (
- datetime.fromisoformat(str(obj_dict.get('_end_date')))
- if obj_dict.get('_end_date') is not None
- else None
- )
- return cls(
- dag_id=str(obj_dict['_dag_id']),
- task_id=str(obj_dict.get('_task_id')),
- run_id=str(obj_dict.get('_run_id')),
- start_date=start_date,
- end_date=end_date,
- try_number=obj_dict.get('_try_number', 1),
- state=str(obj_dict.get('_state')),
- executor_config=obj_dict.get('_executor_config'),
- run_as_user=obj_dict.get('_run_as_user', None),
- pool=str(obj_dict.get('_pool')),
- priority_weight=obj_dict.get('_priority_weight', None),
- queue=str(obj_dict.get('_queue')),
- key=TaskInstanceKey(ti_key[0], ti_key[1], ti_key[2], ti_key[3], ti_key[4]),
- )
-
- @property
- def dag_id(self) -> str:
- return self._dag_id
-
- @property
- def task_id(self) -> str:
- return self._task_id
-
- @property
- def run_id(self) -> str:
- return self._run_id
-
- @property
- def start_date(self) -> datetime:
- return self._start_date
-
- @property
- def end_date(self) -> datetime:
- return self._end_date
-
- @property
- def try_number(self) -> int:
- return self._try_number
-
- @property
- def state(self) -> str:
- return self._state
-
- @property
- def pool(self) -> str:
- return self._pool
-
- @property
- def priority_weight(self) -> Optional[int]:
- return self._priority_weight
-
- @property
- def queue(self) -> str:
- return self._queue
-
- @property
- def key(self) -> TaskInstanceKey:
- return self._key
-
- @property
- def executor_config(self):
- return self._executor_config
+ ti_key = TaskInstanceKey(*obj_dict.pop('key'))
+ start_date = None
+ end_date = None
+ start_date_str: Optional[str] = obj_dict.pop('start_date')
+ end_date_str: Optional[str] = obj_dict.pop('end_date')
+ if start_date_str:
+ start_date = timezone.parse(start_date_str)
+ if end_date_str:
+ end_date = timezone.parse(end_date_str)
+ return cls(**obj_dict, start_date=start_date, end_date=end_date, key=ti_key)
STATICA_HACK = True
| Mapped KubernetesPodOperator "fails" but UI shows it is as still running
### Apache Airflow version
2.3.0b1 (pre-release)
### What happened
This dag has a problem. The `name` kwarg is missing from one of the mapped instances.
```python3
from datetime import datetime
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import (
KubernetesPodOperator,
)
from airflow.configuration import conf
namespace = conf.get("kubernetes", "NAMESPACE")
with DAG(
dag_id="kpo_mapped",
start_date=datetime(1970, 1, 1),
schedule_interval=None,
) as dag:
KubernetesPodOperator(
task_id="cowsay_static_named",
name="cowsay_statc",
namespace=namespace,
image="docker.io/rancher/cowsay",
cmds=["cowsay"],
arguments=["moo"],
)
KubernetesPodOperator.partial(
task_id="cowsay_mapped",
# name="cowsay_mapped", # required field missing
image="docker.io/rancher/cowsay",
namespace=namespace,
cmds=["cowsay"],
).expand(arguments=[["mooooove"], ["cow"], ["get out the way"]])
KubernetesPodOperator.partial(
task_id="cowsay_mapped_named",
name="cowsay_mapped",
namespace=namespace,
image="docker.io/rancher/cowsay",
cmds=["cowsay"],
).expand(arguments=[["mooooove"], ["cow"], ["get out the way"]])
```
If you omit that field in an unmapped task, you get a dag parse error, which is appropriate. But omitting it from the mapped task gives you this runtime error in the task logs:
```
[2022-04-20, 05:11:02 UTC] {standard_task_runner.py:52} INFO - Started process 60 to run task
[2022-04-20, 05:11:02 UTC] {standard_task_runner.py:79} INFO - Running: ['airflow', 'tasks', 'run', 'kpo_mapped', 'cowsay_mapped', 'manual__2022-04-20T05:11:01+00:00', '--job-id', '12', '--raw', '--subdir', 'DAGS_FOLDER/dags/taskmap/kpo_mapped.py', '--cfg-path', '/tmp/tmp_g3sj496', '--map-index', '0', '--error-file', '/tmp/tmp2_313wxj']
[2022-04-20, 05:11:02 UTC] {standard_task_runner.py:80} INFO - Job 12: Subtask cowsay_mapped
[2022-04-20, 05:11:02 UTC] {task_command.py:369} INFO - Running <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T05:11:01+00:00 map_index=0 [running]> on host airflow-worker-65f9fd9d5b-vpgnk
[2022-04-20, 05:11:02 UTC] {taskinstance.py:1863} WARNING - We expected to get frame set in local storage but it was not. Please report this as an issue with full logs at https://github.com/apache/airflow/issues/new
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1440, in _run_raw_task
self._execute_task_with_callbacks(context, test_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1544, in _execute_task_with_callbacks
task_orig = self.render_templates(context=context)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2210, in render_templates
rendered_task = self.task.render_template_fields(context)
File "/usr/local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 722, in render_template_fields
unmapped_task = self.unmap(unmap_kwargs=kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 508, in unmap
op = self.operator_class(**unmap_kwargs, _airflow_from_mapped=True)
File "/usr/local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 390, in apply_defaults
result = func(self, **kwargs, default_args=default_args)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py", line 259, in __init__
self.name = self._set_name(name)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py", line 442, in _set_name
raise AirflowException("`name` is required unless `pod_template_file` or `full_pod_spec` is set")
airflow.exceptions.AirflowException: `name` is required unless `pod_template_file` or `full_pod_spec` is set
```
But rather than failing the task, Airflow just thinks that the task is still running:
<img width="833" alt="Screen Shot 2022-04-19 at 11 13 47 PM" src="https://user-images.githubusercontent.com/5834582/164156155-41986d3a-d171-4943-8443-a0fc3c542988.png">
### What you think should happen instead
Ideally this error would be surfaced when the dag is first parsed. If that's not possible, then it should fail the task completely (i.e. a red square should show up in the grid view).
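(For reference, a hedged sketch of the guard the patch above adds to the failure path: unmapping a misconfigured mapped operator re-raises its validation error, so failure handling has to tolerate it instead of dying and leaving the task instance "running". The helper name here is illustrative.)
```python
def resolve_task_for_alerting(ti, log):
    # Per the patch above: ti.task.unmap() re-runs the operator's __init__,
    # which for this dag raises the same "`name` is required" error, so the
    # failure handler must not crash on it.
    task = None
    try:
        task = ti.task.unmap()
    except Exception:
        log.error("Unable to unmap task, can't determine if we need to send an alert email or not")
    return task
```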
### How to reproduce
Run the dag above
### Operating System
ubuntu (microk8s)
### Versions of Apache Airflow Providers
apache-airflow-providers-cncf-kubernetes | 4.0.0
### Deployment
Astronomer
### Deployment details
Deployed via the astronomer airflow helm chart, values:
```
airflow:
airflowHome: /usr/local/airflow
defaultAirflowRepository: 172.28.11.191:30500/airflow
defaultAirflowTag: tb11c-inner-operator-expansion
env:
- name: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACK_DEPTH
value: '99'
executor: CeleryExecutor
gid: 50000
images:
airflow:
pullPolicy: Always
repository: 172.28.11.191:30500/airflow
flower:
pullPolicy: Always
pod_template:
pullPolicy: Always
logs:
persistence:
enabled: true
size: 2Gi
scheduler:
livenessProbe:
timeoutSeconds: 45
triggerer:
livenessProbe:
timeoutSeconds: 45
```
Image base: `quay.io/astronomer/ap-airflow-dev:main`
Airflow version: `2.3.0.dev20220414`
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Yeah, two things we can do here:
1. Improve the validation for KPO when mapped (framework is in place already; a rough sketch of the kind of check follows after this list)
2. (This one is most important) Find out why the task isn't being detected as failed!
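A rough, hypothetical sketch of the kind of parse-time check item 1 is asking for; this is not the actual mapped-operator validation framework, and the required fields are simply taken from the error message in the report:

```python
# Hypothetical illustration only, not Airflow's validation framework.
# It shows the idea: reject an un-runnable mapped KubernetesPodOperator
# while the DAG file is parsed, instead of failing at unmap()/run time.
REQUIRED_ANY = ("name", "pod_template_file", "full_pod_spec")


def validate_kpo_partial_kwargs(partial_kwargs: dict) -> None:
    """Raise at parse time if none of the pod-identifying fields are present."""
    if not any(key in partial_kwargs for key in REQUIRED_ANY):
        raise ValueError(
            "`name` is required unless `pod_template_file` or `full_pod_spec` is set"
        )


# validate_kpo_partial_kwargs({"task_id": "cowsay_mapped"})  # would raise during parsing
```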
Curious, something is up with the zombie detection too:
```[2022-04-20 08:42:36,843] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1360}} WARNING - Failing (3) jobs without heartbeat after 2022-04-20 07:37:36.841761+00:00
[2022-04-20 08:42:36,844] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=2 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b5ebc0>, 'is_failure_callback': True}
[2022-04-20 08:42:36,844] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=1 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088c1cd60>, 'is_failure_callback': True}
[2022-04-20 08:42:36,844] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=0 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b7e320>, 'is_failure_callback': True}
[2022-04-20 08:42:46,880] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1360}} WARNING - Failing (3) jobs without heartbeat after 2022-04-20 07:37:46.878339+00:00
[2022-04-20 08:42:46,881] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=2 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b8bd30>, 'is_failure_callback': True}
[2022-04-20 08:42:46,881] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=1 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b7b8e0>, 'is_failure_callback': True}
[2022-04-20 08:42:46,881] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=0 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b5ebc0>, 'is_failure_callback': True}
[2022-04-20 08:42:49 +0100] [331200] [INFO] Handling signal: winch
[2022-04-20 08:42:56,915] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1360}} WARNING - Failing (3) jobs without heartbeat after 2022-04-20 07:37:56.912725+00:00
[2022-04-20 08:42:56,915] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=2 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b5d6f0>, 'is_failure_callback': True}
[2022-04-20 08:42:56,915] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=1 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b7e320>, 'is_failure_callback': True}
[2022-04-20 08:42:56,915] 331186 MainProcess {{airflow.jobs.scheduler_job.SchedulerJob scheduler_job.py:1368}} ERROR - Detected zombie job: {'full_filepath': '/home/ash/code/airflow/airflow/kpodag.py', 'msg': 'Detected <TaskInstance: kpo_mapped.cowsay_mapped manual__2022-04-20T07:42:30.995741+00:00 map_index=0 [running]> as zombie', 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7f6088b7f460>, 'is_failure_callback': True}
```
```
[2022-04-20, 05:11:02 UTC] {taskinstance.py:1863} WARNING - We expected to get frame set in local storage but it was not. Please report this as an issue with full logs at https://github.com/apache/airflow/issues/new
```
This line looks suspicious
@uranusjr That error happens if the exception occurs "outside" of user task code (before or after). Not a problem here, but we should fix it, yes. | 2022-04-20T14:03:34Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1440, in _run_raw_task
self._execute_task_with_callbacks(context, test_mode)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1544, in _execute_task_with_callbacks
task_orig = self.render_templates(context=context)
File "/usr/local/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2210, in render_templates
rendered_task = self.task.render_template_fields(context)
File "/usr/local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 722, in render_template_fields
unmapped_task = self.unmap(unmap_kwargs=kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 508, in unmap
op = self.operator_class(**unmap_kwargs, _airflow_from_mapped=True)
File "/usr/local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 390, in apply_defaults
result = func(self, **kwargs, default_args=default_args)
File "/usr/local/lib/python3.9/site-packages/airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py", line 259, in __init__
    self.name = self._set_name(name)
| 2,520 |
|||
apache/airflow | apache__airflow-23463 | 35620edd4b5b108adf355855e03224a08d132b10 | diff --git a/airflow/decorators/base.py b/airflow/decorators/base.py
--- a/airflow/decorators/base.py
+++ b/airflow/decorators/base.py
@@ -312,6 +312,9 @@ def _validate_arg_names(self, func: ValidationSource, kwargs: Dict[str, Any]):
raise TypeError(f"{func}() got unexpected keyword arguments {names}")
def expand(self, **map_kwargs: "Mappable") -> XComArg:
+ if not map_kwargs:
+ raise TypeError("no arguments to expand against")
+
self._validate_arg_names("expand", map_kwargs)
prevent_duplicates(self.kwargs, map_kwargs, fail_reason="mapping already partial")
ensure_xcomarg_return_value(map_kwargs)
diff --git a/airflow/models/mappedoperator.py b/airflow/models/mappedoperator.py
--- a/airflow/models/mappedoperator.py
+++ b/airflow/models/mappedoperator.py
@@ -191,6 +191,11 @@ def __del__(self):
warnings.warn(f"Task {task_id} was never mapped!")
def expand(self, **mapped_kwargs: "Mappable") -> "MappedOperator":
+ if not mapped_kwargs:
+ raise TypeError("no arguments to expand against")
+ return self._expand(**mapped_kwargs)
+
+ def _expand(self, **mapped_kwargs: "Mappable") -> "MappedOperator":
self._expand_called = True
from airflow.operators.empty import EmptyOperator
diff --git a/airflow/serialization/serialized_objects.py b/airflow/serialization/serialized_objects.py
--- a/airflow/serialization/serialized_objects.py
+++ b/airflow/serialization/serialized_objects.py
@@ -96,7 +96,8 @@ def _get_default_mapped_partial() -> Dict[str, Any]:
are defaults, they are automatically supplied on de-serialization, so we
don't need to store them.
"""
- default_partial_kwargs = BaseOperator.partial(task_id="_").expand().partial_kwargs
+ # Use the private _expand() method to avoid the empty kwargs check.
+ default_partial_kwargs = BaseOperator.partial(task_id="_")._expand().partial_kwargs
return BaseSerialization._serialize(default_partial_kwargs)[Encoding.VAR]
| Empty `expand()` crashes the scheduler
### Apache Airflow version
2.3.0 (latest released)
### What happened
I've found a DAG that will crash the scheduler:
```
@task
def hello():
return "hello"
hello.expand()
```
```
[2022-05-03 03:41:23,779] {scheduler_job.py:753} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 736, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 824, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 906, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1148, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 522, in update_state
info = self.task_instance_scheduling_decisions(session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 661, in task_instance_scheduling_decisions
session=session,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 714, in _get_ready_tis
expanded_tis, _ = schedulable.task.expand_mapped_task(self.run_id, session=session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/mappedoperator.py", line 609, in expand_mapped_task
operator.mul, self._resolve_map_lengths(run_id, session=session).values()
TypeError: reduce() of empty sequence with no initial value
```
### What you think should happen instead
A user DAG shouldn't crash the scheduler. This specific case could likely be an ImportError at parse time, but it makes me think we might be missing some exception handling?
### How to reproduce
_No response_
### Operating System
Debian
### Versions of Apache Airflow Providers
_No response_
### Deployment
Official Apache Airflow Helm Chart
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| We should make this a parse-time error because expanding nothing makes no sense anyway. If we really want this to work (which I assume should just be expanding to one task), this can be easily amended by adding a `1` to the end of the currently crashing `reduce` call. | 2022-05-03T22:41:24Z | [] | [] |
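For reference, the `reduce()` behaviour the last sentence refers to, shown with plain Python rather than Airflow code:

```python
import functools
import operator

lengths = {}  # stand-in for what an empty expand() would resolve to

# functools.reduce(operator.mul, lengths.values())
# -> TypeError: reduce() of empty sequence with no initial value

print(functools.reduce(operator.mul, lengths.values(), 1))  # prints 1: the initializer
```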
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 736, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 824, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 906, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1148, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 522, in update_state
info = self.task_instance_scheduling_decisions(session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 661, in task_instance_scheduling_decisions
session=session,
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 714, in _get_ready_tis
expanded_tis, _ = schedulable.task.expand_mapped_task(self.run_id, session=session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/mappedoperator.py", line 609, in expand_mapped_task
operator.mul, self._resolve_map_lengths(run_id, session=session).values()
TypeError: reduce() of empty sequence with no initial value
| 2,526 |
|||
apache/airflow | apache__airflow-24865 | f54782af8888065b464a44a6ea194a4fbb15b296 | diff --git a/airflow/models/baseoperator.py b/airflow/models/baseoperator.py
--- a/airflow/models/baseoperator.py
+++ b/airflow/models/baseoperator.py
@@ -1153,11 +1153,10 @@ def on_kill(self) -> None:
"""
def __deepcopy__(self, memo):
- """
- Hack sorting double chained task lists by task_id to avoid hitting
- max_depth on deepcopy operations.
- """
+ # Hack sorting double chained task lists by task_id to avoid hitting
+ # max_depth on deepcopy operations.
sys.setrecursionlimit(5000) # TODO fix this in a better way
+
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
@@ -1165,10 +1164,14 @@ def __deepcopy__(self, memo):
shallow_copy = cls.shallow_copy_attrs + cls._base_operator_shallow_copy_attrs
for k, v in self.__dict__.items():
+ if k == "_BaseOperator__instantiated":
+ # Don't set this until the _end_, as it changes behaviour of __setattr__
+ continue
if k not in shallow_copy:
setattr(result, k, copy.deepcopy(v, memo))
else:
setattr(result, k, copy.copy(v))
+ result.__instantiated = self.__instantiated
return result
def __getstate__(self):
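A toy illustration (not Airflow code) of why the patch above defers `_BaseOperator__instantiated` until the end of `__deepcopy__`: once that flag is set, `__setattr__` runs a side effect (the traceback shows `set_xcomargs_dependencies` being triggered), and during a deepcopy that side effect would run against a half-built copy.

```python
import copy


class Widget:
    """Stand-in for an operator whose __setattr__ gains side effects after init."""

    def __init__(self, deps):
        self.deps = deps
        self._instantiated = True          # from here on __setattr__ has side effects
        self.extra = "set after the flag"  # lands after the flag in __dict__

    def __setattr__(self, key, value):
        if getattr(self, "_instantiated", False):
            # Stand-in for set_xcomargs_dependencies(): must not run on a
            # half-copied object whose other attributes are not in place yet.
            print(f"re-resolving dependencies because {key!r} changed")
        super().__setattr__(key, value)

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k == "_instantiated":
                continue  # copying it now would re-enable the side effect mid-copy
            setattr(result, k, copy.deepcopy(v, memo))
        result._instantiated = self._instantiated  # restore the flag last
        return result


w = Widget(deps=["a"])   # prints once, for "extra", during normal construction
w2 = copy.deepcopy(w)    # copies silently; no side effect fires on the half-built copy
```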
| mini-scheduler raises AttributeError: 'NoneType' object has no attribute 'keys'
### Apache Airflow version
2.3.2 (latest released)
### What happened
The mini-scheduler run after a task finishes sometimes fails with an error "AttributeError: 'NoneType' object has no attribute 'keys'"; see full traceback below.
### What you think should happen instead
_No response_
### How to reproduce
The minimal reproducing example I could find is this:
```python
import pendulum
from airflow.models import BaseOperator
from airflow.utils.task_group import TaskGroup
from airflow.decorators import task
from airflow import DAG
@task
def task0():
pass
class Op0(BaseOperator):
template_fields = ["some_input"]
def __init__(self, some_input, **kwargs):
super().__init__(**kwargs)
self.some_input = some_input
if __name__ == "__main__":
with DAG("dag0", start_date=pendulum.now()) as dag:
with TaskGroup(group_id="tg1"):
Op0(task_id="task1", some_input=task0())
dag.partial_subset("tg1.task1")
```
Running this script with airflow 2.3.2 produces this traceback:
```
Traceback (most recent call last):
File "/app/airflow-bug-minimal.py", line 22, in <module>
dag.partial_subset("tg1.task1")
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 2013, in partial_subset
dag.task_dict = {
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 2014, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 2011, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.10/copy.py", line 153, in deepcopy
y = copier(memo)
File "/venv/lib/python3.10/site-packages/airflow/models/baseoperator.py", line 1156, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/venv/lib/python3.10/site-packages/airflow/models/baseoperator.py", line 1000, in __setattr__
self.set_xcomargs_dependencies()
File "/venv/lib/python3.10/site-packages/airflow/models/baseoperator.py", line 1107, in set_xcomargs_dependencies
XComArg.apply_upstream_relationship(self, arg)
File "/venv/lib/python3.10/site-packages/airflow/models/xcom_arg.py", line 186, in apply_upstream_relationship
op.set_upstream(ref.operator)
File "/venv/lib/python3.10/site-packages/airflow/models/taskmixin.py", line 241, in set_upstream
self._set_relatives(task_or_task_list, upstream=True, edge_modifier=edge_modifier)
File "/venv/lib/python3.10/site-packages/airflow/models/taskmixin.py", line 185, in _set_relatives
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
File "/venv/lib/python3.10/site-packages/airflow/models/taskmixin.py", line 185, in <setcomp>
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 508, in __hash__
val = tuple(self.task_dict.keys())
AttributeError: 'NoneType' object has no attribute 'keys'
```
Note that the call to `dag.partial_subset` usually happens in the mini-scheduler: https://github.com/apache/airflow/blob/2.3.2/airflow/jobs/local_task_job.py#L253
### Operating System
Linux (Debian 9)
### Versions of Apache Airflow Providers
_No response_
### Deployment
Other
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
Isn't that the same class of problem as with #23838 @pingzh @ashb @uranusjr ?
I don't think it's the same. The final error looks similar, but this one is triggered by `task_dict`, which is on the DAG object, not Operator. The DAG structure is pretty stable and mostly unchanged for a long while afaik, so this is intriguing.
We are having the same issue as well with the KubernetesExecutor:
```
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 38, in main
args.func(args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 51, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 99, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 376, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 182, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 240, in _run_task_by_local_task_job
run_job.run()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 244, in run
self._execute()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 133, in _execute
self.handle_task_exit(return_code)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 171, in handle_task_exit
self._run_mini_scheduler_on_child_tasks()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 71, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 253, in _run_mini_scheduler_on_child_tasks
partial_dag = task.dag.partial_subset(
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2013, in partial_subset
dag.task_dict = {
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2014, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2011, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1156, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/usr/local/lib/python3.9/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.9/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.9/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.9/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.9/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.9/copy.py", line 270, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.9/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.9/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1156, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1000, in __setattr__
self.set_xcomargs_dependencies()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1107, in set_xcomargs_dependencies
XComArg.apply_upstream_relationship(self, arg)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/xcom_arg.py", line 186, in apply_upstream_relationship
op.set_upstream(ref.operator)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskmixin.py", line 241, in set_upstream
self._set_relatives(task_or_task_list, upstream=True, edge_modifier=edge_modifier)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskmixin.py", line 185, in _set_relatives
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/taskmixin.py", line 185, in <setcomp>
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 508, in __hash__
val = tuple(self.task_dict.keys())
AttributeError: 'NoneType' object has no attribute 'keys'
```
Note that we have ``dag.partial_subset`` here as well.
As this is no longer only affecting the Mini-Scheduler (if I am right and the issue is related), is this something we should schedule for 2.3.X instead of 2.4?
Thinking about it, this is likely related to AIP-45 in the same way it affected task mapping. Now that the mini-scheduler runs against serialised DAGs (instead of actual DAGs from Python code), something in the serialisation-copy-etc. chain likely went wrong and populated `task_dict` incorrectly.
I've just run a git bisect, and unsurprisingly the issue at fault was ##19965 (which was in 2.3.0) | 2022-07-06T09:28:36Z | [] | [] |
Traceback (most recent call last):
File "/app/airflow-bug-minimal.py", line 22, in <module>
dag.partial_subset("tg1.task1")
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 2013, in partial_subset
dag.task_dict = {
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 2014, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 2011, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.10/copy.py", line 153, in deepcopy
y = copier(memo)
File "/venv/lib/python3.10/site-packages/airflow/models/baseoperator.py", line 1156, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/venv/lib/python3.10/site-packages/airflow/models/baseoperator.py", line 1000, in __setattr__
self.set_xcomargs_dependencies()
File "/venv/lib/python3.10/site-packages/airflow/models/baseoperator.py", line 1107, in set_xcomargs_dependencies
XComArg.apply_upstream_relationship(self, arg)
File "/venv/lib/python3.10/site-packages/airflow/models/xcom_arg.py", line 186, in apply_upstream_relationship
op.set_upstream(ref.operator)
File "/venv/lib/python3.10/site-packages/airflow/models/taskmixin.py", line 241, in set_upstream
self._set_relatives(task_or_task_list, upstream=True, edge_modifier=edge_modifier)
File "/venv/lib/python3.10/site-packages/airflow/models/taskmixin.py", line 185, in _set_relatives
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
File "/venv/lib/python3.10/site-packages/airflow/models/taskmixin.py", line 185, in <setcomp>
dags: Set["DAG"] = {task.dag for task in [*self.roots, *task_list] if task.has_dag() and task.dag}
File "/venv/lib/python3.10/site-packages/airflow/models/dag.py", line 508, in __hash__
val = tuple(self.task_dict.keys())
AttributeError: 'NoneType' object has no attribute 'keys'
| 2,550 |
|||
apache/airflow | apache__airflow-24943 | abb034113540b708e87379665a1b5caadb8748bc | diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -2296,8 +2296,13 @@ def get_email_subject_content(
def render(key: str, content: str) -> str:
if conf.has_option('email', key):
path = conf.get_mandatory_value('email', key)
- with open(path) as f:
- content = f.read()
+ try:
+ with open(path) as f:
+ content = f.read()
+ except FileNotFoundError:
+ self.log.warning(f"Could not find email template file '{path!r}'. Using defaults...")
+ except OSError:
+ self.log.exception(f"Error while using email template '{path!r}'. Using defaults...")
return render_template_to_string(jinja_env.from_string(content), jinja_context)
subject = render('subject_template', default_subject)
| Send default email if file "html_content_template" not found
### Apache Airflow version
2.3.2 (latest released)
### What happened
I created a new email template to be sent when there are task failures. I accidentally set `[email] html_content_template` and `[email] subject_template` to a path containing a typo, and no email was sent. The task's log is the following:
```
Traceback (most recent call last):
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1942, in handle_failure
self.email_alert(error, task)
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2323, in email_alert
subject, html_content, html_content_err = self.get_email_subject_content(exception, task=task)
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2315, in get_email_subject_content
subject = render('subject_template', default_subject)
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2311, in render
with open(path) as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/user/airflow/config/templates/email_failure_subject.tmpl'
```
I've looked at the TaskInstance class (https://github.com/apache/airflow/blob/main/airflow/models/taskinstance.py).
I've seen that the `render` function (https://github.com/apache/airflow/blob/bcf2c418d261c6244e60e4c2d5de42b23b714bd1/airflow/models/taskinstance.py#L2271) has a `content` parameter, which is not used inside.
I guess the solution to this bug is simple: just add a `try - catch` block and return the default content in the `catch` part.
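A minimal sketch of the suggested fallback (simplified; in Airflow the logic lives inside `TaskInstance.get_email_subject_content`, and the default subject below is only a stand-in):

```python
def read_template_or_default(path: str, default_content: str) -> str:
    """Return the template file's contents, falling back to the default if it is missing."""
    try:
        with open(path) as f:
            return f.read()
    except FileNotFoundError:
        # A typo'd or missing template path should not prevent the alert email.
        return default_content


subject = read_template_or_default(
    "/home/user/airflow/config/templates/email_failure_subject.tmpl",
    "Airflow alert: {{ ti }}",  # stand-in default subject template
)
```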
### What you think should happen instead
_No response_
### How to reproduce
_No response_
### Operating System
CentOS Linux 8
### Versions of Apache Airflow Providers
_No response_
### Deployment
Other
### Deployment details
Conda environment
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| You seem to know how to fix it, would you like to make a PR fixing it? Otherwise it will have to wait for someone who would like to pick it up.
Hi @potiuk
I have never created a PR on such a complex project. However, since this seems easy to fix, it should prove useful to learn! I'll create the PR during the following week.
Just follow https://github.com/apache/airflow/blob/main/CONTRIBUTORS_QUICK_START.rst | 2022-07-09T21:22:00Z | [] | [] |
Traceback (most recent call last):
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 1942, in handle_failure
self.email_alert(error, task)
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2323, in email_alert
subject, html_content, html_content_err = self.get_email_subject_content(exception, task=task)
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2315, in get_email_subject_content
subject = render('subject_template', default_subject)
File "/home/user/.conda/envs/airflow/lib/python3.9/site-packages/airflow/models/taskinstance.py", line 2311, in render
with open(path) as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/user/airflow/config/templates/email_failure_subject.tmpl'
| 2,551 |
|||
apache/airflow | apache__airflow-25312 | 741c20770230c83a95f74fe7ad7cc9f95329f2cc | diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -287,6 +287,7 @@ def clear_task_instances(
if dag_run_state == DagRunState.QUEUED:
dr.last_scheduling_decision = None
dr.start_date = None
+ session.flush()
class _LazyXComAccessIterator(collections.abc.Iterator):
@@ -848,28 +849,35 @@ def refresh_from_db(self, session: Session = NEW_SESSION, lock_for_update: bool
"""
self.log.debug("Refreshing TaskInstance %s from DB", self)
- qry = session.query(TaskInstance).filter(
- TaskInstance.dag_id == self.dag_id,
- TaskInstance.task_id == self.task_id,
- TaskInstance.run_id == self.run_id,
- TaskInstance.map_index == self.map_index,
+ if self in session:
+ session.refresh(self, TaskInstance.__mapper__.column_attrs.keys())
+
+ qry = (
+ # To avoid joining any relationships, by default select all
+ # columns, not the object. This also means we get (effectively) a
+ # namedtuple back, not a TI object
+ session.query(*TaskInstance.__table__.columns).filter(
+ TaskInstance.dag_id == self.dag_id,
+ TaskInstance.task_id == self.task_id,
+ TaskInstance.run_id == self.run_id,
+ TaskInstance.map_index == self.map_index,
+ )
)
if lock_for_update:
for attempt in run_with_db_retries(logger=self.log):
with attempt:
- ti: Optional[TaskInstance] = qry.with_for_update().first()
+ ti: Optional[TaskInstance] = qry.with_for_update().one_or_none()
else:
- ti = qry.first()
+ ti = qry.one_or_none()
if ti:
# Fields ordered per model definition
self.start_date = ti.start_date
self.end_date = ti.end_date
self.duration = ti.duration
self.state = ti.state
- # Get the raw value of try_number column, don't read through the
- # accessor here otherwise it will be incremented by one already.
- self.try_number = ti._try_number
+ # Since we selected columns, not the object, this is the raw value
+ self.try_number = ti.try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.unixname = ti.unixname
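For context, a minimal sketch (with a hypothetical model, not Airflow's `TaskInstance`) of the pattern the patch switches to: selecting `__table__.columns` instead of the mapped class yields plain row tuples, so no relationships are joined or tracked and the object already held in the session is left alone.

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class TaskRow(Base):  # hypothetical model for illustration
    __tablename__ = "task_row"
    id = Column(Integer, primary_key=True)
    state = Column(String)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(TaskRow(id=1, state="queued"))
    session.commit()

    # ORM object: identity-mapped, participates in flush, relationships loadable.
    obj = session.query(TaskRow).filter(TaskRow.id == 1).one_or_none()

    # Column select: a lightweight, read-only snapshot of the row (a named tuple).
    row = session.query(*TaskRow.__table__.columns).filter(TaskRow.id == 1).one_or_none()

    print(obj.state, row.state)
```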
| Scheduler crashes with psycopg2.errors.DeadlockDetected exception
### Apache Airflow version
2.2.5 (latest released)
### What happened
Customer has a dag that generates around 2500 tasks dynamically using a task group. While running the dag, a subset of the tasks (~1000) run successfully with no issue and (~1500) of the tasks are getting "skipped", and the dag fails. The same DAG runs successfully in Airflow v2.1.3 with same Airflow configuration.
While investigating the Airflow processes, we found that both schedulers got restarted with the below error during the DAG execution.
```
[2022-04-27 20:42:44,347] {scheduler_job.py:742} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1256, in _execute_context
self.dialect.do_executemany(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 912, in do_executemany
cursor.executemany(statement, parameters)
psycopg2.errors.DeadlockDetected: deadlock detected
DETAIL: Process 1646244 waits for ShareLock on transaction 3915993452; blocked by process 1640692.
Process 1640692 waits for ShareLock on transaction 3915992745; blocked by process 1646244.
HINT: See server log for query details.
CONTEXT: while updating tuple (189873,4) in relation "task_instance"
```
This issue seems to be related to #19957
### What you think should happen instead
This issue was observed while running a huge number of concurrent tasks created dynamically by a DAG. Some of the tasks are getting skipped due to the scheduler restarting with a Deadlock exception.
### How to reproduce
DAG file:
```
from propmix_listings_details import BUCKET, ZIPS_FOLDER, CITIES_ZIP_COL_NAME, DETAILS_DEV_LIMIT, DETAILS_RETRY, DETAILS_CONCURRENCY, get_api_token, get_values, process_listing_ids_based_zip
from airflow.utils.task_group import TaskGroup
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
}
date = '{{ execution_date }}'
email_to = ['example@airflow.com']
# Using a DAG context manager, you don't have to specify the dag property of each task
state = 'Maha'
with DAG('listings_details_generator_{0}'.format(state),
start_date=datetime(2021, 11, 18),
schedule_interval=None,
max_active_runs=1,
concurrency=DETAILS_CONCURRENCY,
dagrun_timeout=timedelta(minutes=10),
catchup=False # enable if you don't want historical dag runs to run
) as dag:
t0 = DummyOperator(task_id='start')
with TaskGroup(group_id='group_1') as tg1:
token = get_api_token()
zip_list = get_values(BUCKET, ZIPS_FOLDER+state, CITIES_ZIP_COL_NAME)
for zip in zip_list[0:DETAILS_DEV_LIMIT]:
details_operator = PythonOperator(
task_id='details_{0}_{1}'.format(state, zip), # task id is generated dynamically
pool='pm_details_pool',
python_callable=process_listing_ids_based_zip,
task_concurrency=40,
retries=3,
retry_delay=timedelta(seconds=10),
op_kwargs={'zip': zip, 'date': date, 'token':token, 'state':state}
)
t0 >> tg1
```
### Operating System
kubernetes cluster running on GCP linux (amd64)
### Versions of Apache Airflow Providers
pip freeze | grep apache-airflow-providers
apache-airflow-providers-amazon==1!3.2.0
apache-airflow-providers-cncf-kubernetes==1!3.0.0
apache-airflow-providers-elasticsearch==1!2.2.0
apache-airflow-providers-ftp==1!2.1.2
apache-airflow-providers-google==1!6.7.0
apache-airflow-providers-http==1!2.1.2
apache-airflow-providers-imap==1!2.2.3
apache-airflow-providers-microsoft-azure==1!3.7.2
apache-airflow-providers-mysql==1!2.2.3
apache-airflow-providers-postgres==1!4.1.0
apache-airflow-providers-redis==1!2.0.4
apache-airflow-providers-slack==1!4.2.3
apache-airflow-providers-snowflake==2.6.0
apache-airflow-providers-sqlite==1!2.1.3
apache-airflow-providers-ssh==1!2.4.3
### Deployment
Astronomer
### Deployment details
Airflow v2.2.5-2
Scheduler count: 2
Scheduler resources: 20AU (2CPU and 7.5GB)
Executor used: Celery
Worker count : 2
Worker resources: 24AU (2.4 CPU and 9GB)
Termination grace period : 2mins
### Anything else
This issue happens in all the dag runs. Some of the tasks are getting skipped and some are getting succeeded and the scheduler fails with the Deadlock exception error.
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
I faced the same issue with [airflow 2.3.0rc2](https://github.com/apache/airflow/tree/2.3.0rc2)
Had a basic dag added.
```py
from datetime import datetime
from airflow import DAG
from airflow.decorators import task
with DAG(dag_id="map-reduce", start_date=datetime(2022,4,22)) as dag:
@task
def add_one(x: int):
return x + 1
@task
def sum_it(values):
total = sum(values)
print (f"Total was {total}")
added_values = add_one.expand(x=[1,2,'a'])
sum_it(added_values)
added_values_correct = add_one.expand(x=[1, 2, 3])
sum_it (added_values_correct)
```
Added scheduler logs in attachments.
[logs.txt](https://github.com/apache/airflow/files/8591012/logs.txt)
[scheduler.log](https://github.com/apache/airflow/files/8591015/scheduler.log)
The deadlock issue is not the cause of the tasks being set to skipped -- the deadlock occurs when trying to.
@abhishekbhakat what error do you get with that? The log you included there doesn't show any error.
I experience exactly the same issue. Reducing the number of schedulers to one seems to have resolved the issue for my deployment, but now, tasks are scheduling a lot slower. I initially had 3 schedulers. Here are my new config variables:
```cfg
[scheduler]
job_heartbeat_sec = 30
scheduler_heartbeat_sec = 5
num_runs = -1
scheduler_idle_sleep_time = 1
min_file_process_interval = 30
dag_dir_list_interval = 120
print_stats_interval = 240
pool_metrics_interval = 5
scheduler_health_check_threshold = 60
orphaned_tasks_check_interval = 300.0
scheduler_zombie_task_threshold = 300
catchup_by_default = True
max_tis_per_query = 512
use_row_level_locking = False
max_dagruns_to_create_per_loop = 100
max_dagruns_per_loop_to_schedule = 200
schedule_after_task_execution = True
parsing_processes = 2
file_parsing_sort_mode = modified_time
use_job_schedule = True
allow_trigger_in_future = False
dependency_detector = airflow.serialization.serialized_objects.DependencyDetector
trigger_timeout_check_interval = 15
run_duration = 41460
```
It seems that after having set `use_row_level_locking` to `False`, the problem has disappeared on my side (with 3 schedulers). Maybe the [doc](https://airflow.apache.org/docs/apache-airflow/stable/concepts/scheduler.html#database-requirements) should be updated because:
> The short version is that users of PostgreSQL 10+ or MySQL 8+ are all ready to go -- you can start running as many copies of the scheduler as you like -- there is no further set up or config options needed. If you are using a different database please read on.
Again, some more information about my last comment, because my scheduler crashed again when I cleared a lot of tasks. By the way, I am running behind a pgbouncer.
Here is the failed log:
```
×Failed to clear task instances: "(psycopg2.errors.DeadlockDetected) deadlock detected DETAIL:
Process 22854 waits for ShareLock on transaction 778696725; blocked by process 21808. Process 21808 waits for ShareLock on transaction 778696547; blocked by process 22854.
HINT: See server log for query details.
CONTEXT: while updating tuple (3743,4) in relation "task_instance" [SQL: UPDATE task_instance SET state=%(state)s WHERE task_instance.task_id = %(task_instance_task_id)s AND task_instance.dag_id = %(task_instance_dag_id)s AND task_instance.run_id = %(task_instance_run_id)s] [
parameters: ({'state': None, 'task_instance_task_id': 'some_tasks_name', 'task_instance_dag_id': 'some_tasks_name', 'task_instance_run_id': 'scheduled__2022-05-14T22:00:00+00:00'}, {'state': None, 'task_instance_task_id': 'some_tasks_name', 'task_instance_dag_id': 'some_dag_id', 'task_instance_run_id': 'scheduled__2022-04-29T00:00:00+00:00'}, {'state': None, 'task_instance_task_id': 'some_dag_id', 'task_instance_dag_id': 'some_dag_id', 'task_instance_run_id': 'scheduled__2022-05-19T00:00:00+00:00'})] (Background on this error at: http://sqlalche.me/e/13/e3q8)"
```
@V0lantis it would be great to see a few more lines of the stacktrace to know where the exception is happening for you.
Also, any chance you can take a look at the database log to see what queries are associated with the two processes/transactions? From this we can only see half of the problem.
What airflow version are you running? And are there any plugins/custom code interacting with airflow?
Thank you @tanelk for your quick reaction time !
Here is some answers to your question :
> @V0lantis it would be great to see few more lines of the stacktrace to know where the exception is happening for
> you.
You can find here ([just_scheduler_logs.log](https://github.com/apache/airflow/files/8743394/just_scheduler_logs.log)) the full stack trace of one of my scheduler with the issue discussed above.
> Also any chance you can take a look at the database log to see what queries are acccosiated with the two >processes/transactions. From this we can only see on half of the problem.
I fortunately found it (I removed some logs because I found a lot of `2022-05-20 17:10:09 UTC:172.17.45.29(42506):airflow@airflow:[22919]:ERROR: duplicate key value violates unique constraint "variable_key_key"`), but with a Ctrl-F you can find the `transaction_id` which is referenced from the scheduler logs given above.
Here are postgresql logs:
<details>
```
2022-05-20 17:02:52 UTC:172.17.45.29(60484):airflow@airflow:[16714]:ERROR: deadlock detected
2022-05-20 17:02:52 UTC:172.17.45.29(60484):airflow@airflow:[16714]:DETAIL: Process 16714 waits for ShareLock on transaction 778687004; blocked by process 16454.
Process 16454 waits for ShareLock on transaction 778687143; blocked by process 16714.
Process 16714: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:02:35.838863+00:00'::timestamptz WHERE dag_run.id = 914889
Process 16454: UPDATE dag_run SET dag_hash='8b3441eb667ee657fc5a5da4860cb53b' WHERE dag_run.id = 914879
2022-05-20 17:02:52 UTC:172.17.45.29(60484):airflow@airflow:[16714]:HINT: See server log for query details.
2022-05-20 17:02:52 UTC:172.17.45.29(60484):airflow@airflow:[16714]:CONTEXT: while updating tuple (111,8) in relation "dag_run"
2022-05-20 17:02:52 UTC:172.17.45.29(60484):airflow@airflow:[16714]:STATEMENT: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:02:35.838863+00:00'::timestamptz WHERE dag_run.id = 914889
2022-05-20 17:03:23 UTC:172.17.45.29(60726):airflow@airflow:[17042]:ERROR: deadlock detected
2022-05-20 17:03:23 UTC:172.17.45.29(60726):airflow@airflow:[17042]:DETAIL: Process 17042 waits for ShareLock on transaction 778687333; blocked by process 16454.
Process 16454 waits for ShareLock on transaction 778687368; blocked by process 17042.
Process 17042: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:20.125896+00:00'::timestamptz WHERE dag_run.id = 913704
Process 16454: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:20.615756+00:00'::timestamptz WHERE dag_run.id = 899915
2022-05-20 17:03:23 UTC:172.17.45.29(60726):airflow@airflow:[17042]:HINT: See server log for query details.
2022-05-20 17:03:23 UTC:172.17.45.29(60726):airflow@airflow:[17042]:CONTEXT: while updating tuple (1,13) in relation "dag_run"
2022-05-20 17:03:23 UTC:172.17.45.29(60726):airflow@airflow:[17042]:STATEMENT: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:20.125896+00:00'::timestamptz WHERE dag_run.id = 913704
2022-05-20 17:04:03 UTC::@:[7996]:LOG: checkpoint complete: wrote 1783 buffers (0.7%); 0 WAL file(s) added, 0 removed, 1 recycled; write=179.117 s, sync=0.004 s, total=179.137 s; sync files=96, longest=0.004 s, average=0.001 s; distance=66037 kB, estimate=68847 kB
2022-05-20 17:04:05 UTC:172.17.45.29(60722):airflow@airflow:[17041]:ERROR: deadlock detected
2022-05-20 17:04:05 UTC:172.17.45.29(60722):airflow@airflow:[17041]:DETAIL: Process 17041 waits for ShareLock on transaction 778687575; blocked by process 16454.
Process 16454 waits for ShareLock on transaction 778687450; blocked by process 17041.
Process 17041: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:58.771437+00:00'::timestamptz WHERE dag_run.id = 822172
Process 16454: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:57.372530+00:00'::timestamptz WHERE dag_run.id = 917402
2022-05-20 17:04:05 UTC:172.17.45.29(60722):airflow@airflow:[17041]:HINT: See server log for query details.
2022-05-20 17:04:05 UTC:172.17.45.29(60722):airflow@airflow:[17041]:CONTEXT: while updating tuple (36,8) in relation "dag_run"
2022-05-20 17:04:05 UTC:172.17.45.29(60722):airflow@airflow:[17041]:STATEMENT: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:58.771437+00:00'::timestamptz WHERE dag_run.id = 822172
2022-05-20 17:04:11 UTC:172.17.45.29(41704):airflow@airflow:[21808]:ERROR: deadlock detected
2022-05-20 17:04:11 UTC:172.17.45.29(41704):airflow@airflow:[21808]:DETAIL: Process 21808 waits for ShareLock on transaction 778687575; blocked by process 16454.
Process 16454 waits for ShareLock on transaction 778687507; blocked by process 21808.
Process 21808: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:04:08.941475+00:00'::timestamptz WHERE dag_run.id = 913704
Process 16454: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:03:57.372530+00:00'::timestamptz WHERE dag_run.id = 917402
2022-05-20 17:04:11 UTC:172.17.45.29(41704):airflow@airflow:[21808]:HINT: See server log for query details.
2022-05-20 17:04:11 UTC:172.17.45.29(41704):airflow@airflow:[21808]:CONTEXT: while updating tuple (35,6) in relation "dag_run"
2022-05-20 17:04:11 UTC:172.17.45.29(41704):airflow@airflow:[21808]:STATEMENT: UPDATE dag_run SET last_scheduling_decision='2022-05-20T17:04:08.941475+00:00'::timestamptz WHERE dag_run.id = 913704
```
</details>
I am running Airflow **2.2.4**. There are some custom plugins (not many though), simply plugins which create new dynamic dags from a given template.
We are communicating with pg (pg **12.7** by the way) through pgbouncer.
Hope that will help understand what the issue is :+1:
Are these logs from where you have set `use_row_level_locking` to `False`?
The `use_row_level_locking` setting is used to avoid this sort of issue when running multiple schedulers - these logs indicate that two schedulers are scheduling the same DAG runs and end up getting deadlocked. With row locking enabled this situation should not happen because each scheduler picks different DAG runs to look at.
The interesting situation is when `use_row_level_locking` is `True` and things still get deadlocked - that sounds like a bug on Airflow's part (or perhaps an interaction with some other code you are running).
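For context, a rough sketch (not Airflow's actual query) of what row-level locking buys when several schedulers run: rows already locked by another scheduler are skipped rather than waited on. This assumes a `DagRun` mapped class with the usual columns.

```python
from sqlalchemy import select


def claim_dag_runs(session, DagRun, limit=20):
    """Each scheduler claims its own batch of runs with SELECT ... FOR UPDATE SKIP LOCKED."""
    query = (
        select(DagRun)
        .where(DagRun.state == "running")
        .order_by(DagRun.last_scheduling_decision)
        .limit(limit)
        .with_for_update(skip_locked=True)  # don't block on rows another scheduler holds
    )
    return session.execute(query).scalars().all()
```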
> Are these logs from where you have set use_row_level_locking to False?
Indeed, it is ! My bad then for having set this param, thinking that postgres would allow it. Thanks for the help, and sorry if I wasted your time 🙏
> It seems that after having passed the `use_row_level_locking` to `False`, the problem has disappear on my side (with 3 schedulers). Maybe the [doc](https://airflow.apache.org/docs/apache-airflow/stable/concepts/scheduler.html#database-requirements) should be updated because:
>
I would advise against doing this while running multiple schedulers -- if you do then it is entirely possible that Airflow will not correctly respect configured concurrency limits for DAGs/Tasks/Pools. Edit: oh, or it will crash
Yep, that's effectively what my Airflow deployment did. I misinterpreted the documentation, sorry.
> The deadlock issue is not the cause of the tasks being set to skipped -- the deadlock occurs when trying to.
>
> @abhishekbhakat what error do you get with that? The log you included there doesn't show any error.
@ashb The log file provided has that error in around line no. 5810
Attaching the error message below:
```
[2022-04-29 12:25:18,883] {scheduler_job.py:753} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1685, in _execute_context
self.dialect.do_executemany(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 917, in do_executemany
self._psycopg2_extras().execute_batch(
File "/usr/local/lib/python3.9/site-packages/psycopg2/extras.py", line 1187, in execute_batch
cur.execute(b";".join(sqls))
psycopg2.errors.DeadlockDetected: deadlock detected
DETAIL: Process 29749 waits for ShareLock on transaction 62344; blocked by process 31075.
Process 31075 waits for ShareLock on transaction 62338; blocked by process 29749.
HINT: See server log for query details.
CONTEXT: while updating tuple (4,23) in relation "task_instance"
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 736, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 824, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 906, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/usr/local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 1146, in _schedule_dag_run
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 1176, in _verify_integrity_if_dag_changed
dag_run.verify_integrity(session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 944, in verify_integrity
session.flush()
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 3255, in flush
self._flush(objects)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 3395, in _flush
transaction.rollback(_capture_exception=True)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
compat.raise_(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 211, in raise_
raise exception
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 3355, in _flush
flush_context.execute()
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/unitofwork.py", line 453, in execute
rec.execute(self)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/unitofwork.py", line 627, in execute
util.preloaded.orm_persistence.save_obj(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/persistence.py", line 234, in save_obj
_emit_update_statements(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/orm/persistence.py", line 998, in _emit_update_statements
c = connection._execute_20(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1520, in _execute_20
return meth(self, args_10style, kwargs_10style, execution_options)
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/sql/elements.py", line 313, in _execute_on_connection
return connection._execute_clauseelement(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1389, in _execute_clauseelement
ret = self._execute_context(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1748, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1929, in _handle_dbapi_exception
util.raise_(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 211, in raise_
raise exception
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1685, in _execute_context
self.dialect.do_executemany(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 917, in do_executemany
self._psycopg2_extras().execute_batch(
File "/usr/local/lib/python3.9/site-packages/psycopg2/extras.py", line 1187, in execute_batch
cur.execute(b";".join(sqls))
sqlalchemy.exc.OperationalError: (psycopg2.errors.DeadlockDetected) deadlock detected
DETAIL: Process 29749 waits for ShareLock on transaction 62344; blocked by process 31075.
Process 31075 waits for ShareLock on transaction 62338; blocked by process 29749.
HINT: See server log for query details.
CONTEXT: while updating tuple (4,23) in relation "task_instance"
[SQL: UPDATE task_instance SET state=%(state)s WHERE task_instance.task_id = %(task_instance_task_id)s AND task_instance.dag_id = %(task_instance_dag_id)s AND task_instance.run_id = %(task_instance_run_id)s AND task_instance.map_index = %(task_instance_map_index)s]
```
I have been noticing this error as well. PostgreSQL 14 with Airflow 2.3.1. My scheduler containers keep crashing (I run two of them on two different nodes).
```
DETAIL: Process 7063 waits for ShareLock on transaction 3165652; blocked by process 7243.
Process 7243 waits for ShareLock on transaction 3165651; blocked by process 7063.
HINT: See server log for query details.
CONTEXT: while updating tuple (208084,20) in relation "task_instance"
[SQL: UPDATE task_instance SET state=%(state)s WHERE task_instance.task_id = %(task_instance_task_id)s AND task_instance.dag_id = %(task_instance_dag_id)s AND task_instance.run_id = %(task_instance_run_id)s AND task_instance.map_index = %(task_instance_map_index)s]
```
I'm also receiving this error with a single `LocalExecutor` scheduler running on Airflow 2.3.2.
Has anyone found an effective workaround for this?
```
2022-06-24 08:01:10,633] {scheduler_job.py:756} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1706, in _execute_context
cursor, statement, parameters, context
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 716, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DeadlockDetected: deadlock detected
DETAIL: Process 271343 waits for ShareLock on transaction 251928224; blocked by process 282010.
Process 282010 waits for ShareLock on transaction 251928207; blocked by process 271343.
HINT: See server log for query details.
CONTEXT: while updating tuple (474,18) in relation "task_instance"
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 739, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 827, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 909, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1161, in _schedule_dag_run
dag_run.schedule_tis(schedulable_tis, session)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 1042, in schedule_tis
.update({TI.state: State.SCHEDULED}, synchronize_session=False)
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3196, in update
execution_options={"synchronize_session": synchronize_session},
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 1670, in execute
result = conn._execute_20(statement, params or {}, execution_options)
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1520, in _execute_20
return meth(self, args_10style, kwargs_10style, execution_options)
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 314, in _execute_on_connection
self, multiparams, params, execution_options
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1399, in _execute_clauseelement
cache_hit=cache_hit,
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1749, in _execute_context
e, statement, parameters, cursor, context
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1930, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 211, in raise_
raise exception
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1706, in _execute_context
cursor, statement, parameters, context
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 716, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (psycopg2.errors.DeadlockDetected) deadlock detected
DETAIL: Process 271343 waits for ShareLock on transaction 251928224; blocked by process 282010.
Process 282010 waits for ShareLock on transaction 251928207; blocked by process 271343.
HINT: See server log for query details.
CONTEXT: while updating tuple (474,18) in relation "task_instance"
[SQL: UPDATE task_instance SET state=%(state)s WHERE task_instance.dag_id = %(dag_id_1)s AND task_instance.run_id = %(run_id_1)s AND (task_instance.task_id, task_instance.map_index) IN ((%(param_1_1_1)s, %(param_1_1_2)s))]
```
I'm also seeing this issue, albeit with a slightly different query:
```
psycopg2.errors.DeadlockDetected: deadlock detected
DETAIL: Process 16440 waits for ShareLock on transaction 788648981; blocked by process 16481.
Process 16481 waits for ShareLock on transaction 788648979; blocked by process 16440.
HINT: See server log for query details.
CONTEXT: while deleting tuple (0,25) in relation "dag"
<snip stack same as posted by others>
sqlalchemy.exc.OperationalError: (psycopg2.errors.DeadlockDetected) deadlock detected
DETAIL: Process 16440 waits for ShareLock on transaction 788648981; blocked by process 16481.
Process 16481 waits for ShareLock on transaction 788648979; blocked by process 16440.
HINT: See server log for query details.
CONTEXT: while deleting tuple (0,25) in relation "dag"
[SQL: DELETE FROM dag WHERE dag.dag_id IN (%(dag_id_1_1)s) RETURNING dag.dag_id]
[parameters: {'dag_id_1_1': 'Pipeline.DAG_NAME_REDACTED'}]
(Background on this error at: http://sqlalche.me/e/14/e3q8)
```
It happens pretty much every time I delete a dag. Only way I've found around it is to browse the dag runs, delete all but one of them, then delete the dag.
I'm running multiple schedulers, and `use_row_locking = True`. Postgres 13.1.
Our version path was 2.2.4 -> 2.3.2 (we skipped 2.3.0 and 2.3.1 because of reasons). 2.2.4 is fine, 2.3.2 is not. Anecdotally, db load appears much higher in 2.3.2 - the box running postgres is pretty much flat out all the time, versus ~25% previously. I don't have hard numbers, because several things changed at once in our testing env (mea culpa), but I will at some point be upgrading another env, and I'll be watching closely.
N.B. I've tried shutting down all-but-one schedulers, and that also fixes it. 2 schedulers running: delete fails, as per above. 1 scheduler running, delete works.
Is row-locking broken somehow in 2.3.2?
Having this issue as well, we're only on 2.0.2.
Looking through some of the Airflow configs I'm wondering if it could be related to the [file_parsing_sort_mode](https://airflow.apache.org/docs/apache-airflow/stable/configurations-ref.html#file-parsing-sort-mode)? It seems like the default behavior could cause the schedulers to always start at the same place when parsing dag files, and I could see that causing a deadlock at scale. I'm wondering if flipping to `random_seeded_by_host` would solve it
This option doesn't seem to be available in our version; does someone want to try it out for us 😃
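For anyone who wants to check what their deployment currently uses before flipping it, here is a small sketch (assuming a host where Airflow's configuration is importable; the `random_seeded_by_host` value only exists on versions that ship the option):
```python
# Sketch: print the scheduler's current file-parsing sort mode.
from airflow.configuration import conf

sort_mode = conf.get("scheduler", "file_parsing_sort_mode", fallback="modified_time")
print(f"file_parsing_sort_mode = {sort_mode}")

# The experiment discussed above would be to set, in airflow.cfg:
#   [scheduler]
#   file_parsing_sort_mode = random_seeded_by_host
# so that each scheduler host starts its parsing loop at a different place.
```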
> Our version path was 2.2.4 -> 2.3.2 (we skipped 2.3.0 and 2.3.1 because of reasons). 2.2.4 is fine, 2.3.2 is not. Anecdotally, db load appears much higher in 2.3.2 - the box running postgres is pretty much flat out all the time, versus ~25% previously. I don't have hard numbers, because several things changed at once in our testing env (mea culpa), but I will at some point be upgrading another env, and I'll be watching closely.
Our DB load has increased a lot as well. I actually swapped from a Docker-based PostgreSQL (mounted volume) to installing it directly on a server instead, to see if it would help. I also implemented pgbouncer and a bunch of other changes, so I do not have any useful data or information due to a lot of troubleshooting and changes.
Does anyone know if it comes from the Webserver/Schedulers or Workers? I know it's a difficult question, but maybe someone happens to have a log of queries from before/after and could make a simple stat of what has changed?
We've made some progress in at least reducing the amount of deadlocks. We're running 2.0.2 on K8s and we've discovered the following:
1. This mainly happens at scale when there's a lot of tasks to schedule
2. This mainly happens during updates to the scheduler or when multiple schedulers start at once
We've been able to reduce deadlocks almost entirely simply by adding a startupProbe to the scheduler deployment in K8s and telling K8s to only roll out schedulers one at a time, to avoid them starting at the same time. When they started at the same time, all of them would deadlock and running tasks would get killed and rescheduled, etc. Rolling out one at a time has almost entirely removed deadlocking, and the few times it does happen it's isolated to one scheduler, where the other schedulers can keep things moving.
The fact it happens more frequently when starting schedulers at the same time makes me think it might be related to the `file_parsing_sort_mode` I mentioned above. Since the default behavior is `modified_time`, my theory is that all the schedulers are configured to "start" their scheduler loop at the same place, which would naturally increase the chance of deadlocking.
@ldacey @whitleykeith @argibbs @eitanme -> I spoke with some enlightened people :) (yeah, talking about you @ashb and @bbovenzi), and after the talk I have a hypothesis that this is the new Grid view doing auto-refresh for a long-running DAG.
There was a fix by @ashb https://github.com/apache/airflow/pull/24284 that is going to be released in 2.3.3 which significantly decreases the number of queries generated by the Grid view refresh. It's a huge improvement and might impact both the load on the DB and possibly the memory usage of the webserver, especially if there are almost continuously running dags and a number of people leave the browser open with "auto-refresh" on the Grid View.
Is there a way some of you could test the hypothesis and see if there might be a correlation? (It requires a bit of coordination on what your users do.)
(BTW. If that's it then Ash's fix is coming in 2.3.3).
Hah, spooky.
Without boring you with the details, I was running some-many-lots of DAGs from scratch today. I noticed that db load increased with the number of active dags and then experimented to see if it was the dags themselves, or just because I was watching them all.
Turning off auto-refresh seemed to massively improve db load, and I just came here to update the thread, only to find you've beaten me to it.
As an aside, what made me look at auto-refresh (@bbovenzi) was that the spinner is _always_ spinning when auto-refresh is turned on. In the old view, the refresh dots would only animate every few seconds when it polled for an updated state. I don't know if the always-spinning thing means it's always refreshing, or if it's just meaningless.
But long story short, yes, auto-refresh smells fishy to me.
It's just always spinning but good point. I can fix that.
Is the DB load still bad with the latest on main? If needed I can look into other ways to improve the autorefresh performance.
I haven't tried with latest main, just 2.3.2; I tend to only run with the release builds, but I can try main if it'll help.
Another option, @argibbs, would be to apply @ashb's fix to your installation; it's not much, and it should be cleanly applicable on top of your code as a patch.
This is "safer" (fewer changes) and gives better "proof" that this was the problem (and the good thing is you could apply it to production, let it run, and "patch-fix" it without waiting for 2.3.3).
Just to follow up here on the deadlock issue I saw, which, I think, is unrelated to the load issues associated with the new GridView, though I'm happy to hear those will be fixed in 2.3.3.
Since I was only running one scheduler, I tried setting `use_row_level_locking` to false and have not seen the `psycopg2.errors.DeadlockDetected: deadlock detected` error since. I also experienced the problem on both 2.2.4 and 2.3.2, and 2.2.4 didn't have the GridView.
Not sure if this helps and I'll try toggling the setting back once 2.3.3 is released to see if that has an impact, but wanted to give an update.
Also of note, I was experiencing this with only one `LocalExecutor` scheduler when many tasks were kicked off at the same time so I don't think my issue has to do with multi-scheduler interaction. It's more related to overall task load.
I am also encountering this issue. I collected some details about both queries involved in the deadlock, hopefully this is helpful.
Deployment details:
* Airflow 2.2.5
* KubernetesExecutor
* A single Airflow scheduler is running.
* Row level locking is enabled.
* Scheduler parsing_processes = 5
* Scheduler resources: 8 cores, 5 GB RAM
* Database resources: 12 cores, 8 GB RAM (Postgres 11.3)
* The problem only appears at scale (50-150 DAGs, several of which have hundreds of tasks).
* The problem is not easily reproducible but is happening daily.
In the deadlocks there is an UPDATE statement deadlocking with a SELECT ... FOR UPDATE.
Based on stack traces visible in the scheduler logs, the UPDATE originates from the main scheduler loop here:
https://github.com/apache/airflow/blob/2.2.5/airflow/models/dagrun.py#L901-L910
Based on the database logs, the SELECT statement has the form:
```
SELECT task_instance.try_number AS task_instance_try_number, ...
FROM task_instance JOIN dag_run AS dag_run_1 ON dag_run_1.dag_id = task_instance.dag_id AND dag_run_1.run_id = task_instance.run_id
WHERE task_instance.dag_id = 'my_dag_id' AND task_instance.task_id = 'my_task_id' AND task_instance.run_id = 'sanitized_run_id_1'
LIMIT 1 FOR UPDATE
```
Searching the Airflow source code, the query that looks most similar to the SELECT from the database error is in `TaskInstance.refresh_from_db()`:
https://github.com/apache/airflow/blob/2.2.5/airflow/models/taskinstance.py#L714-L736
Example scheduler logs showing the origins of the UPDATE statement:
```
[2022-07-06 18:54:29,456] {{scheduler_job.py:753}} INFO - Exited execute loop
Traceback (most recent call last):
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 609, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DeadlockDetected: deadlock detected
DETAIL: Process 99711 waits for ShareLock on transaction 527390121; blocked by process 100627.
Process 100627 waits for ShareLock on transaction 527390039; blocked by process 99711.
HINT: See server log for query details.
CONTEXT: while updating tuple (48513,18) in relation "task_instance"
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/airflow/venv-py3/bin/airflow", line 8, in <module>
sys.exit(main())
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/__main__.py", line 48, in main
args.func(args)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 75, in scheduler
_run_scheduler_job(args=args)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 46, in _run_scheduler_job
job.run()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 246, in run
self._execute()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 726, in _execute
self._run_scheduler_loop()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 807, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 890, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1147, in _schedule_dag_run
dag_run.schedule_tis(schedulable_tis, session)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/utils/session.py", line 67, in wrapper
return func(*args, **kwargs)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/models/dagrun.py", line 909, in schedule_tis
.update({TI.state: State.SCHEDULED}, synchronize_session=False)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 4063, in update
update_op.exec_()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 1697, in exec_
self._do_exec()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 1895, in _do_exec
self._execute_stmt(update_stmt)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 1702, in _execute_stmt
self.result = self.query._execute_crud(stmt, self.mapper)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3568, in _execute_crud
return conn.execute(stmt, self._params)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
return meth(self, multiparams, params)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1130, in _execute_clauseelement
distilled_params,
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
e, statement, parameters, cursor, context
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 609, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.OperationalError: (psycopg2.errors.DeadlockDetected) deadlock detected
DETAIL: Process 99711 waits for ShareLock on transaction 527390121; blocked by process 100627.
Process 100627 waits for ShareLock on transaction 527390039; blocked by process 99711.
HINT: See server log for query details.
CONTEXT: while updating tuple (48513,18) in relation "task_instance"
[SQL: UPDATE task_instance SET state=%(state)s WHERE task_instance.dag_id = %(dag_id_1)s AND task_instance.run_id = %(run_id_1)s AND task_instance.task_id IN (%(task_id_1)s)]
[parameters: {'state': <TaskInstanceState.SCHEDULED: 'scheduled'>, 'dag_id_1': 'sanitized_dag_id_1', 'run_id_1': 'sanitized_run_id_1', 'task_id_1': 'sanitized_task_id_1'}]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
```
Example Postgres logs showing a complete SELECT ... FOR UPDATE statement:
```
2022-07-06 18:54:25.816 UTC [100639] ERROR: deadlock detected
2022-07-06 18:54:25.816 UTC [100639] DETAIL: Process 100639 waits for ShareLock on transaction 527390039; blocked by process 99711.
Process 99711 waits for ShareLock on transaction 527390130; blocked by process 100639.
Process 100639: SELECT task_instance.try_number AS task_instance_try_number, task_instance.task_id AS task_instance_task_id, task_instance.dag_id AS task_instance_dag_id, task_instance.run_id AS task_instance_run_id, task_instance.start_date AS task_instance_start_date, task_instance.end_date AS task_instance_end_date, task_instance.duration AS task_instance_duration, task_instance.state AS task_instance_state, task_instance.max_tries AS task_instance_max_tries, task_instance.hostname AS task_instance_hostname, task_instance.unixname AS task_instance_unixname, task_instance.job_id AS task_instance_job_id, task_instance.pool AS task_instance_pool, task_instance.pool_slots AS task_instance_pool_slots, task_instance.queue AS task_instance_queue, task_instance.priority_weight AS task_instance_priority_weight, task_instance.operator AS task_instance_operator, task_instance.queued_dttm AS task_instance_queued_dttm, task_instance.queued_by_job_id AS task_instance_queued_by_job_id, task_instance.pid AS task_instance_pid, task_insta
Process 99711: UPDATE task_instance SET state='scheduled' WHERE task_instance.dag_id = 'sanitized_dag_id_2' AND task_instance.run_id = 'sanitized_run_id_2' AND task_instance.task_id IN ('sanitized_task_id_2', 'sanitized_task_id_3')
2022-07-06 18:54:25.816 UTC [100639] HINT: See server log for query details.
2022-07-06 18:54:25.816 UTC [100639] CONTEXT: while locking tuple (725,169) in relation "dag_run"
2022-07-06 18:54:25.816 UTC [100639] STATEMENT: SELECT task_instance.try_number AS task_instance_try_number, task_instance.task_id AS task_instance_task_id, task_instance.dag_id AS task_instance_dag_id, task_instance.run_id AS task_instance_run_id, task_instance.start_date AS task_instance_start_date, task_instance.end_date AS task_instance_end_date, task_instance.duration AS task_instance_duration, task_instance.state AS task_instance_state, task_instance.max_tries AS task_instance_max_tries, task_instance.hostname AS task_instance_hostname, task_instance.unixname AS task_instance_unixname, task_instance.job_id AS task_instance_job_id, task_instance.pool AS task_instance_pool, task_instance.pool_slots AS task_instance_pool_slots, task_instance.queue AS task_instance_queue, task_instance.priority_weight AS task_instance_priority_weight, task_instance.operator AS task_instance_operator, task_instance.queued_dttm AS task_instance_queued_dttm, task_instance.queued_by_job_id AS task_instance_queued_by_job_id, task_instance.pid AS task_instance_pid, task_instance.executor_config AS task_instance_executor_config, task_instance.external_executor_id AS task_instance_external_executor_id, task_instance.trigger_id AS task_instance_trigger_id, task_instance.trigger_timeout AS task_instance_trigger_timeout, task_instance.next_method AS task_instance_next_method, task_instance.next_kwargs AS task_instance_next_kwargs, dag_run_1.state AS dag_run_1_state, [dag_run_1.id](http://dag_run_1.id/) AS dag_run_1_id, dag_run_1.dag_id AS dag_run_1_dag_id, dag_run_1.queued_at AS dag_run_1_queued_at, dag_run_1.execution_date AS dag_run_1_execution_date, dag_run_1.start_date AS dag_run_1_start_date, dag_run_1.end_date AS dag_run_1_end_date, dag_run_1.run_id AS dag_run_1_run_id, dag_run_1.creating_job_id AS dag_run_1_creating_job_id, dag_run_1.external_trigger AS dag_run_1_external_trigger, dag_run_1.run_type AS dag_run_1_run_type, dag_run_1.conf AS dag_run_1_conf, dag_run_1.data_interval_start AS dag_run_1_data_interval_start, dag_run_1.data_interval_end AS dag_run_1_data_interval_end, dag_run_1.last_scheduling_decision AS dag_run_1_last_scheduling_decision, dag_run_1.dag_hash AS dag_run_1_dag_hash
FROM task_instance JOIN dag_run AS dag_run_1 ON dag_run_1.dag_id = task_instance.dag_id AND dag_run_1.run_id = task_instance.run_id
WHERE task_instance.dag_id = 'sanitized_dag_id_2' AND task_instance.task_id = 'sanitized_task_id_3' AND task_instance.run_id = 'sanitized_run_id_2'
LIMIT 1 FOR UPDATE
```
Unfortunately we are not able to repro this on a test instance so I have not been able to try on newer Airflow versions, but based on the discussion on this thread it sounds like the issue is present until at least 2.3.2.
Very useful, thanks. I will take a look at it shortly.
We have figured out the origins of the SELECT ... FOR UPDATE and a mechanism for the deadlocks.
The short story is that it originates from the `airflow tasks run` CLI command inside task pods.
The SELECT does indeed originate from `TaskInstance.refresh_from_db()` as suggested above. It is called as follows:
```
airflow/jobs/local_task_job.py:89 _execute
airflow/models/taskinstance.py:1184: check_and_change_state_before_execution
airflow/models/taskinstance.py:714 refresh_from_db(lock_for_update=True, session=session)
```
Line numbers both in the synopsis above and the stack trace below are for Airflow 2.2.5.
Stack traces including the SELECT statements can be found in failed pod logs, I have included one below:
```
Traceback (most recent call last):
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
cursor, statement, parameters, context
psycopg2.errors.InFailedSqlTransaction: current transaction is aborted, commands ignored until end of transaction block
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 298, in task_run
self._execute()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/utils/session.py", line 70, in wrapper
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1184, in check_and_change_state_before_execution
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/utils/session.py", line 67, in wrapper
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 734, in refresh_from_db
for attempt in run_with_db_retries(logger=self.log):
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/tenacity/__init__.py", line 390, in __iter__
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/tenacity/__init__.py", line 368, in iter
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/tenacity/__init__.py", line 186, in reraise
File "/opt/python3.7/lib64/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/opt/python3.7/lib64/python3.7/concurrent/futures/_base.py", line 384, in __get_result
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 736, in refresh_from_db
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3203, in __getitem__
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3560, in _execute_and_instances
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1130, in _execute_clauseelement
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 609, in do_execute
sqlalchemy.exc.InternalError: (psycopg2.errors.InFailedSqlTransaction) current transaction is aborted, commands ignored until end of transaction block
[SQL: SELECT task_instance.try_number AS task_instance_try_number, task_instance.task_id AS task_instance_task_id, task_instance.dag_id AS task_instance_dag_id, task_instance.run_id AS task_instance_run_id, task_instance.start_date AS task_instance_start_date, task_instance.end_date AS task_instance_end_date, task_instance.duration AS task_instance_duration, task_instance.state AS task_instance_state, task_instance.max_tries AS task_instance_max_tries, task_instance.hostname AS task_instance_hostname, task_instance.unixname AS task_instance_unixname, task_instance.job_id AS task_instance_job_id, task_instance.pool AS task_instance_pool, task_instance.pool_slots AS task_instance_pool_slots, task_instance.queue AS task_instance_queue, task_instance.priority_weight AS task_instance_priority_weight, task_instance.operator AS task_instance_operator, task_instance.queued_dttm AS task_instance_queued_dttm, task_instance.queued_by_job_id AS task_instance_queued_by_job_id, task_instance.pid AS task_instance_pid, task_instance.executor_config AS task_instance_executor_config, task_instance.external_executor_id AS task_instance_external_executor_id, task_instance.trigger_id AS task_instance_trigger_id, task_instance.trigger_timeout AS task_instance_trigger_timeout, task_instance.next_method AS task_instance_next_method, task_instance.next_kwargs AS task_instance_next_kwargs, dag_run_1.state AS dag_run_1_state, [dag_run_1.id](http://dag_run_1.id/) AS dag_run_1_id, dag_run_1.dag_id AS dag_run_1_dag_id, dag_run_1.queued_at AS dag_run_1_queued_at, dag_run_1.execution_date AS dag_run_1_execution_date, dag_run_1.start_date AS dag_run_1_start_date, dag_run_1.end_date AS dag_run_1_end_date, dag_run_1.run_id AS dag_run_1_run_id, dag_run_1.creating_job_id AS dag_run_1_creating_job_id, dag_run_1.external_trigger AS dag_run_1_external_trigger, dag_run_1.run_type AS dag_run_1_run_type, dag_run_1.conf AS dag_run_1_conf, dag_run_1.data_interval_start AS dag_run_1_data_interval_start, dag_run_1.data_interval_end AS dag_run_1_data_interval_end, dag_run_1.last_scheduling_decision AS dag_run_1_last_scheduling_decision, dag_run_1.dag_hash AS dag_run_1_dag_hash
WHERE task_instance.dag_id = %(dag_id_1)s AND task_instance.task_id = %(task_id_1)s AND task_instance.run_id = %(run_id_1)s
LIMIT %(param_1)s FOR UPDATE]
[parameters: {'dag_id_1': 'sanitized_dag_name_1', 'task_id_1': 'sanitized_task_name_1', 'run_id_1': 'sanitized_run_id_1', 'param_1': 1}]
(Background on this error at: http://sqlalche.me/e/13/2j85)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 609, in do_execute
cursor.execute(statement, parameters)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/airflow/venv-py3/bin/airflow", line 8, in <module>
sys.exit(main())
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/__main__.py", line 48, in main
args.func(args)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 48, in command
return func(*args, **kwargs)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/utils/cli.py", line 92, in wrapper
return f(*args, **kwargs)
_run_task_by_selected_method(args, dag, ti)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 105, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/cli/commands/task_command.py", line 163, in _run_task_by_local_task_job
run_job.run()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 246, in run
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/airflow/jobs/local_task_job.py", line 97, in _execute
external_executor_id=self.external_executor_id,
return func(*args, session=session, **kwargs)
self.refresh_from_db(session=session, lock_for_update=True)
return func(*args, **kwargs)
do = self.iter(retry_state=retry_state)
raise retry_exc.reraise()
raise self.last_attempt.result()
raise self._exception
ti: Optional[TaskInstance] = qry.with_for_update().first()
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3429, in first
ret = list(self[0:1])
return list(res)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3535, in __iter__
return self._execute_and_instances(context)
result = conn.execute(querycontext.statement, self._params)
return meth(self, multiparams, params)
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
distilled_params,
e, statement, parameters, cursor, context
File "/opt/airflow/venv-py3/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
raise exception
cursor, statement, parameters, context
cursor.execute(statement, parameters)
FROM task_instance JOIN dag_run AS dag_run_1 ON dag_run_1.dag_id = task_instance.dag_id AND dag_run_1.run_id = task_instance.run_id
```
Regarding how the deadlock happens: as we know, we have two statements, UPDATE and SELECT ... FOR UPDATE, that cause a deadlock. As described previously, the UPDATE statement is here https://github.com/apache/airflow/blob/2.2.5/airflow/models/dagrun.py#L903-L909. It needs to get a shared lock on the matching rows of the `task_instance` and `dag_run` tables in order to execute the update. However, it fails to do that. Before executing the `schedule_tis` function, the scheduler job already takes the lock on the `dag_run` table:
https://github.com/apache/airflow/blob/2.2.5/airflow/jobs/scheduler_job.py#L884
https://github.com/apache/airflow/blob/2.2.5/airflow/models/dagrun.py#L287
So it means the UPDATE statement is failing at acquiring the matching row lock of the `task_instance` table.
In the meantime, the SELECT ... FOR UPDATE statement `select ... from task_instance join dag_run ... for update` also needs a lock on the matching rows in both the task_instance and dag_run tables. It first acquires such a lock on the `task_instance` table, but then attempts to get the lock on the `dag_run` table, which was already acquired by the UPDATE statement. Therefore, a deadlock happens.
Regarding how this might be fixed, the suspicion is `TaskInstance.refresh_from_db()` only needs to lock the rows in the task_instance table, not the dag_run table. This implies the deadlocks might be worked around by replacing `with_for_update()` with `with_for_update(of=TaskInstance)` here https://github.com/apache/airflow/blob/2.2.5/airflow/models/taskinstance.py#L736, at least for PostgreSQL and Oracle.
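To make the suggested workaround concrete, here is a minimal sketch of the narrowed lock (illustrative only, mirroring the description above rather than any merged patch; the session and the hypothetical helper name are assumptions):
```python
# Sketch of the workaround described above: narrow the row lock taken when
# refreshing a TaskInstance so that only task_instance rows are locked, even
# though the eager-loaded dag_run table still appears in the JOIN.
from airflow.models.taskinstance import TaskInstance


def refresh_ti_locked(session, dag_id: str, task_id: str, run_id: str):
    qry = session.query(TaskInstance).filter(
        TaskInstance.dag_id == dag_id,
        TaskInstance.task_id == task_id,
        TaskInstance.run_id == run_id,
    )
    # with_for_update(of=TaskInstance) renders SELECT ... FOR UPDATE OF task_instance
    # on PostgreSQL/Oracle, so the dag_run rows pulled in by the join are not locked.
    return qry.with_for_update(of=TaskInstance).first()
```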
cc: @ashb - I did not have a look at this yet (will shortly) , but maybe you can take a look and see if any of this ring a bell.
I'm able to reliably force a DeadlockDetected exception by creating a DAG with many (i.e. over a hundred) concurrent tasks, setting `max_active_tasks_per_dag` to 75, setting `parallelism` to 100, triggering the DAG, waiting a couple of minutes, then deleting the DAG Run. Maybe this is further evidence for @dstaple's proposed workaround limiting the lock to the `task_instance` table, or at least a way to test if that workaround resolves this particular deadlock.
Thanks for the REALLY detailed investigation, @dstaple.
I finally had some time to take a look at this and I think your assessment was very correct.
However, the solution you proposed is not good, because I think we DO want to run "SELECT FOR UPDATE" on the DagRun table. The whole scheduling is based on the fact that the DagRun row gets locked and no changes happen to the DagRun or any TaskInstances of that DagRun while the Scheduler processes those task instances. And since `local_task_run` potentially changes the state of the task instance it runs (that's why it locks it for update), if the whole DagRun is currently "being processed" by any of the schedulers, we should hold off on running the task until the scheduler finishes processing this particular DagRun and releases the lock.
And in this case "local_task_run" actually locks the DagRun table too (though why it does so is the one thing I do not understand completely; see below). So it does what it should, but with one small caveat: it locks the TaskInstance and DagRun in REVERSE ORDER compared to what the Scheduler does. This is actually the root cause of ALL the deadlocks (at least in Postgres; MySQL has its own fair share of other kinds of deadlocks): inconsistent lock ordering. A deadlock appears when two threads want two (or more) resources and grab locks on them in reverse order. This is really the only reason for this kind of deadlock, and your investigation showed very nicely what's going on.
The solution to that is simple: since we are going to get the DagRun lock in a moment anyway in "refresh_from_db", we should simply get the lock on the DagRun table FIRST. This should fix the problem, as we will then grab locks in the same sequence in the scheduler and in task_run: first DagRun, then TaskInstance. This is what my proposed #25266 does.
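For illustration, a rough sketch of that consistent-ordering idea (this is not the actual #25266 diff; the helper below is hypothetical and the session is assumed to come from the caller):
```python
# Hypothetical helper showing the "DagRun first, TaskInstance second" lock order.
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance


def lock_in_scheduler_order(session, dag_id: str, run_id: str, task_id: str):
    # 1. Lock the DagRun row first, the same way the scheduler does before
    #    it calls DagRun.schedule_tis().
    dag_run = (
        session.query(DagRun)
        .filter(DagRun.dag_id == dag_id, DagRun.run_id == run_id)
        .with_for_update()
        .one()
    )
    # 2. Only then lock the TaskInstance row. With every code path taking
    #    DagRun before TaskInstance, the reverse-order deadlock described
    #    above cannot occur.
    ti = (
        session.query(TaskInstance)
        .filter(
            TaskInstance.dag_id == dag_id,
            TaskInstance.run_id == run_id,
            TaskInstance.task_id == task_id,
        )
        .with_for_update()
        .one_or_none()
    )
    return dag_run, ti
```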
The only thing I do not know is WHY the `TaskInstance.refresh_from_db` actually does the JOIN query:
```
SELECT FROM task_instance JOIN dag_run AS dag_run_1 ON dag_run_1.dag_id = task_instance.dag_id AND dag_run_1.run_id ... FOR UPDATE
```
The original query in the code looks like this:
```
qry = session.query(TaskInstance).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == self.run_id,
TaskInstance.map_index == self.map_index,
)
if lock_for_update:
for attempt in run_with_db_retries(logger=self.log):
with attempt:
ti: Optional[TaskInstance] = qry.with_for_update().first()
```
And there is no obvious reason why the last line joins the dag_run table?
I hope someone else in this thread might shed some light on it. I have a suspicion that SQLAlchemy will add the join in case there is a ForeignKey with ON DELETE CASCADE on the dag_id (which we have), but I could not find any reference or documentation that would point to such behaviour.
@RNHTTR - since you mentioned you can reproduce the issue - maybe you could apply my fix and see if it solves the problem (there is a bit of leap of faith with this change).
Not related to deadlocks, but is it necessary to use a `FOR UPDATE` lock rather than `FOR NO KEY UPDATE` ([doc](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-ROWS))?
> Not related to deadlocks, but is it necessary to use a `FOR UPDATE` lock rather than `FOR NO KEY UPDATE` ([doc](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-ROWS))?
Can you do it cross-db (MySQL/Postgres/MsSQL?) and in sqlalchemy? @Taragolis ?
Also see the discussion I started today - https://lists.apache.org/thread/pgo1qxqsnzwg2do955961tkvxk76m8gw - we have enough trouble about "some" DB features not available in some databases. so we should strive for lowest-common-denominator I am afraid.
@potiuk Thanks a ton for the feedback!
Regarding why the dag_run table is locked when `with_for_update()` is called on the TaskInstance table, I believe this is due to `lazy='joined'` in the relationship between TaskInstance and DagRun:
https://github.com/apache/airflow/blob/2.2.5/airflow/models/taskinstance.py#L408
This behavior is described in the following sqlalchemy issue:
https://github.com/sqlalchemy/sqlalchemy/issues/4100
The behavior was not changed after the above issue was filed, but the following warning was added to the SQLAlchemy documentation:
> Using with_for_update in the context of eager loading relationships is not officially supported or recommended by SQLAlchemy and may not work with certain queries on various database backends. When with_for_update is successfully used with a query that involves joinedload(), SQLAlchemy will attempt to emit SQL that locks all involved tables.
https://docs.sqlalchemy.org/en/14/orm/loading_relationships.html
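To make that behaviour concrete, here is a self-contained toy sketch (not the real Airflow models; import paths assume SQLAlchemy 1.4): a relationship declared with `lazy='joined'` pulls the parent table into the same SELECT, so a plain `with_for_update()` ends up locking both tables, while `of=` narrows the lock.
```python
# Toy models (stand-ins for task_instance / dag_run) showing how lazy="joined"
# interacts with with_for_update().
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()


class Run(Base):  # stand-in for dag_run
    __tablename__ = "run"
    id = Column(Integer, primary_key=True)


class Task(Base):  # stand-in for task_instance
    __tablename__ = "task"
    id = Column(Integer, primary_key=True)
    run_id = Column(Integer, ForeignKey("run.id"))
    run = relationship(Run, lazy="joined")  # eager JOIN, like TaskInstance.dag_run


session = Session()  # unbound session, enough for building queries

# Because of the joined eager load, "run" becomes part of the locking SELECT,
# so the FOR UPDATE applies to rows from both tables.
broad_lock = session.query(Task).with_for_update()

# of=Task narrows it to FOR UPDATE OF task on backends that support the OF clause.
narrow_lock = session.query(Task).with_for_update(of=Task)
```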
> Can you do it cross-db (MySQL/Postgres/MsSQL?) and in sqlalchemy? @Taragolis ?
I do not actually know whether it is supported by other DB engines or whether it has exactly the same behaviour.
It definitely works with all modern PostgreSQL (9.3+) and [sqlalchemy](https://docs.sqlalchemy.org/en/14/changelog/migration_11.html#support-for-for-update-skip-locked-for-no-key-update-for-key-share); basically we need to set `key_share=True`.
One of the differences is that with `FOR NO KEY UPDATE` we can insert/delete a record that references (FK) a locked value in another table; if we use `FOR UPDATE`, the transaction would block until the row is released, and in some cases this could also cause deadlocks.
`FOR UPDATE` might be strictly required for HA in Airflow; unfortunately I have not checked this yet (that's why I ask). I recently had some performance issues with row-level locking on, when tasks access `dag_run` and `ti` in templates on LocalExecutor.
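For reference, a small sketch of how those lock strengths map onto SQLAlchemy's `with_for_update()` flags (assuming SQLAlchemy 1.4 and the PostgreSQL dialect); whether the weaker lock is actually safe for Airflow's HA logic is exactly the open question here:
```python
# Sketch: render the PostgreSQL row-lock variants from with_for_update() flags.
from sqlalchemy import Column, Integer, select
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class Demo(Base):
    __tablename__ = "demo"
    id = Column(Integer, primary_key=True)


variants = {
    "FOR UPDATE": select(Demo).with_for_update(),
    "FOR NO KEY UPDATE": select(Demo).with_for_update(key_share=True),
    "FOR SHARE": select(Demo).with_for_update(read=True),
    "FOR KEY SHARE": select(Demo).with_for_update(read=True, key_share=True),
}
for label, stmt in variants.items():
    print(label, "=>", stmt.compile(dialect=postgresql.dialect()))
```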
> The behavior was not changed after the above issue was filed, but the following warning was added to the SQLAlchemy documentation:
Ahhhhh. That would indeed explain it. I think then that my solution is actually the right approach :)
> I do not actually know whether it is supported by other DB engines or whether it has exactly the same behaviour.
@Taragolis it would be worth checking. The DagRun lock `SELECT FOR UPDATE SKIP LOCKED` is very much the "key" (pun intended) to making multiple schedulers work, and it also (as you can see) spilled a bit into the mini-scheduler and "task run" in the form of a plain `SELECT FOR UPDATE`. The `SELECT FOR UPDATE SKIP LOCKED` is precisely the mechanism that allows multiple schedulers to run in parallel with basically no serialization and no "accidental state overrides".
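For readers following along, a tiny illustrative sketch of that pattern (not the scheduler's actual query; the filter and ordering are simplified, and the session is assumed to be managed by the caller):
```python
# Sketch: claim a batch of DagRuns, skipping rows another scheduler already locked.
from airflow.models.dagrun import DagRun
from airflow.utils.state import State


def claim_dag_runs(session, limit: int = 10):
    return (
        session.query(DagRun)
        .filter(DagRun.state == State.RUNNING)
        .order_by(DagRun.last_scheduling_decision)
        .limit(limit)
        # SKIP LOCKED: rows held by another scheduler are silently skipped
        # instead of blocking, so multiple schedulers can work in parallel.
        .with_for_update(skip_locked=True)
        .all()
    )
```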
And we need to make sure that it works for MySQL 8 and Postgres, because this is our "baseline". We cannot rely on Postgres-only features, though we would love to; I started some threads in the past, mostly along the lines of "we are fed up with MySQL, let's dump it". See for example this "Elephant in the Room" thread at the devlist https://lists.apache.org/thread/dp78j0ssyhx62008lbtblrc856nbmlfb . The answer so far, and the wisdom of the crowd, is "No, as much as we would like to, we cannot get rid of MySQL". And if you look at the results of our survey https://airflow.apache.org/blog/airflow-survey-2022/ , while Postgres is by far the strongest (also because it is now the only supported DB for ALL managed Airflow services), there are still ~20% of people who use MySQL (or MariaDB, but we finally decided to explicitly exclude MariaDB from supported databases and actively encourage people to migrate out if they use it).
So while I would love to start the discussion with "Can we use this Postgres feature?", when we think about product development the question is "Is this feature supported in both Postgres AND MySQL 8+?". If not, we won't even discuss it, because if we start actively using Postgres-only features to optimize stuff, we are going to impair our MySQL users, and eventually we will implement things that only work for Postgres and behaviours that differ between Postgres and MySQL, and we certainly do not want that. Something that looks good for you as a user (using Postgres only) might not be acceptable for the product (with both Postgres and MySQL being supported).
I looked (very briefly) to see if a similar feature exists in MySQL, and it seems not, but I did not look too deeply. But if you think it is worth considering, then starting with a deeper investigation that justifies both the benefits and the cross-db portability is what I would advise :).
I think your question is phrased a bit wrongly:
> is it necessary to use a FOR UPDATE lock rather than FOR NO KEY UPDATE?
It should rather be:
"I see that we can use that new feature NO KEY in Postgres and also equivalent in MySQL. It has those and those benefits and we can make it cross-db - doc here, doc here". Is this good enough reason to switch to it ?
> it locks the TaskInstance and DagRun in REVERSE ORDER compared to what the Scheduler does.
@potiuk I re-read your explanation and agree completely, the essence of the specific deadlock I did the deep-dive on above is `TaskInstance.refresh_from_db` and `DagRun.schedule_tis` both lock `task_instance` and `dag_run`, but in opposite orders, which causes a deadlock. I agree if we change `TaskInstance.refresh_from_db` to guarantee the order, that should prevent deadlocks between those two statements.
Regarding @RNHTTR repro'ing a deadlock by deleting a running DAG run, it might be related, but it isn't exactly the same situation. At the minimum the deadlock forced by @RNHTTR probably involves a DELETE statement rather than an UPDATE. Indeed @argibbs reported deadlocks when deleting DAGs and they had DELETE statements for one of the queries (the other was not reported). It's possible that the "other query" in that case was the same SELECT ... FOR UPDATE from `TaskInstance.refresh_from_db` I reported above, but I don't want to get ahead of myself here. (Of course it's still worth checking if #25266 resolves the DELETE deadlocks in case we're lucky and they're the same issue.)
Regarding `with_for_update(of=TaskInstance)` I previously suggested, understanding you've rejected this as a potential solution, it's worth reporting that I internally forked Airflow 2.2.5 and added this change about 10 days ago and it completely eliminated the deadlocks between `TaskInstance.refresh_from_db` and `DagRun.schedule_tis`, which were occurring every 2h or so and have not occurred since. At the minimum this confirms our understanding above.
I can check your alternative proposed fix https://github.com/apache/airflow/pull/25266 in a staging environment but it may be a week or two before I can deploy more widely and conclusively report on whether or not it fixes the scheduler UPDATE deadlocking with the task instance SELECT ... FOR UPDATE above.
@dstaple - it would be great if you could check. I think we can merge it regardless (it's super easy to revert), so there is no problem with some later checking. I also was not sure if the DELETE issue is the same. It could be (and I have the scenario in my head):
DELETE DagRun with CASCADE on TI first creates a lock on the DagRun and only THEN on the TaskInstance, very similarly to what the Scheduler does.
And in this case the fix above should also help, so @RNHTTR I'd appreciate you checking it :)
I'm on it! | 2022-07-26T15:57:55Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1256, in _execute_context
self.dialect.do_executemany(
File "/usr/local/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 912, in do_executemany
cursor.executemany(statement, parameters)
psycopg2.errors.DeadlockDetected: deadlock detected
| 2,556 |
|||
apache/airflow | apache__airflow-25355 | f6b48ac6dfaf931a5433ec16369302f68f038c65 | diff --git a/airflow/decorators/base.py b/airflow/decorators/base.py
--- a/airflow/decorators/base.py
+++ b/airflow/decorators/base.py
@@ -455,7 +455,7 @@ def _expand_mapped_kwargs(self, resolve: Optional[Tuple[Context, Session]]) -> D
assert self.expand_input is EXPAND_INPUT_EMPTY
return {"op_kwargs": super()._expand_mapped_kwargs(resolve)}
- def _get_unmap_kwargs(self, mapped_kwargs: Dict[str, Any], *, strict: bool) -> Dict[str, Any]:
+ def _get_unmap_kwargs(self, mapped_kwargs: Mapping[str, Any], *, strict: bool) -> Dict[str, Any]:
if strict:
prevent_duplicates(
self.partial_kwargs["op_kwargs"],
diff --git a/airflow/models/expandinput.py b/airflow/models/expandinput.py
--- a/airflow/models/expandinput.py
+++ b/airflow/models/expandinput.py
@@ -22,7 +22,7 @@
import collections.abc
import functools
import operator
-from typing import TYPE_CHECKING, Any, Iterable, NamedTuple, Sequence, Sized, Union
+from typing import TYPE_CHECKING, Any, Iterable, Mapping, NamedTuple, Sequence, Sized, Union
from sqlalchemy import func
from sqlalchemy.orm import Session
@@ -195,10 +195,16 @@ def _find_index_for_this_field(index: int) -> int:
return k, v
raise IndexError(f"index {map_index} is over mapped length")
- def resolve(self, context: Context, session: Session) -> dict[str, Any]:
+ def resolve(self, context: Context, session: Session) -> Mapping[str, Any]:
return {k: self._expand_mapped_field(k, v, context, session=session) for k, v in self.value.items()}
+def _describe_type(value: Any) -> str:
+ if value is None:
+ return "None"
+ return type(value).__name__
+
+
class ListOfDictsExpandInput(NamedTuple):
"""Storage type of a mapped operator's mapped kwargs.
@@ -245,12 +251,23 @@ def get_total_map_length(self, run_id: str, *, session: Session) -> int:
raise NotFullyPopulated({"expand_kwargs() argument"})
return value
- def resolve(self, context: Context, session: Session) -> dict[str, Any]:
+ def resolve(self, context: Context, session: Session) -> Mapping[str, Any]:
map_index = context["ti"].map_index
if map_index < 0:
raise RuntimeError("can't resolve task-mapping argument without expanding")
- # Validation should be done when the upstream returns.
- return self.value.resolve(context, session)[map_index]
+ mappings = self.value.resolve(context, session)
+ if not isinstance(mappings, collections.abc.Sequence):
+ raise ValueError(f"expand_kwargs() expects a list[dict], not {_describe_type(mappings)}")
+ mapping = mappings[map_index]
+ if not isinstance(mapping, collections.abc.Mapping):
+ raise ValueError(f"expand_kwargs() expects a list[dict], not list[{_describe_type(mapping)}]")
+ for key in mapping:
+ if not isinstance(key, str):
+ raise ValueError(
+ f"expand_kwargs() input dict keys must all be str, "
+ f"but {key!r} is of type {_describe_type(key)}"
+ )
+ return mapping
EXPAND_INPUT_EMPTY = DictOfListsExpandInput({}) # Sentinel value.
diff --git a/airflow/models/mappedoperator.py b/airflow/models/mappedoperator.py
--- a/airflow/models/mappedoperator.py
+++ b/airflow/models/mappedoperator.py
@@ -30,6 +30,7 @@
Iterable,
Iterator,
List,
+ Mapping,
Optional,
Sequence,
Set,
@@ -123,7 +124,7 @@ def validate_mapping_kwargs(op: Type["BaseOperator"], func: ValidationSource, va
raise TypeError(f"{op.__name__}.{func}() got {error}")
-def prevent_duplicates(kwargs1: Dict[str, Any], kwargs2: Dict[str, Any], *, fail_reason: str) -> None:
+def prevent_duplicates(kwargs1: Dict[str, Any], kwargs2: Mapping[str, Any], *, fail_reason: str) -> None:
duplicated_keys = set(kwargs1).intersection(kwargs2)
if not duplicated_keys:
return
@@ -528,7 +529,7 @@ def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:
"""Implementing DAGNode."""
return DagAttributeTypes.OP, self.task_id
- def _expand_mapped_kwargs(self, resolve: Optional[Tuple[Context, Session]]) -> Dict[str, Any]:
+ def _expand_mapped_kwargs(self, resolve: Optional[Tuple[Context, Session]]) -> Mapping[str, Any]:
"""Get the kwargs to create the unmapped operator.
If *resolve* is not *None*, it must be a two-tuple to provide context to
@@ -546,7 +547,7 @@ def _expand_mapped_kwargs(self, resolve: Optional[Tuple[Context, Session]]) -> D
return expand_input.resolve(*resolve)
return expand_input.get_unresolved_kwargs()
- def _get_unmap_kwargs(self, mapped_kwargs: Dict[str, Any], *, strict: bool) -> Dict[str, Any]:
+ def _get_unmap_kwargs(self, mapped_kwargs: Mapping[str, Any], *, strict: bool) -> Dict[str, Any]:
"""Get init kwargs to unmap the underlying operator class.
:param mapped_kwargs: The dict returned by ``_expand_mapped_kwargs``.
@@ -569,7 +570,7 @@ def _get_unmap_kwargs(self, mapped_kwargs: Dict[str, Any], *, strict: bool) -> D
**mapped_kwargs,
}
- def unmap(self, resolve: Union[None, Dict[str, Any], Tuple[Context, Session]]) -> "BaseOperator":
+ def unmap(self, resolve: Union[None, Mapping[str, Any], Tuple[Context, Session]]) -> "BaseOperator":
"""Get the "normal" Operator after applying the current mapping.
If ``operator_class`` is not a class (i.e. this DAG has been
| expand_kwargs.map(func) gives unhelpful error message if func returns list
### Apache Airflow version
main (development)
### What happened
Here's a DAG:
```python3
with DAG(
dag_id="expand_list",
doc_md="try to get kwargs from a list",
schedule_interval=None,
start_date=datetime(2001, 1, 1),
) as expand_list:
@expand_list.task
def do_this():
return [
("echo hello $USER", "USER", "foo"),
("echo hello $USER", "USER", "bar"),
]
def mapper(tuple):
if tuple[2] == "bar":
return [1, 2, 3]
else:
return {"bash_command": tuple[0], "env": {tuple[1]: tuple[2]}}
BashOperator.partial(task_id="one_cmd").expand_kwargs(do_this().map(mapper))
```
The `foo` task instance succeeds as expected, and the `bar` task fails as expected. But the error message that it gives isn't particularly helpful to a user who doesn't know what they did wrong:
```
ERROR - Failed to execute task: resolve() takes 3 positional arguments but 4 were given.
Traceback (most recent call last):
File "/home/matt/src/airflow/airflow/executors/debug_executor.py", line 78, in _run_task
ti.run(job_id=ti.job_id, **params)
File "/home/matt/src/airflow/airflow/utils/session.py", line 71, in wrapper
return func(*args, session=session, **kwargs)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 1782, in run
self._run_raw_task(
File "/home/matt/src/airflow/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 1445, in _run_raw_task
self._execute_task_with_callbacks(context, test_mode)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 1580, in _execute_task_with_callbacks
task_orig = self.render_templates(context=context)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 2202, in render_templates
rendered_task = self.task.render_template_fields(context)
File "/home/matt/src/airflow/airflow/models/mappedoperator.py", line 751, in render_template_fields
unmapped_task = self.unmap(mapped_kwargs)
File "/home/matt/src/airflow/airflow/models/mappedoperator.py", line 591, in unmap
kwargs = self._expand_mapped_kwargs(resolve)
File "/home/matt/src/airflow/airflow/models/mappedoperator.py", line 546, in _expand_mapped_kwargs
return expand_input.resolve(*resolve)
TypeError: resolve() takes 3 positional arguments but 4 were given
```
### What you think should happen instead
Whatever checks the return value for mappability should do more to point the user to their error. Perhaps something like:
> UnmappableDataError: Expected a dict with keys that BashOperator accepts, got `[1, 2, 3]` instead
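(For comparison, the user-side fix in the DAG above is simply a mapper that returns an operator-kwargs dict on every branch; a minimal sketch, keeping the tuple layout from the reproduction:)
```python
def mapper(tup):
    # Every branch returns a dict whose keys are valid BashOperator arguments,
    # so each expanded task instance gets a mappable value.
    command, env_key, env_value = tup
    return {"bash_command": command, "env": {env_key: env_value}}
```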
### How to reproduce
Run the dag above
### Operating System
Linux 5.10.101 #1-NixOS SMP Wed Feb 16 11:54:31 UTC 2022 x86_64 GNU/Linux
### Versions of Apache Airflow Providers
n/a
### Deployment
Virtualenv installation
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2022-07-28T06:02:28Z | [] | [] |
Traceback (most recent call last):
File "/home/matt/src/airflow/airflow/executors/debug_executor.py", line 78, in _run_task
ti.run(job_id=ti.job_id, **params)
File "/home/matt/src/airflow/airflow/utils/session.py", line 71, in wrapper
return func(*args, session=session, **kwargs)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 1782, in run
self._run_raw_task(
File "/home/matt/src/airflow/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 1445, in _run_raw_task
self._execute_task_with_callbacks(context, test_mode)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 1580, in _execute_task_with_callbacks
task_orig = self.render_templates(context=context)
File "/home/matt/src/airflow/airflow/models/taskinstance.py", line 2202, in render_templates
rendered_task = self.task.render_template_fields(context)
File "/home/matt/src/airflow/airflow/models/mappedoperator.py", line 751, in render_template_fields
unmapped_task = self.unmap(mapped_kwargs)
File "/home/matt/src/airflow/airflow/models/mappedoperator.py", line 591, in unmap
kwargs = self._expand_mapped_kwargs(resolve)
File "/home/matt/src/airflow/airflow/models/mappedoperator.py", line 546, in _expand_mapped_kwargs
return expand_input.resolve(*resolve)
TypeError: resolve() takes 3 positional arguments but 4 were given
| 2,557 |
||||
apache/airflow | apache__airflow-25757 | dc738cde04d91084bf79b1a601395b7abd41d8ca | diff --git a/airflow/models/mappedoperator.py b/airflow/models/mappedoperator.py
--- a/airflow/models/mappedoperator.py
+++ b/airflow/models/mappedoperator.py
@@ -623,7 +623,17 @@ def expand_mapped_task(self, run_id: str, *, session: Session) -> Tuple[Sequence
from airflow.models.taskinstance import TaskInstance
from airflow.settings import task_instance_mutation_hook
- total_length = self._get_specified_expand_input().get_total_map_length(run_id, session=session)
+ total_length: Optional[int]
+ try:
+ total_length = self._get_specified_expand_input().get_total_map_length(run_id, session=session)
+ except NotFullyPopulated as e:
+ self.log.info(
+ "Cannot expand %r for run %s; missing upstream values: %s",
+ self,
+ run_id,
+ sorted(e.missing),
+ )
+ total_length = None
state: Optional[TaskInstanceState] = None
unmapped_ti: Optional[TaskInstance] = (
@@ -643,23 +653,32 @@ def expand_mapped_task(self, run_id: str, *, session: Session) -> Tuple[Sequence
if unmapped_ti:
# The unmapped task instance still exists and is unfinished, i.e. we
# haven't tried to run it before.
- if total_length < 1:
- # If the upstream maps this to a zero-length value, simply marked the
- # unmapped task instance as SKIPPED (if needed).
+ if total_length is None:
+ # If the map length cannot be calculated (due to unavailable
+ # upstream sources), fail the unmapped task.
+ unmapped_ti.state = TaskInstanceState.UPSTREAM_FAILED
+ indexes_to_map: Iterable[int] = ()
+ elif total_length < 1:
+ # If the upstream maps this to a zero-length value, simply mark
+ # the unmapped task instance as SKIPPED (if needed).
self.log.info(
"Marking %s as SKIPPED since the map has %d values to expand",
unmapped_ti,
total_length,
)
unmapped_ti.state = TaskInstanceState.SKIPPED
+ indexes_to_map = ()
else:
# Otherwise convert this into the first mapped index, and create
# TaskInstance for other indexes.
unmapped_ti.map_index = 0
self.log.debug("Updated in place to become %s", unmapped_ti)
all_expanded_tis.append(unmapped_ti)
+ indexes_to_map = range(1, total_length)
state = unmapped_ti.state
- indexes_to_map = range(1, total_length)
+ elif not total_length:
+ # Nothing to fixup.
+ indexes_to_map = ()
else:
# Only create "missing" ones.
current_max_mapping = (
@@ -682,17 +701,21 @@ def expand_mapped_task(self, run_id: str, *, session: Session) -> Tuple[Sequence
ti.refresh_from_task(self) # session.merge() loses task information.
all_expanded_tis.append(ti)
+ # Coerce the None case to 0 -- these two are almost treated identically,
+ # except the unmapped ti (if exists) is marked to different states.
+ total_expanded_ti_count = total_length or 0
+
# Set to "REMOVED" any (old) TaskInstances with map indices greater
# than the current map value
session.query(TaskInstance).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.run_id == run_id,
- TaskInstance.map_index >= total_length,
+ TaskInstance.map_index >= total_expanded_ti_count,
).update({TaskInstance.state: TaskInstanceState.REMOVED})
session.flush()
- return all_expanded_tis, total_length
+ return all_expanded_tis, total_expanded_ti_count - 1
def prepare_for_execution(self) -> "MappedOperator":
# Since a mapped operator cannot be used for execution, and an unmapped
| Backfill mode with mapped tasks: "Failed to populate all mapping metadata"
### Apache Airflow version
2.3.3
### What happened
I was backfilling some DAGs that use dynamic tasks when I got an exception like the following:
```
Traceback (most recent call last):
File "/opt/conda/envs/production/bin/airflow", line 11, in <module>
sys.exit(main())
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/__main__.py", line 38, in main
args.func(args)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 51, in command
return func(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/cli.py", line 99, in wrapper
return f(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/cli/commands/dag_command.py", line 107, in dag_backfill
dag.run(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/models/dag.py", line 2288, in run
job.run()
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 244, in run
self._execute()
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/session.py", line 71, in wrapper
return func(*args, session=session, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 847, in _execute
self._execute_dagruns(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 737, in _execute_dagruns
processed_dag_run_dates = self._process_backfill_task_instances(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 612, in _process_backfill_task_instances
for node, run_id, new_mapped_tis, max_map_index in self._manage_executor_state(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 270, in _manage_executor_state
new_tis, num_mapped_tis = node.expand_mapped_task(ti.run_id, session=session)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 614, in expand_mapped_task
operator.mul, self._resolve_map_lengths(run_id, session=session).values()
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 600, in _resolve_map_lengths
raise RuntimeError(f"Failed to populate all mapping metadata; missing: {keys}")
RuntimeError: Failed to populate all mapping metadata; missing: 'x'
```
Digging further, it appears this always happens if the task used as input to an `.expand` raises an Exception. Airflow doesn't handle this exception gracefully like it does with exceptions in "normal" tasks, which can lead to other errors from deeper within Airflow. This also means that since this is not a "typical" failure case, things like `--rerun-failed-tasks` do not work as expected.
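As an illustration of the mechanism (an editor's addition, not part of the original report): the mapped task's width is resolved from the XCom its upstream pushed, so when the upstream raises instead of returning, that XCom is missing and the length lookup fails. The patch above guards that lookup roughly like the sketch below; the helper name is made up and the import location of `NotFullyPopulated` is an assumption, while `get_total_map_length` is the call shown in the diff.
```python
from typing import Optional


def safe_total_map_length(expand_input, run_id, session) -> Optional[int]:
    # Assumed import location for the exception raised when the upstream XCom
    # needed to compute the map length is absent.
    from airflow.models.expandinput import NotFullyPopulated

    try:
        return expand_input.get_total_map_length(run_id, session=session)
    except NotFullyPopulated:
        # The caller can then mark the unmapped task instance UPSTREAM_FAILED
        # instead of crashing the backfill/scheduler loop.
        return None
```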
### What you think should happen instead
Airflow should fail gracefully if exceptions are raised in dynamic task generators.
### How to reproduce
```
#!/usr/bin/env python3
import datetime
import logging

from airflow.decorators import dag, task

logger = logging.getLogger(__name__)


@dag(
    schedule_interval='@daily',
    start_date=datetime.datetime(2022, 8, 12),
    default_args={
        'retries': 5,
        'retry_delay': 5.0,
    },
)
def test_backfill():
    @task
    def get_tasks(ti=None):
        logger.info(f'{ti.try_number=}')
        if ti.try_number < 3:
            raise RuntimeError('')
        return ['a', 'b', 'c']

    @task
    def do_stuff(x=None, ti=None):
        logger.info(f'do_stuff: {x=}, {ti.try_number=}')
        if ti.try_number < 3:
            raise RuntimeError('')

    do_stuff.expand(x=do_stuff.expand(x=get_tasks()))
    do_stuff() >> do_stuff()  # this works as expected


dag = test_backfill()

if __name__ == '__main__':
    dag.cli()
```
```
airflow dags backfill test_backfill -s 2022-08-05 -e 2022-08-07 --rerun-failed-tasks
```
You can repeat the `backfill` command multiple times to slowly make progress through the DAG. Things will eventually succeed (assuming the exception that triggers this bug stops being raised), but obviously this is a pain when trying to backfill a non-trivial number of DAG Runs.
### Operating System
CentOS Stream 8
### Versions of Apache Airflow Providers
None
### Deployment
Other
### Deployment details
Standalone
### Anything else
I was able to reproduce this both with SQLite + `SequentialExecutor` as well as with Postgres + `LocalExecutor`.
I haven't yet been able to reproduce this outside of `backfill` mode.
Possibly related since they mention the same exception text:
* #23533
* #23642
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| After #25661 this may cause a different error. I will need to look into this further (not now).
To clarify, the downstream (mapped task) will never run correctly in any scenario, since if the upstream raises an exception, there's nothing the task can be expanded into. But the scheduler should handle this more gracefully. | 2022-08-17T07:01:07Z | [] | [] |
Traceback (most recent call last):
File "/opt/conda/envs/production/bin/airflow", line 11, in <module>
sys.exit(main())
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/__main__.py", line 38, in main
args.func(args)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 51, in command
return func(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/cli.py", line 99, in wrapper
return f(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/cli/commands/dag_command.py", line 107, in dag_backfill
dag.run(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/models/dag.py", line 2288, in run
job.run()
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 244, in run
self._execute()
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/session.py", line 71, in wrapper
return func(*args, session=session, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 847, in _execute
self._execute_dagruns(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 737, in _execute_dagruns
processed_dag_run_dates = self._process_backfill_task_instances(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 612, in _process_backfill_task_instances
for node, run_id, new_mapped_tis, max_map_index in self._manage_executor_state(
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/jobs/backfill_job.py", line 270, in _manage_executor_state
new_tis, num_mapped_tis = node.expand_mapped_task(ti.run_id, session=session)
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 614, in expand_mapped_task
operator.mul, self._resolve_map_lengths(run_id, session=session).values()
File "/opt/conda/envs/production/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 600, in _resolve_map_lengths
raise RuntimeError(f"Failed to populate all mapping metadata; missing: {keys}")
RuntimeError: Failed to populate all mapping metadata; missing: 'x'
| 2,569 |
|||
apache/airflow | apache__airflow-25793 | 648e224cd455f1e374c58cfa48eb1c0ed69c698d | diff --git a/airflow/models/abstractoperator.py b/airflow/models/abstractoperator.py
--- a/airflow/models/abstractoperator.py
+++ b/airflow/models/abstractoperator.py
@@ -26,6 +26,7 @@
Dict,
FrozenSet,
Iterable,
+ Iterator,
List,
Optional,
Sequence,
@@ -54,6 +55,7 @@
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DAG
+ from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
from airflow.models.taskinstance import TaskInstance
@@ -231,6 +233,70 @@ def get_flat_relatives(self, upstream: bool = False) -> Collection["Operator"]:
return set()
return [dag.task_dict[task_id] for task_id in self.get_flat_relative_ids(upstream)]
+ def _iter_all_mapped_downstreams(self) -> Iterator["MappedOperator"]:
+ """Return mapped nodes that are direct dependencies of the current task.
+
+ For now, this walks the entire DAG to find mapped nodes that has this
+ current task as an upstream. We cannot use ``downstream_list`` since it
+ only contains operators, not task groups. In the future, we should
+ provide a way to record an DAG node's all downstream nodes instead.
+
+ Note that this does not guarantee the returned tasks actually use the
+ current task for task mapping, but only checks those task are mapped
+ operators, and are downstreams of the current task.
+
+ To get a list of tasks that uses the current task for task mapping, use
+ :meth:`iter_mapped_dependants` instead.
+ """
+ from airflow.models.mappedoperator import MappedOperator
+ from airflow.utils.task_group import TaskGroup
+
+ def _walk_group(group: TaskGroup) -> Iterable[Tuple[str, DAGNode]]:
+ """Recursively walk children in a task group.
+
+ This yields all direct children (including both tasks and task
+ groups), and all children of any task groups.
+ """
+ for key, child in group.children.items():
+ yield key, child
+ if isinstance(child, TaskGroup):
+ yield from _walk_group(child)
+
+ dag = self.get_dag()
+ if not dag:
+ raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
+ for key, child in _walk_group(dag.task_group):
+ if key == self.node_id:
+ continue
+ if not isinstance(child, MappedOperator):
+ continue
+ if self.node_id in child.upstream_task_ids:
+ yield child
+
+ def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
+ """Return mapped nodes that depend on the current task the expansion.
+
+ For now, this walks the entire DAG to find mapped nodes that has this
+ current task as an upstream. We cannot use ``downstream_list`` since it
+ only contains operators, not task groups. In the future, we should
+ provide a way to record an DAG node's all downstream nodes instead.
+ """
+ return (
+ downstream
+ for downstream in self._iter_all_mapped_downstreams()
+ if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
+ )
+
+ def unmap(self, resolve: Union[None, Dict[str, Any], Tuple[Context, "Session"]]) -> "BaseOperator":
+ """Get the "normal" operator from current abstract operator.
+
+ MappedOperator uses this to unmap itself based on the map index. A non-
+ mapped operator (i.e. BaseOperator subclass) simply returns itself.
+
+ :meta private:
+ """
+ raise NotImplementedError()
+
@property
def priority_weight_total(self) -> int:
"""
@@ -291,16 +357,6 @@ def global_operator_extra_link_dict(self) -> Dict[str, Any]:
def extra_links(self) -> List[str]:
return list(set(self.operator_extra_link_dict).union(self.global_operator_extra_link_dict))
- def unmap(self, resolve: Union[None, Dict[str, Any], Tuple[Context, "Session"]]) -> "BaseOperator":
- """Get the "normal" operator from current abstract operator.
-
- MappedOperator uses this to unmap itself based on the map index. A non-
- mapped operator (i.e. BaseOperator subclass) simply returns itself.
-
- :meta private:
- """
- raise NotImplementedError()
-
def get_extra_links(self, ti: "TaskInstance", link_name: str) -> Optional[str]:
"""For an operator, gets the URLs that the ``extra_links`` entry points to.
diff --git a/airflow/models/taskmixin.py b/airflow/models/taskmixin.py
--- a/airflow/models/taskmixin.py
+++ b/airflow/models/taskmixin.py
@@ -17,7 +17,7 @@
import warnings
from abc import ABCMeta, abstractmethod
-from typing import TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Union
+from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Sequence, Set, Tuple, Union
import pendulum
@@ -28,7 +28,6 @@
from logging import Logger
from airflow.models.dag import DAG
- from airflow.models.mappedoperator import MappedOperator
from airflow.utils.edgemodifier import EdgeModifier
from airflow.utils.task_group import TaskGroup
@@ -273,57 +272,3 @@ def get_direct_relatives(self, upstream: bool = False) -> Iterable["DAGNode"]:
def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:
"""This is used by SerializedTaskGroup to serialize a task group's content."""
raise NotImplementedError()
-
- def _iter_all_mapped_downstreams(self) -> Iterator["MappedOperator"]:
- """Return mapped nodes that are direct dependencies of the current task.
-
- For now, this walks the entire DAG to find mapped nodes that has this
- current task as an upstream. We cannot use ``downstream_list`` since it
- only contains operators, not task groups. In the future, we should
- provide a way to record an DAG node's all downstream nodes instead.
-
- Note that this does not guarantee the returned tasks actually use the
- current task for task mapping, but only checks those task are mapped
- operators, and are downstreams of the current task.
-
- To get a list of tasks that uses the current task for task mapping, use
- :meth:`iter_mapped_dependants` instead.
- """
- from airflow.models.mappedoperator import MappedOperator
- from airflow.utils.task_group import TaskGroup
-
- def _walk_group(group: TaskGroup) -> Iterable[Tuple[str, DAGNode]]:
- """Recursively walk children in a task group.
-
- This yields all direct children (including both tasks and task
- groups), and all children of any task groups.
- """
- for key, child in group.children.items():
- yield key, child
- if isinstance(child, TaskGroup):
- yield from _walk_group(child)
-
- tg = self.task_group
- if not tg:
- raise RuntimeError("Cannot check for mapped dependants when not attached to a DAG")
- for key, child in _walk_group(tg):
- if key == self.node_id:
- continue
- if not isinstance(child, MappedOperator):
- continue
- if self.node_id in child.upstream_task_ids:
- yield child
-
- def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
- """Return mapped nodes that depend on the current task the expansion.
-
- For now, this walks the entire DAG to find mapped nodes that has this
- current task as an upstream. We cannot use ``downstream_list`` since it
- only contains operators, not task groups. In the future, we should
- provide a way to record an DAG node's all downstream nodes instead.
- """
- return (
- downstream
- for downstream in self._iter_all_mapped_downstreams()
- if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
- )
| XComs from another task group fail to populate dynamic task mapping metadata
### Apache Airflow version
2.3.3
### What happened
When a task returns a mappable XCom from within a task group, the dynamic task mapping feature (via `.expand`) causes the Airflow scheduler to loop indefinitely with a runtime error:
```
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 38, in main
args.func(args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 51, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 99, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/scheduler_command.py", line 75, in scheduler
_run_scheduler_job(args=args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/scheduler_command.py", line 46, in _run_scheduler_job
job.run()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 244, in run
self._execute()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 751, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 839, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 921, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 1163, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 524, in update_state
info = self.task_instance_scheduling_decisions(session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 654, in task_instance_scheduling_decisions
schedulable_tis, changed_tis, expansion_happened = self._get_ready_tis(
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 710, in _get_ready_tis
expanded_tis, _ = schedulable.task.expand_mapped_task(self.run_id, session=session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 614, in expand_mapped_task
operator.mul, self._resolve_map_lengths(run_id, session=session).values()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 600, in _resolve_map_lengths
raise RuntimeError(f"Failed to populate all mapping metadata; missing: {keys}")
RuntimeError: Failed to populate all mapping metadata; missing: 'x'
```
### What you think should happen instead
XComs produced in one task group should be mappable by tasks in another task group.
### How to reproduce
```
from airflow import DAG
from airflow.decorators import task
from airflow.utils.task_group import TaskGroup
import pendulum


@task
def enumerate(x):
    return [i for i in range(x)]


@task
def addOne(x):
    return x+1


with DAG(
    dag_id="TaskGroupMappingBug",
    schedule_interval=None,
    start_date=pendulum.now().subtract(days=1),
) as dag:
    with TaskGroup(group_id="enumerateNine"):
        y = enumerate(9)

    with TaskGroup(group_id="add"):
        # airflow scheduler throws error here so this is never reached
        z = addOne.expand(x=y)
```
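For comparison (an editor's addition, not in the original report), the same expansion outside of any TaskGroup is the basic supported mapping pattern and does not involve a cross-group lookup, which is consistent with the dependant search that the patch above moves from the task's own group to the DAG's root task group:
```python
# Reuses the `enumerate` and `addOne` tasks defined in the reproduction above.
with DAG(
    dag_id="NoTaskGroupMapping",
    schedule_interval=None,
    start_date=pendulum.now().subtract(days=1),
):
    addOne.expand(x=enumerate(9))
```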
### Operating System
linux/amd64 via Docker (apache/airflow:2.3.3-python3.9)
### Versions of Apache Airflow Providers
_No response_
### Deployment
Docker-Compose
### Deployment details
docker-compose version 1.29.2, build 5becea4c
Docker Engine v20.10.14
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
| 2022-08-18T11:00:15Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 38, in main
args.func(args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 51, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 99, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/scheduler_command.py", line 75, in scheduler
_run_scheduler_job(args=args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/scheduler_command.py", line 46, in _run_scheduler_job
job.run()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 244, in run
self._execute()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 751, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 839, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 921, in _do_scheduling
callback_to_run = self._schedule_dag_run(dag_run, session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/scheduler_job.py", line 1163, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 524, in update_state
info = self.task_instance_scheduling_decisions(session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 68, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 654, in task_instance_scheduling_decisions
schedulable_tis, changed_tis, expansion_happened = self._get_ready_tis(
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dagrun.py", line 710, in _get_ready_tis
expanded_tis, _ = schedulable.task.expand_mapped_task(self.run_id, session=session)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 614, in expand_mapped_task
operator.mul, self._resolve_map_lengths(run_id, session=session).values()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/mappedoperator.py", line 600, in _resolve_map_lengths
raise RuntimeError(f"Failed to populate all mapping metadata; missing: {keys}")
RuntimeError: Failed to populate all mapping metadata; missing: 'x'
| 2,571 |
|||
apache/airflow | apache__airflow-25821 | 0254f30a5a90f0c3104782525fabdcfdc6d3b7df | diff --git a/airflow/providers/common/sql/operators/sql.py b/airflow/providers/common/sql/operators/sql.py
--- a/airflow/providers/common/sql/operators/sql.py
+++ b/airflow/providers/common/sql/operators/sql.py
@@ -350,7 +350,7 @@ class SQLTableCheckOperator(BaseSQLOperator):
sql_check_template = """
SELECT '_check_name' AS check_name, MIN(_check_name) AS check_result
- FROM(SELECT CASE WHEN check_statement THEN 1 ELSE 0 END AS _check_name FROM table)
+ FROM (SELECT CASE WHEN check_statement THEN 1 ELSE 0 END AS _check_name FROM table) AS sq
"""
def __init__(
@@ -382,8 +382,10 @@ def execute(self, context: 'Context'):
]
)
partition_clause_statement = f"WHERE {self.partition_clause}" if self.partition_clause else ""
- self.sql = f"SELECT check_name, check_result FROM ({checks_sql}) "
- f"AS check_table {partition_clause_statement};"
+ self.sql = f"""
+ SELECT check_name, check_result FROM ({checks_sql})
+ AS check_table {partition_clause_statement}
+ """
records = hook.get_pandas_df(self.sql)
| SQLTableCheckOperator fails for Postgres
### Apache Airflow version
2.3.3
### What happened
`SQLTableCheckOperator` fails when used with Postgres.
### What you think should happen instead
From the logs:
```
[2022-08-19, 09:28:14 UTC] {taskinstance.py:1910} ERROR - Task failed with exception
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/providers/common/sql/operators/sql.py", line 296, in execute
records = hook.get_first(self.sql)
File "/usr/local/lib/python3.9/site-packages/airflow/hooks/dbapi.py", line 178, in get_first
cur.execute(sql)
psycopg2.errors.SyntaxError: subquery in FROM must have an alias
LINE 1: SELECT MIN(row_count_check) FROM (SELECT CASE WHEN COUNT(*) ...
^
HINT: For example, FROM (SELECT ...) [AS] foo.
```
### How to reproduce
```python
import pendulum

from datetime import timedelta

from airflow import DAG
from airflow.decorators import task
from airflow.providers.common.sql.operators.sql import SQLTableCheckOperator
from airflow.providers.postgres.operators.postgres import PostgresOperator

_POSTGRES_CONN = "postgresdb"
_TABLE_NAME = "employees"

default_args = {
    "owner": "cs",
    "retries": 3,
    "retry_delay": timedelta(seconds=15),
}

with DAG(
    dag_id="sql_data_quality",
    start_date=pendulum.datetime(2022, 8, 1, tz="UTC"),
    schedule_interval=None,
) as dag:
    create_table = PostgresOperator(
        task_id="create_table",
        postgres_conn_id=_POSTGRES_CONN,
        sql=f"""
        CREATE TABLE IF NOT EXISTS {_TABLE_NAME} (
            employee_name VARCHAR NOT NULL,
            employment_year INT NOT NULL
        );
        """
    )

    populate_data = PostgresOperator(
        task_id="populate_data",
        postgres_conn_id=_POSTGRES_CONN,
        sql=f"""
        INSERT INTO {_TABLE_NAME} VALUES ('Adam', 2021);
        INSERT INTO {_TABLE_NAME} VALUES ('Chris', 2021);
        INSERT INTO {_TABLE_NAME} VALUES ('Frank', 2021);
        INSERT INTO {_TABLE_NAME} VALUES ('Fritz', 2021);
        INSERT INTO {_TABLE_NAME} VALUES ('Magda', 2022);
        INSERT INTO {_TABLE_NAME} VALUES ('Phil', 2021);
        """,
    )

    check_row_count = SQLTableCheckOperator(
        task_id="check_row_count",
        conn_id=_POSTGRES_CONN,
        table=_TABLE_NAME,
        checks={
            "row_count_check": {"check_statement": "COUNT(*) >= 3"}
        },
    )

    drop_table = PostgresOperator(
        task_id="drop_table",
        trigger_rule="all_done",
        postgres_conn_id=_POSTGRES_CONN,
        sql=f"DROP TABLE {_TABLE_NAME};",
    )

    create_table >> populate_data >> check_row_count >> drop_table
```
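To make the PostgreSQL hint in the log above concrete (an editor's addition, not part of the report): PostgreSQL requires an alias on a derived table, so the check SQL generated for `row_count_check` against the `employees` table from the DAG above needs roughly the shape the patch at the top of this entry produces:
```python
# Illustrative only; the operator builds this string itself at runtime.
fixed_check_sql = """
SELECT 'row_count_check' AS check_name, MIN(row_count_check) AS check_result
FROM (
    SELECT CASE WHEN COUNT(*) >= 3 THEN 1 ELSE 0 END AS row_count_check
    FROM employees
) AS sq
"""
```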
### Operating System
macOS
### Versions of Apache Airflow Providers
`apache-airflow-providers-common-sql==1.0.0`
### Deployment
Astronomer
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
| 2022-08-19T11:12:33Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/airflow/providers/common/sql/operators/sql.py", line 296, in execute
records = hook.get_first(self.sql)
File "/usr/local/lib/python3.9/site-packages/airflow/hooks/dbapi.py", line 178, in get_first
cur.execute(sql)
psycopg2.errors.SyntaxError: subquery in FROM must have an alias
| 2,573 |
|||
apache/airflow | apache__airflow-25970 | 876536ea3c45d5f15fcfbe81eda3ee01a101faa3 | diff --git a/airflow/configuration.py b/airflow/configuration.py
--- a/airflow/configuration.py
+++ b/airflow/configuration.py
@@ -1545,19 +1545,18 @@ def get_custom_secret_backend() -> Optional[BaseSecretsBackend]:
"""Get Secret Backend if defined in airflow.cfg"""
secrets_backend_cls = conf.getimport(section='secrets', key='backend')
if secrets_backend_cls:
- try:
- backends: Any = conf.get(section='secrets', key='backend_kwargs', fallback='{}')
- alternative_secrets_config_dict = json.loads(backends)
- except JSONDecodeError:
- alternative_secrets_config_dict = {}
-
- return _custom_secrets_backend(secrets_backend_cls, **alternative_secrets_config_dict)
+ backends: Any = conf.get(section='secrets', key='backend_kwargs', fallback='{}')
+ return _custom_secrets_backend(secrets_backend_cls, backends)
return None
@functools.lru_cache(maxsize=2)
-def _custom_secrets_backend(secrets_backend_cls, **alternative_secrets_config_dict):
+def _custom_secrets_backend(secrets_backend_cls, backend_kwargs):
"""Separate function to create secrets backend instance to allow caching"""
+ try:
+ alternative_secrets_config_dict = json.loads(backend_kwargs)
+ except JSONDecodeError:
+ alternative_secrets_config_dict = {}
return secrets_backend_cls(**alternative_secrets_config_dict)
| Unable to configure Google Secrets Manager in 2.3.4
### Apache Airflow version
2.3.4
### What happened
I am attempting to configure a Google Secrets Manager secrets backend using the `gcp_keyfile_dict` param in a `.env` file with the following ENV Vars:
```
AIRFLOW__SECRETS__BACKEND=airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend
AIRFLOW__SECRETS__BACKEND_KWARGS='{"connections_prefix": "airflow-connections", "variables_prefix": "airflow-variables", "gcp_keyfile_dict": <json-keyfile>}'
```
In previous versions, including 2.3.3, this worked without issue.
After upgrading to Astro Runtime 5.0.8 I get the following error, taken from the scheduler container logs. The scheduler, webserver, and triggerer are continually restarting:
```
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 5, in <module>
from airflow.__main__ import main
File "/usr/local/lib/python3.9/site-packages/airflow/__init__.py", line 35, in <module>
from airflow import settings
File "/usr/local/lib/python3.9/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/usr/local/lib/python3.9/site-packages/airflow/configuration.py", line 1618, in <module>
secrets_backend_list = initialize_secrets_backends()
File "/usr/local/lib/python3.9/site-packages/airflow/configuration.py", line 1540, in initialize_secrets_backends
custom_secret_backend = get_custom_secret_backend()
File "/usr/local/lib/python3.9/site-packages/airflow/configuration.py", line 1523, in get_custom_secret_backend
return _custom_secrets_backend(secrets_backend_cls, **alternative_secrets_config_dict)
TypeError: unhashable type: 'dict'
```
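The `TypeError` comes from the `functools.lru_cache` visible in the diff above: the cache hashes the call arguments, and a dict-valued keyword argument (here the parsed backend kwargs) is unhashable. A standalone illustration (an editor's addition, not from the original report; the function and argument names are made up):
```python
import functools


@functools.lru_cache(maxsize=2)
def make_backend(backend_cls, **backend_kwargs):
    return backend_cls(**backend_kwargs)


# The dict-valued keyword argument cannot be hashed into the cache key:
make_backend(dict, gcp_keyfile_dict={"type": "service_account"})
# TypeError: unhashable type: 'dict'
```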
### What you think should happen instead
Containers should remain healthy and the secrets backend should successfully be added
### How to reproduce
`astro dev init` a fresh project
Dockerfile:
`FROM quay.io/astronomer/astro-runtime:5.0.8`
`.env` file:
```
AIRFLOW__SECRETS__BACKEND=airflow.providers.google.cloud.secrets.secret_manager.CloudSecretManagerBackend
AIRFLOW__SECRETS__BACKEND_KWARGS='{"connections_prefix": "airflow-connections", "variables_prefix": "airflow-variables", "gcp_keyfile_dict": <service-acct-json-keyfile>}'
```
`astro dev start`
### Operating System
macOS 11.6.8
### Versions of Apache Airflow Providers
[apache-airflow-providers-google](https://airflow.apache.org/docs/apache-airflow-providers-google/8.1.0/) 8.1.0
### Deployment
Astronomer
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [x] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| @pdebelak - I think this is caused by the LRU cache introduced in https://github.com/apache/airflow/pull/25556 - could you take a look and see if it can be fixed or worked around?
I believe the problem is that a dict is indeed not hashable, and a dict can be passed as a parameter of the secrets backend configuration.
For now, I don't see an easy workaround other than using `gcp_key_path` and putting the key in the same path in your workers - would that be a feasible workaround for now @aspain ?
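A concrete shape of that `gcp_key_path` workaround (an editor's illustration, not part of the original thread; the file path is a placeholder and the keyfile must exist at that path on every Airflow component):
```
AIRFLOW__SECRETS__BACKEND_KWARGS='{"connections_prefix": "airflow-connections", "variables_prefix": "airflow-variables", "gcp_key_path": "/path/to/keyfile.json"}'
```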
With an Astronomer project I don't have access to the workers (other than locally) and would have to include the keyfile in the repository the project deploys from; ideally the keyfile would not need to be in the repository.
In my local environment I am using a .env file but not pushing it to the repo, and in the Astro UI I am able to add environment variables.
Yes, this is related to the new `lru_cache` in 2.3.4; I didn't realize this would break in this way. There isn't an easy workaround. We might need to revert that change in this case and add a test to make sure we don't break it in the same way again.
I see a fix for this that I will PR, but I don't see a workaround for version 2.3.4 if you have a `AIRFLOW__SECRETS__BACKEND_KWARGS` containing a nested dictionary.
yeah. There is no easy workaround I could see for that one. I will raise it to the release mgmt team (we have one more bug that might make us do 2.3.5 before we release 2.4.0). In the meantime @pdebelak - looking forward to a fix :D | 2022-08-25T23:24:46Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 5, in <module>
from airflow.__main__ import main
File "/usr/local/lib/python3.9/site-packages/airflow/__init__.py", line 35, in <module>
from airflow import settings
File "/usr/local/lib/python3.9/site-packages/airflow/settings.py", line 35, in <module>
from airflow.configuration import AIRFLOW_HOME, WEBSERVER_CONFIG, conf # NOQA F401
File "/usr/local/lib/python3.9/site-packages/airflow/configuration.py", line 1618, in <module>
secrets_backend_list = initialize_secrets_backends()
File "/usr/local/lib/python3.9/site-packages/airflow/configuration.py", line 1540, in initialize_secrets_backends
custom_secret_backend = get_custom_secret_backend()
File "/usr/local/lib/python3.9/site-packages/airflow/configuration.py", line 1523, in get_custom_secret_backend
return _custom_secrets_backend(secrets_backend_cls, **alternative_secrets_config_dict)
TypeError: unhashable type: 'dict'
| 2,575 |
|||
apache/airflow | apache__airflow-26369 | 5e9589c685bcec769041e0a1692035778869f718 | diff --git a/airflow/serialization/serialized_objects.py b/airflow/serialization/serialized_objects.py
--- a/airflow/serialization/serialized_objects.py
+++ b/airflow/serialization/serialized_objects.py
@@ -17,6 +17,7 @@
"""Serialized DAG and BaseOperator"""
from __future__ import annotations
+import collections.abc
import datetime
import enum
import logging
@@ -24,7 +25,7 @@
import weakref
from dataclasses import dataclass
from inspect import Parameter, signature
-from typing import TYPE_CHECKING, Any, Iterable, NamedTuple, Type
+from typing import TYPE_CHECKING, Any, Collection, Iterable, Mapping, NamedTuple, Type, Union
import cattr
import lazy_object_proxy
@@ -207,6 +208,26 @@ def deref(self, dag: DAG) -> XComArg:
return deserialize_xcom_arg(self.data, dag)
+# These two should be kept in sync. Note that these are intentionally not using
+# the type declarations in expandinput.py so we always remember to update
+# serialization logic when adding new ExpandInput variants. If you add things to
+# the unions, be sure to update _ExpandInputRef to match.
+_ExpandInputOriginalValue = Union[
+ # For .expand(**kwargs).
+ Mapping[str, Any],
+ # For expand_kwargs(arg).
+ XComArg,
+ Collection[Union[XComArg, Mapping[str, Any]]],
+]
+_ExpandInputSerializedValue = Union[
+ # For .expand(**kwargs).
+ Mapping[str, Any],
+ # For expand_kwargs(arg).
+ _XComRef,
+ Collection[Union[_XComRef, Mapping[str, Any]]],
+]
+
+
class _ExpandInputRef(NamedTuple):
"""Used to store info needed to create a mapped operator's expand input.
@@ -215,13 +236,29 @@ class _ExpandInputRef(NamedTuple):
"""
key: str
- value: _XComRef | dict[str, Any]
+ value: _ExpandInputSerializedValue
+
+ @classmethod
+ def validate_expand_input_value(cls, value: _ExpandInputOriginalValue) -> None:
+ """Validate we've covered all ``ExpandInput.value`` types.
+
+ This function does not actually do anything, but is called during
+ serialization so Mypy will *statically* check we have handled all
+ possible ExpandInput cases.
+ """
def deref(self, dag: DAG) -> ExpandInput:
+ """De-reference into a concrete ExpandInput object.
+
+ If you add more cases here, be sure to update _ExpandInputOriginalValue
+ and _ExpandInputSerializedValue to match the logic.
+ """
if isinstance(self.value, _XComRef):
value: Any = self.value.deref(dag)
- else:
+ elif isinstance(self.value, collections.abc.Mapping):
value = {k: v.deref(dag) if isinstance(v, _XComRef) else v for k, v in self.value.items()}
+ else:
+ value = [v.deref(dag) if isinstance(v, _XComRef) else v for v in self.value]
return create_expand_input(self.key, value)
@@ -663,6 +700,8 @@ def serialize_mapped_operator(cls, op: MappedOperator) -> dict[str, Any]:
serialized_op = cls._serialize_node(op, include_deps=op.deps != MappedOperator.deps_for(BaseOperator))
# Handle expand_input and op_kwargs_expand_input.
expansion_kwargs = op._get_specified_expand_input()
+ if TYPE_CHECKING: # Let Mypy check the input type for us!
+ _ExpandInputRef.validate_expand_input_value(expansion_kwargs.value)
serialized_op[op._expand_input_attr] = {
"type": get_map_type_key(expansion_kwargs),
"value": cls.serialize(expansion_kwargs.value),
| dynamic dataset ref breaks when viewed in UI or when triggered (dagbag.py:_add_dag_from_db)
### Apache Airflow version
2.4.0b1
### What happened
Here's a file which defines three DAGs. The "source" DAG uses `Operator.partial` to reference either "sink". I'm not sure whether it's supported to do so, but Airflow should at least fail more gracefully than it does.
```python3
from datetime import datetime, timedelta
from time import sleep

from airflow import Dataset
from airflow.decorators import dag
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator

ps = Dataset("partial_static")
p1 = Dataset("partial_dynamic_1")
p2 = Dataset("partial_dynamic_2")
p3 = Dataset("partial_dynamic_3")


def sleep_n(n):
    sleep(n)


@dag(start_date=datetime(1970, 1, 1), schedule=timedelta(days=365 * 30))
def two_kinds_dynamic_source():
    # dataset ref is not dynamic
    PythonOperator.partial(
        task_id="partial_static", python_callable=sleep_n, outlets=[ps]
    ).expand(op_args=[[1], [20], [40]])

    # dataset ref is dynamic
    PythonOperator.partial(
        task_id="partial_dynamic", python_callable=sleep_n
    ).expand_kwargs(
        [
            {"op_args": [1], "outlets": [p1]},
            {"op_args": [20], "outlets": [p2]},
            {"op_args": [40], "outlets": [p3]},
        ]
    )


two_kinds_dynamic_source()


@dag(schedule=[ps], start_date=datetime(1970, 1, 1))
def two_kinds_static_sink():
    DummyOperator(task_id="dummy")


two_kinds_static_sink()


@dag(schedule=[p1, p2, p3], start_date=datetime(1970, 1, 1))
def two_kinds_dynamic_sink():
    DummyOperator(task_id="dummy")


two_kinds_dynamic_sink()
```
Tried to trigger the dag in the browser, saw this traceback instead:
```
Python version: 3.9.13
Airflow version: 2.4.0.dev1640+astro.1
Node: airflow-webserver-6b969cbd87-4q5kh
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2525, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1822, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1820, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1796, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/www/auth.py", line 46, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/decorators.py", line 117, in view_func
return f(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/decorators.py", line 80, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/utils/session.py", line 73, in wrapper
return func(*args, session=session, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/views.py", line 2532, in grid
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 176, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 251, in _add_dag_from_db
dag = row.dag
File "/usr/local/lib/python3.9/site-packages/airflow/models/serialized_dag.py", line 223, in dag
dag = SerializedDAG.from_dict(self.data) # type: Any
File "/usr/local/lib/python3.9/site-packages/airflow/serialization/serialized_objects.py", line 1220, in from_dict
return cls.deserialize_dag(serialized_obj['dag'])
File "/usr/local/lib/python3.9/site-packages/airflow/serialization/serialized_objects.py", line 1197, in deserialize_dag
setattr(task, k, kwargs_ref.deref(dag))
File "/usr/local/lib/python3.9/site-packages/airflow/serialization/serialized_objects.py", line 224, in deref
value = {k: v.deref(dag) if isinstance(v, _XComRef) else v for k, v in self.value.items()}
AttributeError: 'list' object has no attribute 'items'
```
I can also summon a similar traceback by just trying to view the dag in the grid view, or when running `airflow dags trigger`
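An editor's note on the mechanism (not part of the original report): the failure happens while deserializing the mapped operator's expand input. `.expand(...)` stores its value as a mapping, while `.expand_kwargs([...])` with a literal list stores a list, and the pre-fix deserializer assumed a mapping, hence `'list' object has no attribute 'items'`. Roughly, the two stored shapes look like this (illustrative, not the exact serialized structure):
```python
expand_value = {"op_args": [[1], [20], [40]]}  # from .expand(op_args=...)

expand_kwargs_value = [  # from .expand_kwargs([...]) with a literal list
    {"op_args": [1], "outlets": ["partial_dynamic_1"]},
    {"op_args": [20], "outlets": ["partial_dynamic_2"]},
    {"op_args": [40], "outlets": ["partial_dynamic_3"]},
]
```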
### What you think should happen instead
If there's something invalid about this DAG, it should fail to parse, rather than parsing successfully and then breaking the UI.
I'm a bit uncertain about what should happen in the dag dependency graph when the source dag runs. The dynamic outlets are not known until runtime, so it's reasonable that they don't show up in the graph. But what about after the dag runs?
- do they still trigger the "sink" dag even though we didn't know about the dependency up front?
- do we update the dependency graph now that we know about the dynamic dependency?
Because of this error, we don't get far enough to find out.
### How to reproduce
Include the dag above, try to display it in the grid view.
### Operating System
kubernetes-in-docker on MacOS via helm
### Versions of Apache Airflow Providers
n/a
### Deployment
Other 3rd-party Helm chart
### Deployment details
Deployed using [the Astronomer Helm chart](https://github.com/astronomer/airflow-chart) and these values:
```yaml
airflow:
  airflowHome: /usr/local/airflow
  airflowVersion: $VERSION
  defaultAirflowRepository: img
  defaultAirflowTag: $TAG
  executor: KubernetesExecutor
  gid: 50000
  images:
    airflow:
      repository: img
  logs:
    persistence:
      enabled: true
      size: 2Gi
```
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2022-09-13T15:30:47Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 2525, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1822, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1820, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1796, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
File "/usr/local/lib/python3.9/site-packages/airflow/www/auth.py", line 46, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/decorators.py", line 117, in view_func
return f(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/decorators.py", line 80, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/utils/session.py", line 73, in wrapper
return func(*args, session=session, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/www/views.py", line 2532, in grid
dag = get_airflow_app().dag_bag.get_dag(dag_id, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/utils/session.py", line 70, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 176, in get_dag
self._add_dag_from_db(dag_id=dag_id, session=session)
File "/usr/local/lib/python3.9/site-packages/airflow/models/dagbag.py", line 251, in _add_dag_from_db
dag = row.dag
File "/usr/local/lib/python3.9/site-packages/airflow/models/serialized_dag.py", line 223, in dag
dag = SerializedDAG.from_dict(self.data) # type: Any
File "/usr/local/lib/python3.9/site-packages/airflow/serialization/serialized_objects.py", line 1220, in from_dict
return cls.deserialize_dag(serialized_obj['dag'])
File "/usr/local/lib/python3.9/site-packages/airflow/serialization/serialized_objects.py", line 1197, in deserialize_dag
setattr(task, k, kwargs_ref.deref(dag))
File "/usr/local/lib/python3.9/site-packages/airflow/serialization/serialized_objects.py", line 224, in deref
value = {k: v.deref(dag) if isinstance(v, _XComRef) else v for k, v in self.value.items()}
AttributeError: 'list' object has no attribute 'items'
| 2,584 |
||||
apache/airflow | apache__airflow-27609 | c20c3f01ca069e98e302a328fe45e3d750956d03 | diff --git a/airflow/callbacks/callback_requests.py b/airflow/callbacks/callback_requests.py
--- a/airflow/callbacks/callback_requests.py
+++ b/airflow/callbacks/callback_requests.py
@@ -84,17 +84,17 @@ def __init__(
self.is_failure_callback = is_failure_callback
def to_json(self) -> str:
- dict_obj = self.__dict__.copy()
- dict_obj["simple_task_instance"] = self.simple_task_instance.as_dict()
- return json.dumps(dict_obj)
+ from airflow.serialization.serialized_objects import BaseSerialization
+
+ val = BaseSerialization.serialize(self.__dict__, strict=True)
+ return json.dumps(val)
@classmethod
def from_json(cls, json_str: str):
- from airflow.models.taskinstance import SimpleTaskInstance
+ from airflow.serialization.serialized_objects import BaseSerialization
- kwargs = json.loads(json_str)
- simple_ti = SimpleTaskInstance.from_dict(obj_dict=kwargs.pop("simple_task_instance"))
- return cls(simple_task_instance=simple_ti, **kwargs)
+ val = json.loads(json_str)
+ return cls(**BaseSerialization.deserialize(val))
class DagCallbackRequest(CallbackRequest):
diff --git a/airflow/exceptions.py b/airflow/exceptions.py
--- a/airflow/exceptions.py
+++ b/airflow/exceptions.py
@@ -216,7 +216,7 @@ def __str__(self) -> str:
class SerializationError(AirflowException):
- """A problem occurred when trying to serialize a DAG."""
+ """A problem occurred when trying to serialize something."""
class ParamValidationError(AirflowException):
diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -2571,6 +2571,11 @@ def __eq__(self, other):
return NotImplemented
def as_dict(self):
+ warnings.warn(
+ "This method is deprecated. Use BaseSerialization.serialize.",
+ RemovedInAirflow3Warning,
+ stacklevel=2,
+ )
new_dict = dict(self.__dict__)
for key in new_dict:
if key in ["start_date", "end_date"]:
@@ -2601,6 +2606,11 @@ def from_ti(cls, ti: TaskInstance) -> SimpleTaskInstance:
@classmethod
def from_dict(cls, obj_dict: dict) -> SimpleTaskInstance:
+ warnings.warn(
+ "This method is deprecated. Use BaseSerialization.deserialize.",
+ RemovedInAirflow3Warning,
+ stacklevel=2,
+ )
ti_key = TaskInstanceKey(*obj_dict.pop("key"))
start_date = None
end_date = None
diff --git a/airflow/serialization/enums.py b/airflow/serialization/enums.py
--- a/airflow/serialization/enums.py
+++ b/airflow/serialization/enums.py
@@ -50,3 +50,4 @@ class DagAttributeTypes(str, Enum):
PARAM = "param"
XCOM_REF = "xcomref"
DATASET = "dataset"
+ SIMPLE_TASK_INSTANCE = "simple_task_instance"
diff --git a/airflow/serialization/serialized_objects.py b/airflow/serialization/serialized_objects.py
--- a/airflow/serialization/serialized_objects.py
+++ b/airflow/serialization/serialized_objects.py
@@ -44,6 +44,7 @@
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
from airflow.models.param import Param, ParamsDict
+from airflow.models.taskinstance import SimpleTaskInstance
from airflow.models.taskmixin import DAGNode
from airflow.models.xcom_arg import XComArg, deserialize_xcom_arg, serialize_xcom_arg
from airflow.providers_manager import ProvidersManager
@@ -381,7 +382,9 @@ def serialize_to_json(
return serialized_object
@classmethod
- def serialize(cls, var: Any) -> Any: # Unfortunately there is no support for recursive types in mypy
+ def serialize(
+ cls, var: Any, *, strict: bool = False
+ ) -> Any: # Unfortunately there is no support for recursive types in mypy
"""Helper function of depth first search for serialization.
The serialization protocol is:
@@ -400,9 +403,11 @@ def serialize(cls, var: Any) -> Any: # Unfortunately there is no support for re
return var.value
return var
elif isinstance(var, dict):
- return cls._encode({str(k): cls.serialize(v) for k, v in var.items()}, type_=DAT.DICT)
+ return cls._encode(
+ {str(k): cls.serialize(v, strict=strict) for k, v in var.items()}, type_=DAT.DICT
+ )
elif isinstance(var, list):
- return [cls.serialize(v) for v in var]
+ return [cls.serialize(v, strict=strict) for v in var]
elif var.__class__.__name__ == "V1Pod" and _has_kubernetes() and isinstance(var, k8s.V1Pod):
json_pod = PodGenerator.serialize_pod(var)
return cls._encode(json_pod, type_=DAT.POD)
@@ -427,12 +432,12 @@ def serialize(cls, var: Any) -> Any: # Unfortunately there is no support for re
elif isinstance(var, set):
# FIXME: casts set to list in customized serialization in future.
try:
- return cls._encode(sorted(cls.serialize(v) for v in var), type_=DAT.SET)
+ return cls._encode(sorted(cls.serialize(v, strict=strict) for v in var), type_=DAT.SET)
except TypeError:
- return cls._encode([cls.serialize(v) for v in var], type_=DAT.SET)
+ return cls._encode([cls.serialize(v, strict=strict) for v in var], type_=DAT.SET)
elif isinstance(var, tuple):
# FIXME: casts tuple to list in customized serialization in future.
- return cls._encode([cls.serialize(v) for v in var], type_=DAT.TUPLE)
+ return cls._encode([cls.serialize(v, strict=strict) for v in var], type_=DAT.TUPLE)
elif isinstance(var, TaskGroup):
return TaskGroupSerialization.serialize_task_group(var)
elif isinstance(var, Param):
@@ -441,8 +446,12 @@ def serialize(cls, var: Any) -> Any: # Unfortunately there is no support for re
return cls._encode(serialize_xcom_arg(var), type_=DAT.XCOM_REF)
elif isinstance(var, Dataset):
return cls._encode(dict(uri=var.uri, extra=var.extra), type_=DAT.DATASET)
+ elif isinstance(var, SimpleTaskInstance):
+ return cls._encode(cls.serialize(var.__dict__, strict=strict), type_=DAT.SIMPLE_TASK_INSTANCE)
else:
log.debug("Cast type %s to str in serialization.", type(var))
+ if strict:
+ raise SerializationError("Encountered unexpected type")
return str(var)
@classmethod
@@ -491,6 +500,8 @@ def deserialize(cls, encoded_var: Any) -> Any:
return _XComRef(var) # Delay deserializing XComArg objects until we have the entire DAG.
elif type_ == DAT.DATASET:
return Dataset(**var)
+ elif type_ == DAT.SIMPLE_TASK_INSTANCE:
+ return SimpleTaskInstance(**cls.deserialize(var))
else:
raise TypeError(f"Invalid type {type_!s} in deserialization.")
| Object of type V1Pod is not JSON serializable after detecting zombie jobs, causing scheduler CrashLoopBackOff
### Apache Airflow version
2.4.2
### What happened
Some DAGs have tasks with `pod_override` in `executor_config` that become zombie tasks. The Airflow scheduler then crashes with this exception:
```
[2022-11-10T15:29:59.886+0000] {scheduler_job.py:1526} ERROR - Detected zombie job: {'full_filepath': '/opt/airflow/dags/path.py', 'processor_subdir': '/opt/airflow/dags', 'msg': "{'DAG Id': 'dag_id', 'Task Id': 'taskid', 'Run Id': 'manual__2022-11-10T10:21:25.330307+00:00', 'Hostname': 'hostname'}", 'simple_task_instance': <airflow.models.taskinstance.SimpleTaskInstance object at 0x7fde9c91dcd0>, 'is_failure_callback': True}
[2022-11-10T15:29:59.887+0000] {scheduler_job.py:763} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 746, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 878, in _run_scheduler_loop
next_event = timers.run(blocking=False)
File "/usr/local/lib/python3.7/sched.py", line 151, in run
action(*argument, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/event_scheduler.py", line 37, in repeat
action(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1527, in _find_zombies
self.executor.send_callback(request)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/executors/base_executor.py", line 400, in send_callback
self.callback_sink.send(request)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/callbacks/database_callback_sink.py", line 34, in send
db_callback = DbCallbackRequest(callback=callback, priority_weight=10)
File "<string>", line 4, in __init__
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/state.py", line 480, in _initialize_instance
manager.dispatch.init_failure(self, args, kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py", line 72, in __exit__
with_traceback=exc_tb,
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/state.py", line 477, in _initialize_instance
return manager.original_init(*mixed[1:], **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/db_callback_request.py", line 46, in __init__
self.callback_data = callback.to_json()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/callbacks/callback_requests.py", line 89, in to_json
return json.dumps(dict_obj)
File "/usr/local/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/usr/local/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type V1Pod is not JSON serializable
```
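An editor's illustration of the underlying failure (not from the original report): the callback's `to_json` used plain `json.dumps`, which cannot handle arbitrary objects such as the `V1Pod` carried in `executor_config`; the patch above routes the payload through Airflow's `BaseSerialization` instead. The class below is a made-up stand-in, since any plain object triggers the same error:
```python
import json


class FakePod:
    """Stand-in for kubernetes.client.models.V1Pod."""


json.dumps({"executor_config": {"pod_override": FakePod()}})
# TypeError: Object of type FakePod is not JSON serializable
```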
### What you think should happen instead
`DbCallbackRequest` should serialize to JSON (`to_json`) successfully.
### How to reproduce
Start Airflow with the KubernetesExecutor.
Let a task that sets `pod_override` in its `executor_config` become a zombie task.
### Operating System
docker.io/apache/airflow:2.4.2
### Versions of Apache Airflow Providers
_No response_
### Deployment
Official Apache Airflow Helm Chart
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
Did you try `airflow dags reserialize` ? https://airflow.apache.org/docs/apache-airflow/stable/cli-and-env-variables-ref.html#reserialize
Can you check if it fixes your problem?
Having the same problem; `airflow dags reserialize` isn't helping.
Thank you for your quick reply @potiuk . I confirm `airflow dags reserialize` doesn't fix the problem. This is a clean setup. I migrated a DAG from 2.2.4 to 2.4.2 for testing.
This is likely related to #24356; with this patch, the scheduler now fully loads V1Pod instances in the process. cc @dstandish
can you post dag code? minimal reproducible dag please. @eTopchik @akizminet
ok here's repro code:
```python
from __future__ import annotations
from kubernetes.client import models as k8s
from airflow.callbacks.callback_requests import TaskCallbackRequest
from airflow.models import TaskInstance
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.operators.bash import BashOperator
op = BashOperator(task_id="hi", executor_config={"pod_override": k8s.V1Pod()}, bash_command="hi")
ti = TaskInstance(task=op)
s = SimpleTaskInstance.from_ti(ti)
TaskCallbackRequest("hi", s).to_json()
```
i don't think it's caused by that PR.
fix forthcoming | 2022-11-10T22:50:53Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 746, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 878, in _run_scheduler_loop
next_event = timers.run(blocking=False)
File "/usr/local/lib/python3.7/sched.py", line 151, in run
action(*argument, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/event_scheduler.py", line 37, in repeat
action(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1527, in _find_zombies
self.executor.send_callback(request)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/executors/base_executor.py", line 400, in send_callback
self.callback_sink.send(request)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/callbacks/database_callback_sink.py", line 34, in send
db_callback = DbCallbackRequest(callback=callback, priority_weight=10)
File "<string>", line 4, in __init__
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/state.py", line 480, in _initialize_instance
manager.dispatch.init_failure(self, args, kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py", line 72, in __exit__
with_traceback=exc_tb,
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/state.py", line 477, in _initialize_instance
return manager.original_init(*mixed[1:], **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/db_callback_request.py", line 46, in __init__
self.callback_data = callback.to_json()
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/callbacks/callback_requests.py", line 89, in to_json
return json.dumps(dict_obj)
File "/usr/local/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/usr/local/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type V1Pod is not JSON serializable
| 2,601 |
|||
apache/airflow | apache__airflow-28003 | 527fbce462429fc9836837378f801eed4e9d194f | diff --git a/airflow/models/dag.py b/airflow/models/dag.py
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -1950,7 +1950,7 @@ def set_dag_runs_state(
@provide_session
def clear(
self,
- task_ids: Collection[str] | Collection[tuple[str, int]] | None = None,
+ task_ids: Collection[str | tuple[str, int]] | None = None,
start_date: datetime | None = None,
end_date: datetime | None = None,
only_failed: bool = False,
diff --git a/airflow/www/views.py b/airflow/www/views.py
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -33,7 +33,7 @@
from functools import wraps
from json import JSONDecodeError
from operator import itemgetter
-from typing import Any, Callable
+from typing import Any, Callable, Collection
from urllib.parse import unquote, urljoin, urlsplit
import configupdater
@@ -2013,11 +2013,12 @@ def _clear_dag_tis(
dag: DAG,
start_date: datetime | None,
end_date: datetime | None,
- origin: str,
- task_ids=None,
- recursive=False,
- confirmed=False,
- only_failed=False,
+ *,
+ origin: str | None,
+ task_ids: Collection[str | tuple[str, int]] | None = None,
+ recursive: bool = False,
+ confirmed: bool = False,
+ only_failed: bool = False,
session: Session = NEW_SESSION,
):
if confirmed:
@@ -2144,7 +2145,7 @@ def clear(self, *, session: Session = NEW_SESSION):
dag,
start_date,
end_date,
- origin,
+ origin=origin,
task_ids=task_ids,
recursive=recursive,
confirmed=confirmed,
@@ -2164,7 +2165,8 @@ def clear(self, *, session: Session = NEW_SESSION):
]
)
@action_logging
- def dagrun_clear(self):
+ @provide_session
+ def dagrun_clear(self, *, session: Session = NEW_SESSION):
"""Clears the DagRun"""
dag_id = request.form.get("dag_id")
dag_run_id = request.form.get("dag_run_id")
@@ -2182,6 +2184,7 @@ def dagrun_clear(self):
origin=None,
recursive=True,
confirmed=confirmed,
+ session=session,
)
@expose("/blocked", methods=["POST"])
| Clearing dag run via UI fails on main branch and 2.5.0rc2
### Apache Airflow version
main (development)
### What happened
Create a simple dag, allow it to completely run through.
Next, when in grid view, on the left hand side click on the dag run at the top level.
Then, on the right-hand side, click on "Clear existing tasks". This will error with the following on the web server:
```
[2022-11-29 17:55:05,939] {app.py:1742} ERROR - Exception on /dagrun_clear [POST]
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2525, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1822, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1820, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1796, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
File "/opt/airflow/airflow/www/auth.py", line 47, in decorated
return func(*args, **kwargs)
File "/opt/airflow/airflow/www/decorators.py", line 83, in wrapper
return f(*args, **kwargs)
File "/opt/airflow/airflow/www/views.py", line 2184, in dagrun_clear
confirmed=confirmed,
File "/opt/airflow/airflow/www/views.py", line 2046, in _clear_dag_tis
session=session,
File "/opt/airflow/airflow/utils/session.py", line 72, in wrapper
return func(*args, **kwargs)
File "/opt/airflow/airflow/models/dag.py", line 2030, in clear
exclude_task_ids=exclude_task_ids,
File "/opt/airflow/airflow/models/dag.py", line 1619, in _get_task_instances
tis = session.query(TaskInstance)
AttributeError: 'NoneType' object has no attribute 'query'
```
https://github.com/apache/airflow/blob/527fbce462429fc9836837378f801eed4e9d194f/airflow/models/dag.py#L1619
As per issue title, fails on main branch and `2.5.0rc2`. Works fine on `2.3.3` and `2.4.3`.
### What you think should happen instead
Tasks within the dag should be cleared as expected.
### How to reproduce
Run a dag, attempt to clear it within the UI at the top level of the dag.
### Operating System
Ran via breeze
### Versions of Apache Airflow Providers
N/A
### Deployment
Other 3rd-party Helm chart
### Deployment details
Tested via breeze.
### Anything else
Happens every time.
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
The issue is that this function is missing the `@provide_session` decorator, and/or the `session` argument shouldn't have a default value.
```
def _clear_dag_tis(
self,
dag: DAG,
start_date: datetime | None,
end_date: datetime | None,
origin: str,
task_ids=None,
recursive=False,
confirmed=False,
only_failed=False,
session: Session = NEW_SESSION,
):
```
Introduced in https://github.com/apache/airflow/pull/26658/file
Since this is internal we should probably remove the default. I’ll also (finally) write a pre-commit hook to make sure `NEW_SESSION` is only used with `provide_session`. | 2022-11-30T09:28:38Z | [] | [] |
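A minimal sketch of that convention (illustrative function, not Airflow's own code): `NEW_SESSION` is only a sentinel (effectively a typed `None`), so it must either be paired with `@provide_session` or the caller must always pass a real session explicitly.
```python
from sqlalchemy.orm import Session

from airflow.utils.session import NEW_SESSION, provide_session


@provide_session
def count_rows(model, session: Session = NEW_SESSION) -> int:
    # Without @provide_session the NEW_SESSION sentinel would reach session.query()
    # and fail with "'NoneType' object has no attribute 'query'", as in the traceback.
    return session.query(model).count()
```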
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2525, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1822, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1820, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1796, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
File "/opt/airflow/airflow/www/auth.py", line 47, in decorated
return func(*args, **kwargs)
File "/opt/airflow/airflow/www/decorators.py", line 83, in wrapper
return f(*args, **kwargs)
File "/opt/airflow/airflow/www/views.py", line 2184, in dagrun_clear
confirmed=confirmed,
File "/opt/airflow/airflow/www/views.py", line 2046, in _clear_dag_tis
session=session,
File "/opt/airflow/airflow/utils/session.py", line 72, in wrapper
return func(*args, **kwargs)
File "/opt/airflow/airflow/models/dag.py", line 2030, in clear
exclude_task_ids=exclude_task_ids,
File "/opt/airflow/airflow/models/dag.py", line 1619, in _get_task_instances
tis = session.query(TaskInstance)
AttributeError: 'NoneType' object has no attribute 'query'
| 2,609 |
|||
apache/airflow | apache__airflow-28191 | 84a5faff0de2a56f898b8a02aca578b235cb12ba | diff --git a/airflow/models/xcom.py b/airflow/models/xcom.py
--- a/airflow/models/xcom.py
+++ b/airflow/models/xcom.py
@@ -723,6 +723,21 @@ def __eq__(self, other: Any) -> bool:
return all(x == y for x, y in z)
return NotImplemented
+ def __getstate__(self) -> Any:
+ # We don't want to go to the trouble of serializing the entire Query
+ # object, including its filters, hints, etc. (plus SQLAlchemy does not
+ # provide a public API to inspect a query's contents). Converting the
+ # query into a SQL string is the best we can get. Theoratically we can
+ # do the same for count(), but I think it should be performant enough to
+ # calculate only that eagerly.
+ with self._get_bound_query() as query:
+ statement = query.statement.compile(query.session.get_bind())
+ return (str(statement), query.count())
+
+ def __setstate__(self, state: Any) -> None:
+ statement, self._len = state
+ self._query = Query(XCom.value).from_statement(text(statement))
+
def __len__(self):
if self._len is None:
with self._get_bound_query() as query:
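The `__getstate__`/`__setstate__` pair above is the standard pickle protocol; a self-contained sketch of the same idea, with made-up names and no Airflow dependency:
```python
import pickle


class LazySequenceSketch:
    """Illustrative only: pickle a cheap proxy (SQL text plus a precomputed length)
    instead of live Query/session objects, mirroring the patch above."""

    def __init__(self, statement: str, length: int):
        self._statement = statement
        self._len = length

    def __getstate__(self):
        # Persist only plain data, never the unpicklable database handles.
        return (self._statement, self._len)

    def __setstate__(self, state):
        self._statement, self._len = state

    def __len__(self):
        return self._len


restored = pickle.loads(pickle.dumps(LazySequenceSketch("SELECT value FROM xcom", 3)))
assert len(restored) == 3
```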
| Dynamic task context fails to be pickled
### Apache Airflow version
2.5.0
### What happened
After I upgraded to 2.5.0, running a dynamic task test failed.
```py
from airflow.decorators import task, dag
import pendulum as pl
@dag(
dag_id='test-dynamic-tasks',
schedule=None,
start_date=pl.today().add(days=-3),
tags=['example'])
def test_dynamic_tasks():
@task.virtualenv(requirements=[])
def sum_it(values):
print(values)
@task.virtualenv(requirements=[])
def add_one(value):
return value + 1
added_values = add_one.expand(value = [1,2])
sum_it(added_values)
dag = test_dynamic_tasks()
```
```log
*** Reading local file: /home/andi/airflow/logs/dag_id=test-dynamic-tasks/run_id=manual__2022-12-06T10:07:41.355423+00:00/task_id=sum_it/attempt=1.log
[2022-12-06, 18:07:53 CST] {taskinstance.py:1087} INFO - Dependencies all met for <TaskInstance: test-dynamic-tasks.sum_it manual__2022-12-06T10:07:41.355423+00:00 [queued]>
[2022-12-06, 18:07:53 CST] {taskinstance.py:1087} INFO - Dependencies all met for <TaskInstance: test-dynamic-tasks.sum_it manual__2022-12-06T10:07:41.355423+00:00 [queued]>
[2022-12-06, 18:07:53 CST] {taskinstance.py:1283} INFO -
--------------------------------------------------------------------------------
[2022-12-06, 18:07:53 CST] {taskinstance.py:1284} INFO - Starting attempt 1 of 1
[2022-12-06, 18:07:53 CST] {taskinstance.py:1285} INFO -
--------------------------------------------------------------------------------
[2022-12-06, 18:07:53 CST] {taskinstance.py:1304} INFO - Executing <Task(_PythonVirtualenvDecoratedOperator): sum_it> on 2022-12-06 10:07:41.355423+00:00
[2022-12-06, 18:07:53 CST] {standard_task_runner.py:55} INFO - Started process 25873 to run task
[2022-12-06, 18:07:53 CST] {standard_task_runner.py:82} INFO - Running: ['airflow', 'tasks', 'run', 'test-dynamic-tasks', 'sum_it', 'manual__2022-12-06T10:07:41.355423+00:00', '--job-id', '41164', '--raw', '--subdir', 'DAGS_FOLDER/andi/test-dynamic-task.py', '--cfg-path', '/tmp/tmphudvake2']
[2022-12-06, 18:07:53 CST] {standard_task_runner.py:83} INFO - Job 41164: Subtask sum_it
[2022-12-06, 18:07:53 CST] {task_command.py:389} INFO - Running <TaskInstance: test-dynamic-tasks.sum_it manual__2022-12-06T10:07:41.355423+00:00 [running]> on host sh-dataops-airflow.jinde.local
[2022-12-06, 18:07:53 CST] {taskinstance.py:1511} INFO - Exporting the following env vars:
AIRFLOW_CTX_DAG_EMAIL=andi@google.com
AIRFLOW_CTX_DAG_OWNER=andi
AIRFLOW_CTX_DAG_ID=test-dynamic-tasks
AIRFLOW_CTX_TASK_ID=sum_it
AIRFLOW_CTX_EXECUTION_DATE=2022-12-06T10:07:41.355423+00:00
AIRFLOW_CTX_TRY_NUMBER=1
AIRFLOW_CTX_DAG_RUN_ID=manual__2022-12-06T10:07:41.355423+00:00
[2022-12-06, 18:07:53 CST] {process_utils.py:179} INFO - Executing cmd: /home/andi/airflow/venv38/bin/python -m virtualenv /tmp/venv7lc4m6na --system-site-packages
[2022-12-06, 18:07:53 CST] {process_utils.py:183} INFO - Output:
[2022-12-06, 18:07:54 CST] {process_utils.py:187} INFO - created virtual environment CPython3.8.0.final.0-64 in 220ms
[2022-12-06, 18:07:54 CST] {process_utils.py:187} INFO - creator CPython3Posix(dest=/tmp/venv7lc4m6na, clear=False, no_vcs_ignore=False, global=True)
[2022-12-06, 18:07:54 CST] {process_utils.py:187} INFO - seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=/home/andi/.local/share/virtualenv)
[2022-12-06, 18:07:54 CST] {process_utils.py:187} INFO - added seed packages: pip==22.2.1, setuptools==63.2.0, wheel==0.37.1
[2022-12-06, 18:07:54 CST] {process_utils.py:187} INFO - activators BashActivator,CShellActivator,FishActivator,NushellActivator,PowerShellActivator,PythonActivator
[2022-12-06, 18:07:54 CST] {process_utils.py:179} INFO - Executing cmd: /tmp/venv7lc4m6na/bin/pip install -r /tmp/venv7lc4m6na/requirements.txt
[2022-12-06, 18:07:54 CST] {process_utils.py:183} INFO - Output:
[2022-12-06, 18:07:55 CST] {process_utils.py:187} INFO - Looking in indexes: http://pypi:8081
[2022-12-06, 18:08:00 CST] {process_utils.py:187} INFO -
[2022-12-06, 18:08:00 CST] {process_utils.py:187} INFO - [notice] A new release of pip available: 22.2.1 -> 22.3.1
[2022-12-06, 18:08:00 CST] {process_utils.py:187} INFO - [notice] To update, run: python -m pip install --upgrade pip
[2022-12-06, 18:08:00 CST] {taskinstance.py:1772} ERROR - Task failed with exception
Traceback (most recent call last):
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/decorators/base.py", line 217, in execute
return_value = super().execute(context)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 356, in execute
return super().execute(context=serializable_context)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 175, in execute
return_value = self.execute_callable()
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 553, in execute_callable
return self._execute_python_callable_in_subprocess(python_path, tmp_path)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 397, in _execute_python_callable_in_subprocess
self._write_args(input_path)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 367, in _write_args
file.write_bytes(self.pickling_library.dumps({"args": self.op_args, "kwargs": self.op_kwargs}))
_pickle.PicklingError: Can't pickle <class 'sqlalchemy.orm.session.Session'>: it's not the same object as sqlalchemy.orm.session.Session
[2022-12-06, 18:08:00 CST] {taskinstance.py:1322} INFO - Marking task as FAILED. dag_id=test-dynamic-tasks, task_id=sum_it, execution_date=20221206T100741, start_date=20221206T100753, end_date=20221206T100800
[2022-12-06, 18:08:00 CST] {warnings.py:109} WARNING - /home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/utils/email.py:120: RemovedInAirflow3Warning: Fetching SMTP credentials from configuration variables will be deprecated in a future release. Please set credentials using a connection instead.
send_mime_email(e_from=mail_from, e_to=recipients, mime_msg=msg, conn_id=conn_id, dryrun=dryrun)
[2022-12-06, 18:08:00 CST] {configuration.py:635} WARNING - section/key [smtp/smtp_user] not found in config
[2022-12-06, 18:08:00 CST] {email.py:229} INFO - Email alerting: attempt 1
[2022-12-06, 18:08:01 CST] {email.py:241} INFO - Sent an alert email to ['andi@google.com']
[2022-12-06, 18:08:01 CST] {standard_task_runner.py:100} ERROR - Failed to execute job 41164 for task sum_it (Can't pickle <class 'sqlalchemy.orm.session.Session'>: it's not the same object as sqlalchemy.orm.session.Session; 25873)
[2022-12-06, 18:08:01 CST] {local_task_job.py:159} INFO - Task exited with return code 1
[2022-12-06, 18:08:01 CST] {taskinstance.py:2582} INFO - 0 downstream tasks scheduled from follow-on schedule check
```
### What you think should happen instead
I expect this sample run to pass.
### How to reproduce
_No response_
### Operating System
centos 7.9 3.10.0-1160.el7.x86_64
### Versions of Apache Airflow Providers
```
airflow-code-editor==5.2.2
apache-airflow-providers-celery==3.0.0
apache-airflow-providers-microsoft-mssql==3.1.0
apache-airflow-providers-microsoft-psrp==2.0.0
apache-airflow-providers-microsoft-winrm==3.0.0
apache-airflow-providers-mysql==3.0.0
apache-airflow-providers-redis==3.0.0
apache-airflow-providers-samba==4.0.0
apache-airflow-providers-sftp==3.0.0
autopep8==1.6.0
brotlipy==0.7.0
chardet==3.0.4
pip-chill==1.0.1
pyopenssl==19.1.0
pysocks==1.7.1
python-ldap==3.4.2
requests-credssp==2.0.0
swagger-ui-bundle==0.0.9
tqdm==4.51.0
virtualenv==20.16.2
yapf==0.32.0
```
### Deployment
Official Apache Airflow Helm Chart
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| 2022-12-07T13:23:20Z | [] | [] |
Traceback (most recent call last):
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/decorators/base.py", line 217, in execute
return_value = super().execute(context)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 356, in execute
return super().execute(context=serializable_context)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 175, in execute
return_value = self.execute_callable()
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 553, in execute_callable
return self._execute_python_callable_in_subprocess(python_path, tmp_path)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 397, in _execute_python_callable_in_subprocess
self._write_args(input_path)
File "/home/andi/airflow/venv38/lib/python3.8/site-packages/airflow/operators/python.py", line 367, in _write_args
file.write_bytes(self.pickling_library.dumps({"args": self.op_args, "kwargs": self.op_kwargs}))
_pickle.PicklingError: Can't pickle <class 'sqlalchemy.orm.session.Session'>: it's not the same object as sqlalchemy.orm.session.Session
| 2,611 |
||||
apache/airflow | apache__airflow-28397 | fefcb1d567d8d605f7ec9b7d408831d656736541 | diff --git a/airflow/models/dag.py b/airflow/models/dag.py
--- a/airflow/models/dag.py
+++ b/airflow/models/dag.py
@@ -2549,7 +2549,7 @@ def create_dagrun(
external_trigger: bool | None = False,
conf: dict | None = None,
run_type: DagRunType | None = None,
- session=NEW_SESSION,
+ session: Session = NEW_SESSION,
dag_hash: str | None = None,
creating_job_id: int | None = None,
data_interval: tuple[datetime, datetime] | None = None,
@@ -2586,14 +2586,27 @@ def create_dagrun(
else:
data_interval = self.infer_automated_data_interval(logical_date)
+ if run_type is None or isinstance(run_type, DagRunType):
+ pass
+ elif isinstance(run_type, str): # Compatibility: run_type used to be a str.
+ run_type = DagRunType(run_type)
+ else:
+ raise ValueError(f"`run_type` should be a DagRunType, not {type(run_type)}")
+
if run_id: # Infer run_type from run_id if needed.
if not isinstance(run_id, str):
raise ValueError(f"`run_id` should be a str, not {type(run_id)}")
- if not run_type:
- run_type = DagRunType.from_run_id(run_id)
+ inferred_run_type = DagRunType.from_run_id(run_id)
+ if run_type is None:
+ # No explicit type given, use the inferred type.
+ run_type = inferred_run_type
+ elif run_type == DagRunType.MANUAL and inferred_run_type != DagRunType.MANUAL:
+ # Prevent a manual run from using an ID that looks like a scheduled run.
+ raise ValueError(
+ f"A {run_type.value} DAG run cannot use ID {run_id!r} since it "
+ f"is reserved for {inferred_run_type.value} runs"
+ )
elif run_type and logical_date is not None: # Generate run_id from run_type and execution_date.
- if not isinstance(run_type, DagRunType):
- raise ValueError(f"`run_type` should be a DagRunType, not {type(run_type)}")
run_id = self.timetable.generate_run_id(
run_type=run_type, logical_date=logical_date, data_interval=data_interval
)
| Triggering a DAG with the same run_id as a scheduled one causes the scheduler to crash
### Apache Airflow version
2.5.0
### What happened
A user with access to manually triggering DAGs can trigger a DAG, provide a run_id that matches the pattern used when creating scheduled runs, and cause the scheduler to crash due to a database unique-key violation:
```
[2022-12-12 12:58:00,793] {scheduler_job.py:776} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 759, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 885, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 956, in _do_scheduling
self._create_dagruns_for_dags(guard, session)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/retries.py", line 78, in wrapped_function
for attempt in run_with_db_retries(max_retries=retries, logger=logger, **retry_kwargs):
File "/usr/local/lib/python3.8/site-packages/tenacity/__init__.py", line 384, in __iter__
do = self.iter(retry_state=retry_state)
File "/usr/local/lib/python3.8/site-packages/tenacity/__init__.py", line 351, in iter
return fut.result()
File "/usr/local/lib/python3.8/concurrent/futures/_base.py", line 437, in result
return self.__get_result()
File "/usr/local/lib/python3.8/concurrent/futures/_base.py", line 389, in __get_result
raise self._exception
File "/usr/local/lib/python3.8/site-packages/airflow/utils/retries.py", line 87, in wrapped_function
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1018, in _create_dagruns_for_dags
query, dataset_triggered_dag_info = DagModel.dags_needing_dagruns(session)
File "/usr/local/lib/python3.8/site-packages/airflow/models/dag.py", line 3341, in dags_needing_dagruns
for x in session.query(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 2773, in all
return self._iter().all()
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 2916, in _iter
result = self.session.execute(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1713, in execute
conn = self._connection_for_bind(bind)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1552, in _connection_for_bind
return self._transaction._connection_for_bind(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 721, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 601, in _assert_active
raise sa_exc.PendingRollbackError(
sqlalchemy.exc.PendingRollbackError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
DETAIL: Key (dag_id, run_id)=(example_branch_dop_operator_v3, scheduled__2022-12-12T12:57:00+00:00) already exists.
[SQL: INSERT INTO dag_run (dag_id, queued_at, execution_date, start_date, end_date, state, run_id, creating_job_id, external_trigger, run_type, conf, data_interval_start, data_interval_end, last_scheduling_decision, dag_hash, log_template_id, updated_at) VALUES (%(dag_id)s, %(queued_at)s, %(execution_date)s, %(start_date)s, %(end_date)s, %(state)s, %(run_id)s, %(creating_job_id)s, %(external_trigger)s, %(run_type)s, %(conf)s, %(data_interval_start)s, %(data_interval_end)s, %(last_scheduling_decision)s, %(dag_hash)s, (SELECT max(log_template.id) AS max_1
FROM log_template), %(updated_at)s) RETURNING dag_run.id]
[parameters: {'dag_id': 'example_branch_dop_operator_v3', 'queued_at': datetime.datetime(2022, 12, 12, 12, 58, 0, 435945, tzinfo=Timezone('UTC')), 'execution_date': DateTime(2022, 12, 12, 12, 57, 0, tzinfo=Timezone('UTC')), 'start_date': None, 'end_date': None, 'state': <DagRunState.QUEUED: 'queued'>, 'run_id': 'scheduled__2022-12-12T12:57:00+00:00', 'creating_job_id': 1, 'external_trigger': False, 'run_type': <DagRunType.SCHEDULED: 'scheduled'>, 'conf': <psycopg2.extensions.Binary object at 0x7f283a82af60>, 'data_interval_start': DateTime(2022, 12, 12, 12, 57, 0, tzinfo=Timezone('UTC')), 'data_interval_end': DateTime(2022, 12, 12, 12, 58, 0, tzinfo=Timezone('UTC')), 'last_scheduling_decision': None, 'dag_hash': '1653a588de69ed25c5b1dcfef928479c', 'updated_at': datetime.datetime(2022, 12, 12, 12, 58, 0, 436871, tzinfo=Timezone('UTC'))}]
(Background on this error at: https://sqlalche.me/e/14/gkpj) (Background on this error at: https://sqlalche.me/e/14/7s2a)
```
Worse yet, the scheduler will keep crashing after a restart with the same exception.
### What you think should happen instead
A user should not be able to crash the scheduler from the UI.
I see 2 alternatives for solving this:
1. Reject a custom run_id that would (or could) collide with a scheduled one, preventing this situation from happening (see the sketch after this list).
2. Handle the database error and assign a different run_id to the scheduled run.
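A rough sketch of what the first alternative could look like (hypothetical validation helper; the names come from the existing `DagRunType` utility, but this is not the actual fix):
```python
# Hypothetical check: refuse a manually supplied run_id whose prefix is reserved for
# scheduler-generated runs (e.g. "scheduled__..."), so a later scheduled run cannot
# collide with it.
from airflow.utils.types import DagRunType


def validate_manual_run_id(run_id: str) -> None:
    inferred = DagRunType.from_run_id(run_id)
    if inferred != DagRunType.MANUAL:
        raise ValueError(f"run_id {run_id!r} is reserved for {inferred.value} runs")
```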
### How to reproduce
1. Find an unpaused DAG.
2. Trigger DAG w/ config, set the run id to something like scheduled__2022-11-21T12:00:00+00:00 (adjust the time to be in the future where there is no run yet).
3. Let the manual DAG run finish.
4. Wait for the scheduler to try to schedule another DAG run with the same run id.
5. :boom:
6. Attempt to restart the scheduler.
7. :boom:
### Operating System
Debian GNU/Linux 11 (bullseye)
### Versions of Apache Airflow Providers
apache-airflow-providers-postgres==5.3.1
### Deployment
Docker-Compose
### Deployment details
I'm using a Postgres docker container as a metadata database that is linked via docker networking to the scheduler and the rest of the components. Scheduler, workers and webserver are all running in separate containers (using CeleryExecutor backed by a Redis container), though I do not think it is relevant in this case.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Smth to take a look at 2.5.1
Code around this has actually changed quite a bit due to datasets. A reproduction on latest Airflow would be awesome.
Yep. @uranusjr is right. We are releasing 2.5.0 in coming days - can you please try to reproduce it when we do @saulbein ?
I reproduced the same bug with 2.4.3, will try when 2.5.0 is in PyPI to see if it's reproducible there as well :slightly_smiling_face:
> I reproduced the same bug with 2.4.3, will try when 2.5.0 is in PyPI to see if it's reproducible there as well 🙂
2.5.0 is released. If you see that there is the same error in 2.5.0 - can you please post stacktrace and any extra infoy you will find @saulbein ?
Verified this still happens on 2.5.0 and updated the stack trace.
Cool. Thanks!
To make sure, is the log attached above from 2.5.0 or 2.4.3?
The log is from 2.5.0
Awesome, that should make debugging easier. Thanks! | 2022-12-16T06:38:41Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 759, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 885, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 956, in _do_scheduling
self._create_dagruns_for_dags(guard, session)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/retries.py", line 78, in wrapped_function
for attempt in run_with_db_retries(max_retries=retries, logger=logger, **retry_kwargs):
File "/usr/local/lib/python3.8/site-packages/tenacity/__init__.py", line 384, in __iter__
do = self.iter(retry_state=retry_state)
File "/usr/local/lib/python3.8/site-packages/tenacity/__init__.py", line 351, in iter
return fut.result()
File "/usr/local/lib/python3.8/concurrent/futures/_base.py", line 437, in result
return self.__get_result()
File "/usr/local/lib/python3.8/concurrent/futures/_base.py", line 389, in __get_result
raise self._exception
File "/usr/local/lib/python3.8/site-packages/airflow/utils/retries.py", line 87, in wrapped_function
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1018, in _create_dagruns_for_dags
query, dataset_triggered_dag_info = DagModel.dags_needing_dagruns(session)
File "/usr/local/lib/python3.8/site-packages/airflow/models/dag.py", line 3341, in dags_needing_dagruns
for x in session.query(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 2773, in all
return self._iter().all()
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 2916, in _iter
result = self.session.execute(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1713, in execute
conn = self._connection_for_bind(bind)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1552, in _connection_for_bind
return self._transaction._connection_for_bind(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 721, in _connection_for_bind
self._assert_active()
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 601, in _assert_active
raise sa_exc.PendingRollbackError(
sqlalchemy.exc.PendingRollbackError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "dag_run_dag_id_run_id_key"
| 2,615 |
|||
apache/airflow | apache__airflow-29518 | 1e7c064560b8504b45e3a53dc8f294b143b4ec7d | diff --git a/airflow/providers/google/cloud/operators/cloud_base.py b/airflow/providers/google/cloud/operators/cloud_base.py
--- a/airflow/providers/google/cloud/operators/cloud_base.py
+++ b/airflow/providers/google/cloud/operators/cloud_base.py
@@ -18,6 +18,8 @@
"""This module contains a Google API base operator."""
from __future__ import annotations
+from google.api_core.gapic_v1.method import DEFAULT
+
from airflow.models import BaseOperator
@@ -27,4 +29,11 @@ class GoogleCloudBaseOperator(BaseOperator):
on top of Google API client libraries.
"""
- pass
+ def __deepcopy__(self, memo):
+ """
+ Updating the memo to fix the non-copyable global constant.
+ This constant can be specified in operator parameters as a retry configuration to indicate a default.
+ See https://github.com/apache/airflow/issues/28751 for details.
+ """
+ memo[id(DEFAULT)] = DEFAULT
+ return super().__deepcopy__(memo)
| KubernetesExecutor leaves failed pods due to deepcopy issue with Google providers
### Apache Airflow version
Other Airflow 2 version (please specify below)
### What happened
With Airflow 2.3 and 2.4 there appears to be a bug in the KubernetesExecutor when used in conjunction with the Google airflow providers. This bug does not affect Airflow 2.2 due to the pip version requirements.
The bug specifically presents itself when using nearly any Google provider operator. During the pod lifecycle, all is well until the executor in the pod starts to clean up following a successful run. Airflow itself still see's the task marked as a success, but in Kubernetes, while the task is finishing up after reporting status, it actually crashes and puts the pod into a Failed state silently:
```
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 39, in main
args.func(args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 52, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 103, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 382, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 189, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 247, in _run_task_by_local_task_job
run_job.run()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 247, in run
self._execute()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 137, in _execute
self.handle_task_exit(return_code)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 168, in handle_task_exit
self._run_mini_scheduler_on_child_tasks()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 253, in _run_mini_scheduler_on_child_tasks
partial_dag = task.dag.partial_subset(
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2188, in partial_subset
dag.task_dict = {
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2189, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2186, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1163, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/usr/local/lib/python3.9/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.9/copy.py", line 264, in _reconstruct
y = func(*args)
File "/usr/local/lib/python3.9/enum.py", line 384, in __call__
return cls.__new__(cls, value)
File "/usr/local/lib/python3.9/enum.py", line 702, in __new__
raise ve_exc
ValueError: <object object at 0x7f570181a3c0> is not a valid _MethodDefault
```
Based on a quick look, it appears to be related to the default argument that Google is using in its operators which happens to be an Enum, and fails during a deepcopy at the end of the task.
Example operator that is affected: https://github.com/apache/airflow/blob/403ed7163f3431deb7fc21108e1743385e139907/airflow/providers/google/cloud/hooks/dataproc.py#L753
Reference to the Google Python API core which has the Enum causing the problem: https://github.com/googleapis/python-api-core/blob/main/google/api_core/gapic_v1/method.py#L31
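For reference, the failure reduces to deep-copying the `DEFAULT` sentinel itself. A minimal standalone demonstration (assuming `google-api-core` is installed and Python <= 3.10, where enum members are rebuilt from a copy of their value), together with the memo trick used in the provider patch earlier in this record:
```python
# Illustration only: DEFAULT is an enum member whose value is a bare object(), so
# rebuilding it from a copied value raises the ValueError seen in this issue's traceback.
import copy

from google.api_core.gapic_v1.method import DEFAULT

try:
    copy.deepcopy({"retry": DEFAULT})
except ValueError as err:
    print(err)  # "<object object at 0x...> is not a valid _MethodDefault"

# Pre-seeding the deepcopy memo keeps the sentinel as a singleton.
memo = {id(DEFAULT): DEFAULT}
assert copy.deepcopy({"retry": DEFAULT}, memo)["retry"] is DEFAULT
```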
### What you think should happen instead
Kubernetes pods should succeed, be marked as `Completed`, and then be gracefully terminated.
### How to reproduce
Use any `apache-airflow-providers-google` >= 7.0.0 which includes `google-api-core` >= 2.2.2. Run a DAG with a task which uses any of the Google operators which have `_MethodDefault` as a default argument.
### Operating System
Debian GNU/Linux 11 (bullseye)
### Versions of Apache Airflow Providers
apache-airflow-providers-amazon==6.0.0
apache-airflow-providers-apache-hive==5.0.0
apache-airflow-providers-celery==3.0.0
apache-airflow-providers-cncf-kubernetes==4.4.0
apache-airflow-providers-common-sql==1.3.1
apache-airflow-providers-docker==3.2.0
apache-airflow-providers-elasticsearch==4.2.1
apache-airflow-providers-ftp==3.1.0
apache-airflow-providers-google==8.4.0
apache-airflow-providers-grpc==3.0.0
apache-airflow-providers-hashicorp==3.1.0
apache-airflow-providers-http==4.0.0
apache-airflow-providers-imap==3.0.0
apache-airflow-providers-microsoft-azure==4.3.0
apache-airflow-providers-mysql==3.2.1
apache-airflow-providers-odbc==3.1.2
apache-airflow-providers-postgres==5.2.2
apache-airflow-providers-presto==4.2.0
apache-airflow-providers-redis==3.0.0
apache-airflow-providers-sendgrid==3.0.0
apache-airflow-providers-sftp==4.1.0
apache-airflow-providers-slack==6.0.0
apache-airflow-providers-sqlite==3.2.1
apache-airflow-providers-ssh==3.2.0
### Deployment
Other 3rd-party Helm chart
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template!
This seems to be a limitation in Python until 3.10 (not sure when exactly this was fixed). I’m not quite sure if we want to (or even _can_) fix this, to be honest.
Ah, maybe this could be possible if we implement custom copying logic on operators in the Google provider. Can you provide a quick example DAG that replicates this?
Hi @uranusjr - I don't have a public example DAG to provide unfortunately (very sorry!), but nearly any of the Google provider operators that use `_MethodDefault` as the default for the `retry` argument should exhibit this. We've seen it most with Dataproc operators (where we found this bug):
- https://airflow.apache.org/docs/apache-airflow-providers-google/7.0.0/_api/airflow/providers/google/cloud/operators/dataproc/index.html#airflow.providers.google.cloud.operators.dataproc.DataprocSubmitJobOperator
I'm facing a similar issue with Airflow 2.3.2 deployed in EKS
```
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/__main__.py", line 38, in main
args.func(args)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 51, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/cli.py", line 99, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 376, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 182, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 240, in _run_task_by_local_task_job
run_job.run()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 244, in run
self._execute()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 133, in _execute
self.handle_task_exit(return_code)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 171, in handle_task_exit
self._run_mini_scheduler_on_child_tasks()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/session.py", line 71, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 253, in _run_mini_scheduler_on_child_tasks
partial_dag = task.dag.partial_subset(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 2013, in partial_subset
dag.task_dict = {
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 2014, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 2011, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.8/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/baseoperator.py", line 1156, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/usr/local/lib/python3.8/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.8/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.8/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.8/copy.py", line 270, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.8/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.8/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.8/copy.py", line 161, in deepcopy
rv = reductor(4)
TypeError: cannot pickle 'module' object
```
@uranusjr Also experiencing this with `BigQueryExecuteQueryOperator`.
### Environment
Airflow 2.4.3
Python 3.8.15
KubernetesExecutor
GKE Version 1.23.13-gke.900
### Error Encountered
Interestingly, there is no indication of the failed task from Airflow UI. It seems to succeed...
![image](https://user-images.githubusercontent.com/22802852/212174908-13f43702-2aab-43cb-8e8b-c03e8959524c.png)
However there are failed task pods in GKE.
![image](https://user-images.githubusercontent.com/22802852/212174699-792083a6-52fe-4ea6-a196-5ae99feaddae.png)
With the following errors:
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/__main__.py", line 39, in main
args.func(args)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 52, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/cli.py", line 103, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 382, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 189, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 247, in _run_task_by_local_task_job
run_job.run()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 247, in run
self._execute()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 132, in _execute
self.handle_task_exit(return_code)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/local_task_job.py", line 163, in handle_task_exit
self.task_instance.schedule_downstream_tasks()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 2603, in schedule_downstream_tasks
partial_dag = task.dag.partial_subset(
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 2188, in partial_subset
dag.task_dict = {
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 2189, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/dag.py", line 2186, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.8/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/baseoperator.py", line 1163, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/usr/local/lib/python3.8/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.8/copy.py", line 270, in _reconstruct
state = deepcopy(state, memo)
File "/usr/local/lib/python3.8/copy.py", line 146, in deepcopy
y = copier(x, memo)
File "/usr/local/lib/python3.8/copy.py", line 230, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/usr/local/lib/python3.8/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.8/copy.py", line 264, in _reconstruct
y = func(*args)
File "/usr/local/lib/python3.8/copy.py", line 263, in <genexpr>
args = (deepcopy(arg, memo) for arg in args)
File "/usr/local/lib/python3.8/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.8/copy.py", line 264, in _reconstruct
y = func(*args)
File "/usr/local/lib/python3.8/enum.py", line 339, in __call__
return cls.__new__(cls, value)
File "/usr/local/lib/python3.8/enum.py", line 663, in __new__
raise ve_exc
ValueError: <object object at 0x7f80e0221810> is not a valid _MethodDefault
### Providers
```
apache-airflow 2.4.3
apache-airflow-providers-amazon 6.0.0
apache-airflow-providers-celery 3.0.0
apache-airflow-providers-cncf-kubernetes 4.4.0
apache-airflow-providers-common-sql 1.3.1
apache-airflow-providers-docker 3.2.0
apache-airflow-providers-elasticsearch 4.2.1
apache-airflow-providers-ftp 3.2.0
apache-airflow-providers-google 8.4.0
apache-airflow-providers-grpc 3.0.0
apache-airflow-providers-hashicorp 3.1.0
apache-airflow-providers-http 4.1.0
apache-airflow-providers-imap 3.1.0
apache-airflow-providers-microsoft-azure 4.3.0
apache-airflow-providers-mysql 3.2.1
apache-airflow-providers-odbc 3.1.2
apache-airflow-providers-pagerduty 3.1.0
apache-airflow-providers-postgres 5.2.2
apache-airflow-providers-redis 3.0.0
apache-airflow-providers-salesforce 5.2.0
apache-airflow-providers-sendgrid 3.0.0
apache-airflow-providers-sftp 4.1.0
apache-airflow-providers-slack 6.0.0
apache-airflow-providers-sqlite 3.3.1
apache-airflow-providers-ssh 3.2.0
```
I believe this is a broader issue with the Google client: objects that carry `google.api_core.gapic_v1.method.DEFAULT` as a default parameter do not handle deepcopy properly.
On one hand this can be easily fixed by memoizing this object, which can safely be assumed to be a singleton; on the other hand it highlights a bigger problem - the assumption that operators support a proper deepcopy is implicit, applies to all operators, and isn't enforced in any of the unit tests (and would be hard to enforce properly, as some operators have complicated state).
Should we consider tackling the second part globally for all providers as a separate bug?
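On the enforcement idea, a rough sketch of the kind of check that could be added (a hypothetical test, not an existing Airflow one; `BashOperator` is only a stand-in for provider operators with `DEFAULT`-typed arguments):
```python
# Hypothetical enforcement sketch: every shipped operator instance should survive
# copy.deepcopy, which is what the mini scheduler relies on via partial_subset().
import copy

import pytest

from airflow.operators.bash import BashOperator

OPERATORS = [BashOperator(task_id="probe", bash_command="echo hi")]


@pytest.mark.parametrize("op", OPERATORS)
def test_operator_supports_deepcopy(op):
    copy.deepcopy(op)
```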
This also impacts Airflow 2.5.1; it mostly occurs when an operation fails; otherwise I have not seen it occur.
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 39, in main
args.func(args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 52, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 103, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 382, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 189, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 247, in _run_task_by_local_task_job
run_job.run()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 247, in run
self._execute()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 137, in _execute
self.handle_task_exit(return_code)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 168, in handle_task_exit
self._run_mini_scheduler_on_child_tasks()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 253, in _run_mini_scheduler_on_child_tasks
partial_dag = task.dag.partial_subset(
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2188, in partial_subset
dag.task_dict = {
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2189, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2186, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1163, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/usr/local/lib/python3.9/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.9/copy.py", line 264, in _reconstruct
y = func(*args)
File "/usr/local/lib/python3.9/enum.py", line 384, in __call__
return cls.__new__(cls, value)
File "/usr/local/lib/python3.9/enum.py", line 702, in __new__
raise ve_exc
ValueError: <object object at 0x7f570181a3c0> is not a valid _MethodDefault
| 2,633 |
|||
apache/airflow | apache__airflow-31033 | ae7e1dbc8a772e0b6d749f2347b3951369d906e7 | diff --git a/airflow/executors/base_executor.py b/airflow/executors/base_executor.py
--- a/airflow/executors/base_executor.py
+++ b/airflow/executors/base_executor.py
@@ -38,7 +38,8 @@
if TYPE_CHECKING:
from airflow.callbacks.base_callback_sink import BaseCallbackSink
from airflow.callbacks.callback_requests import CallbackRequest
- from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
+ from airflow.models.taskinstance import TaskInstance
+ from airflow.models.taskinstancekey import TaskInstanceKey
# Command to execute - list of strings
# the first element is always "airflow".
diff --git a/airflow/executors/celery_executor.py b/airflow/executors/celery_executor.py
--- a/airflow/executors/celery_executor.py
+++ b/airflow/executors/celery_executor.py
@@ -56,7 +56,8 @@
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType, EventBufferValueType, TaskTuple
- from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
+ from airflow.models.taskinstance import TaskInstance
+ from airflow.models.taskinstancekey import TaskInstanceKey
# Task instance that is sent over Celery queues
# TaskInstanceKey, Command, queue_name, CallableTask
diff --git a/airflow/executors/celery_kubernetes_executor.py b/airflow/executors/celery_kubernetes_executor.py
--- a/airflow/executors/celery_kubernetes_executor.py
+++ b/airflow/executors/celery_kubernetes_executor.py
@@ -28,7 +28,8 @@
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType, EventBufferValueType, QueuedTaskInstanceType
- from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance, TaskInstanceKey
+ from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance
+ from airflow.models.taskinstancekey import TaskInstanceKey
class CeleryKubernetesExecutor(LoggingMixin):
diff --git a/airflow/executors/dask_executor.py b/airflow/executors/dask_executor.py
--- a/airflow/executors/dask_executor.py
+++ b/airflow/executors/dask_executor.py
@@ -36,7 +36,7 @@
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
# queue="default" is a special case since this is the base config default queue name,
diff --git a/airflow/executors/debug_executor.py b/airflow/executors/debug_executor.py
--- a/airflow/executors/debug_executor.py
+++ b/airflow/executors/debug_executor.py
@@ -32,7 +32,8 @@
from airflow.utils.state import State
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
+ from airflow.models.taskinstance import TaskInstance
+ from airflow.models.taskinstancekey import TaskInstanceKey
class DebugExecutor(BaseExecutor):
diff --git a/airflow/executors/kubernetes_executor.py b/airflow/executors/kubernetes_executor.py
--- a/airflow/executors/kubernetes_executor.py
+++ b/airflow/executors/kubernetes_executor.py
@@ -54,7 +54,7 @@
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
# TaskInstance key, command, configuration, pod_template_file
KubernetesJobType = Tuple[TaskInstanceKey, CommandType, Any, Optional[str]]
diff --git a/airflow/executors/local_executor.py b/airflow/executors/local_executor.py
--- a/airflow/executors/local_executor.py
+++ b/airflow/executors/local_executor.py
@@ -43,7 +43,8 @@
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
- from airflow.models.taskinstance import TaskInstanceKey, TaskInstanceStateType
+ from airflow.models.taskinstance import TaskInstanceStateType
+ from airflow.models.taskinstancekey import TaskInstanceKey
# This is a work to be executed by a worker.
# It can Key and Command - but it can also be None, None which is actually a
diff --git a/airflow/executors/sequential_executor.py b/airflow/executors/sequential_executor.py
--- a/airflow/executors/sequential_executor.py
+++ b/airflow/executors/sequential_executor.py
@@ -32,7 +32,7 @@
if TYPE_CHECKING:
from airflow.executors.base_executor import CommandType
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
class SequentialExecutor(BaseExecutor):
diff --git a/airflow/kubernetes/kubernetes_helper_functions.py b/airflow/kubernetes/kubernetes_helper_functions.py
--- a/airflow/kubernetes/kubernetes_helper_functions.py
+++ b/airflow/kubernetes/kubernetes_helper_functions.py
@@ -23,7 +23,7 @@
import pendulum
from slugify import slugify
-from airflow.models.taskinstance import TaskInstanceKey
+from airflow.models.taskinstancekey import TaskInstanceKey
log = logging.getLogger(__name__)
diff --git a/airflow/models/baseoperator.py b/airflow/models/baseoperator.py
--- a/airflow/models/baseoperator.py
+++ b/airflow/models/baseoperator.py
@@ -98,7 +98,7 @@
import jinja2 # Slow import.
from airflow.models.dag import DAG
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
diff --git a/airflow/models/taskinstance.py b/airflow/models/taskinstance.py
--- a/airflow/models/taskinstance.py
+++ b/airflow/models/taskinstance.py
@@ -32,7 +32,7 @@
from functools import partial
from pathlib import PurePath
from types import TracebackType
-from typing import TYPE_CHECKING, Any, Callable, Collection, Generator, Iterable, NamedTuple, Tuple
+from typing import TYPE_CHECKING, Any, Callable, Collection, Generator, Iterable, Tuple
from urllib.parse import quote
import dill
@@ -92,6 +92,7 @@
from airflow.models.mappedoperator import MappedOperator
from airflow.models.param import process_params
from airflow.models.taskfail import TaskFail
+from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.models.taskmap import TaskMap
from airflow.models.taskreschedule import TaskReschedule
from airflow.models.xcom import LazyXComAccess, XCom
@@ -343,40 +344,6 @@ def _is_mappable_value(value: Any) -> TypeGuard[Collection]:
return True
-class TaskInstanceKey(NamedTuple):
- """Key used to identify task instance."""
-
- dag_id: str
- task_id: str
- run_id: str
- try_number: int = 1
- map_index: int = -1
-
- @property
- def primary(self) -> tuple[str, str, str, int]:
- """Return task instance primary key part of the key"""
- return self.dag_id, self.task_id, self.run_id, self.map_index
-
- @property
- def reduced(self) -> TaskInstanceKey:
- """Remake the key by subtracting 1 from try number to match in memory information"""
- return TaskInstanceKey(
- self.dag_id, self.task_id, self.run_id, max(1, self.try_number - 1), self.map_index
- )
-
- def with_try_number(self, try_number: int) -> TaskInstanceKey:
- """Returns TaskInstanceKey with provided ``try_number``"""
- return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, try_number, self.map_index)
-
- @property
- def key(self) -> TaskInstanceKey:
- """For API-compatibly with TaskInstance.
-
- Returns self
- """
- return self
-
-
def _creator_note(val):
"""Custom creator for the ``note`` association proxy."""
if isinstance(val, str):
diff --git a/airflow/models/taskinstancekey.py b/airflow/models/taskinstancekey.py
new file mode 100644
--- /dev/null
+++ b/airflow/models/taskinstancekey.py
@@ -0,0 +1,54 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+from typing import NamedTuple
+
+
+class TaskInstanceKey(NamedTuple):
+ """Key used to identify task instance."""
+
+ dag_id: str
+ task_id: str
+ run_id: str
+ try_number: int = 1
+ map_index: int = -1
+
+ @property
+ def primary(self) -> tuple[str, str, str, int]:
+ """Return task instance primary key part of the key"""
+ return self.dag_id, self.task_id, self.run_id, self.map_index
+
+ @property
+ def reduced(self) -> TaskInstanceKey:
+ """Remake the key by subtracting 1 from try number to match in memory information"""
+ return TaskInstanceKey(
+ self.dag_id, self.task_id, self.run_id, max(1, self.try_number - 1), self.map_index
+ )
+
+ def with_try_number(self, try_number: int) -> TaskInstanceKey:
+ """Returns TaskInstanceKey with provided ``try_number``"""
+ return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, try_number, self.map_index)
+
+ @property
+ def key(self) -> TaskInstanceKey:
+ """For API-compatibly with TaskInstance.
+
+ Returns self
+ """
+ return self
diff --git a/airflow/models/xcom.py b/airflow/models/xcom.py
--- a/airflow/models/xcom.py
+++ b/airflow/models/xcom.py
@@ -68,7 +68,7 @@
log = logging.getLogger(__name__)
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
class BaseXCom(Base, LoggingMixin):
diff --git a/airflow/operators/trigger_dagrun.py b/airflow/operators/trigger_dagrun.py
--- a/airflow/operators/trigger_dagrun.py
+++ b/airflow/operators/trigger_dagrun.py
@@ -46,7 +46,7 @@
if TYPE_CHECKING:
from sqlalchemy.orm.session import Session
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
class TriggerDagRunLink(BaseOperatorLink):
diff --git a/airflow/providers/amazon/aws/links/base_aws.py b/airflow/providers/amazon/aws/links/base_aws.py
--- a/airflow/providers/amazon/aws/links/base_aws.py
+++ b/airflow/providers/amazon/aws/links/base_aws.py
@@ -23,7 +23,7 @@
if TYPE_CHECKING:
from airflow.models import BaseOperator
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
diff --git a/airflow/providers/databricks/operators/databricks.py b/airflow/providers/databricks/operators/databricks.py
--- a/airflow/providers/databricks/operators/databricks.py
+++ b/airflow/providers/databricks/operators/databricks.py
@@ -31,7 +31,7 @@
from airflow.providers.databricks.utils.databricks import normalise_json_content, validate_trigger_event
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
DEFER_METHOD_NAME = "execute_complete"
diff --git a/airflow/providers/google/cloud/links/base.py b/airflow/providers/google/cloud/links/base.py
--- a/airflow/providers/google/cloud/links/base.py
+++ b/airflow/providers/google/cloud/links/base.py
@@ -23,7 +23,7 @@
if TYPE_CHECKING:
from airflow.models import BaseOperator
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
BASE_LINK = "https://console.cloud.google.com"
diff --git a/airflow/providers/google/cloud/links/datafusion.py b/airflow/providers/google/cloud/links/datafusion.py
--- a/airflow/providers/google/cloud/links/datafusion.py
+++ b/airflow/providers/google/cloud/links/datafusion.py
@@ -24,7 +24,7 @@
if TYPE_CHECKING:
from airflow.models import BaseOperator
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
diff --git a/airflow/providers/google/cloud/links/dataproc.py b/airflow/providers/google/cloud/links/dataproc.py
--- a/airflow/providers/google/cloud/links/dataproc.py
+++ b/airflow/providers/google/cloud/links/dataproc.py
@@ -25,7 +25,7 @@
if TYPE_CHECKING:
from airflow.models import BaseOperator
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
DATAPROC_BASE_LINK = BASE_LINK + "/dataproc"
diff --git a/airflow/providers/google/cloud/operators/bigquery.py b/airflow/providers/google/cloud/operators/bigquery.py
--- a/airflow/providers/google/cloud/operators/bigquery.py
+++ b/airflow/providers/google/cloud/operators/bigquery.py
@@ -52,7 +52,7 @@
)
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
diff --git a/airflow/providers/google/cloud/operators/dataproc_metastore.py b/airflow/providers/google/cloud/operators/dataproc_metastore.py
--- a/airflow/providers/google/cloud/operators/dataproc_metastore.py
+++ b/airflow/providers/google/cloud/operators/dataproc_metastore.py
@@ -37,7 +37,7 @@
from airflow.providers.google.common.links.storage import StorageLink
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
diff --git a/airflow/providers/microsoft/azure/operators/data_factory.py b/airflow/providers/microsoft/azure/operators/data_factory.py
--- a/airflow/providers/microsoft/azure/operators/data_factory.py
+++ b/airflow/providers/microsoft/azure/operators/data_factory.py
@@ -33,7 +33,7 @@
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
diff --git a/airflow/providers/qubole/operators/qubole.py b/airflow/providers/qubole/operators/qubole.py
--- a/airflow/providers/qubole/operators/qubole.py
+++ b/airflow/providers/qubole/operators/qubole.py
@@ -33,7 +33,7 @@
if TYPE_CHECKING:
- from airflow.models.taskinstance import TaskInstanceKey
+ from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -246,6 +246,7 @@ def _get_rst_filepath_from_path(filepath: pathlib.Path):
"dagbag.py",
"param.py",
"taskinstance.py",
+ "taskinstancekey.py",
"variable.py",
"xcom.py",
}
| DB migration job fails with circular import
### Apache Airflow version
2.6.0
### What happened
I upgraded my Airflow 2.5.3 to 2.6.0 using the official Helm chart 1.9.0 installation on a Kubernetes cluster. The DB migration job fails with a circular import of "TaskInstanceKey". The image I'm using is `apache/airflow:2.6.0-python3.10`. I'm using CeleryKubernetesExecutor in my configuration.
Here is the stacktrace:
```
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/__main__.py", line 48, in main
args.func(args)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/cli/cli_config.py", line 51, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/utils/cli.py", line 112, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/cli/commands/db_command.py", line 84, in upgradedb
db.upgradedb(
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/utils/session.py", line 76, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/utils/db.py", line 1544, in upgradedb
import_all_models()
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/models/__init__.py", line 60, in import_all_models
__getattr__(name)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/models/__init__.py", line 78, in __getattr__
val = import_string(f"{path}.{name}")
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/utils/module_loading.py", line 36, in import_string
module = import_module(module_path)
File "/usr/local/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/models/dag.py", line 82, in <module>
from airflow.models.dagrun import DagRun
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/models/dagrun.py", line 57, in <module>
from airflow.models.taskinstance import TaskInstance as TI
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/models/taskinstance.py", line 99, in <module>
from airflow.sentry import Sentry
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/sentry.py", line 195, in <module>
Sentry = ConfiguredSentry()
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/sentry.py", line 92, in __init__
executor_class, _ = ExecutorLoader.import_default_executor_cls()
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/executors/executor_loader.py", line 158, in import_default_executor_cls
executor, source = cls.import_executor_cls(executor_name)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/executors/executor_loader.py", line 134, in import_executor_cls
return _import_and_validate(cls.executors[executor_name]), ConnectorSource.CORE
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/executors/executor_loader.py", line 129, in _import_and_validate
executor = import_string(path)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/utils/module_loading.py", line 36, in import_string
module = import_module(module_path)
File "/usr/local/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/executors/celery_kubernetes_executor.py", line 26, in <module>
from airflow.executors.kubernetes_executor import KubernetesExecutor
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/executors/kubernetes_executor.py", line 44, in <module>
from airflow.kubernetes import pod_generator
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/kubernetes/pod_generator.py", line 46, in <module>
from airflow.kubernetes.kubernetes_helper_functions import add_pod_suffix, rand_str
File "/home/airflow/.local/lib/python3.10/site-packages/airflow/kubernetes/kubernetes_helper_functions.py", line 26, in <module>
from airflow.models.taskinstance import TaskInstanceKey
ImportError: cannot import name 'TaskInstanceKey' from partially initialized module 'airflow.models.taskinstance' (most likely due to a circular import) (/home/airflow/.local/lib/python3.10/site-packages/airflow/models/taskinstance.py)
```
### What you think should happen instead
The DB migration job should start without a circular import error.
### How to reproduce
I have a complex automation pipeline with many configurations, so, for now, I will not put my detailed configuration here. Please let me know if you need specific details.
I installed Airflow on my Kubernetes cluster using the official Helm chart 1.9.0. My database is Postgres. The DB migration job starts, but it fails with the error above.
### Operating System
Linux, AWS EKS-based Kubernetes
### Versions of Apache Airflow Providers
_No response_
### Deployment
Official Apache Airflow Helm Chart
### Deployment details
My full configuration is large and contains sensitive data. Please let me know if you need specific details.
I installed Airflow on my Kubernetes cluster using the official Helm chart 1.9.0. My database is Postgres. The DB migration job starts, but it fails with the error above.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| Thanks for opening your first issue here! Be sure to follow the issue template! If you are willing to raise PR to address this issue please do so, no need to wait for approval.
The loop is `DagRun -> TaskInstance -> Sentry -> KubernetesExecutor -> kubernetes_helper -> TaskInstanceKey`. I guess this happened because I enabled both Sentry and CeleryKubernetesExecutor.
Perhaps `annotations_to_key()` needs to be decoupled from `TaskInstanceKey` (or `airflow.models` in general)?
**EDIT:** Or perhaps move `TaskInstanceKey` out to a separate `.py` since it's a very basic class.
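Two things make the layout in the patch above safe: `taskinstancekey.py` depends only on the standard library, so the runtime import in `kubernetes_helper_functions.py` can no longer close a cycle, and the other call sites only need the name for type hints, so they keep it behind `TYPE_CHECKING`. A minimal sketch of the latter pattern (the `describe` helper is hypothetical; only the import path comes from the patch above):
```
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime, so this
    # import cannot re-enter a partially initialized module.
    from airflow.models.taskinstancekey import TaskInstanceKey


def describe(key: TaskInstanceKey) -> str:
    # Hypothetical helper, not part of the patch: it just reads the
    # NamedTuple fields that taskinstancekey.py defines.
    return f"{key.dag_id}.{key.task_id} run={key.run_id} try={key.try_number}"
```
Code that genuinely needs the class at runtime (for example `annotations_to_key()`) imports it from the new leaf module instead, which is the other half of the patch.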
https://github.com/apache/airflow/blob/ced56ea5a783a2846094996503d84fc21a33d6ae/airflow/kubernetes/kubernetes_helper_functions.py#L83 | 2023-05-03T06:43:10Z | [] | [] |
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
| 2,643 |
|||
apache/airflow | apache__airflow-8165 | 6c273466d598d7bcfb7c21feafcccb07cc4230fb | diff --git a/airflow/www/utils.py b/airflow/www/utils.py
--- a/airflow/www/utils.py
+++ b/airflow/www/utils.py
@@ -406,8 +406,8 @@ class CustomSQLAInterface(SQLAInterface):
'_' from the key to lookup the column names.
"""
- def __init__(self, obj):
- super().__init__(obj)
+ def __init__(self, obj, session=None):
+ super().__init__(obj, session=session)
def clean_column_names():
if self.list_properties:
| Security Views broken
Thanks, @KostyaEsmukov, for finding this bug. I was able to replicate it.
**Apache Airflow version**: 1.10.10.rc3
**Environment**:
- **OS** (e.g. from /etc/os-release): MacOs
- **Kernel** (e.g. `uname -a`): `Darwin MacBook-Pro.local 19.3.0 Darwin Kernel Version 19.3.0: Thu Jan 9 20:58:23 PST 2020; root:xnu-6153.81.5~1/RELEASE_X86_64 x86_64`
**What happened**:
Most of the tabs under **Security** dropdown are broken:
![image](https://user-images.githubusercontent.com/8811558/78505788-a7e27c00-776d-11ea-9204-ac0b60810e3e.png)
For example, when clicking on **List Users** above, it shows the following error:
```
-------------------------------------------------------------------------------
Node: 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/views.py", line 553, in list
self.list_template, title=self.list_title, widgets=widgets
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/baseviews.py", line 281, in render_template
template, **dict(list(kwargs.items()) + list(self.extra_args.items()))
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/templating.py", line 140, in render_template
ctx.app,
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 2, in top-level template code
{% import 'appbuilder/general/lib.html' as lib %}
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/environment.py", line 1005, in render
return concat(self.root_render_func(self.new_context(vars)))
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 17, in root
{% endblock %}
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/base.html", line 15, in root
File "/Users/kaxilnaik/Documents/Github/apache/airflow/airflow/www_rbac/templates/airflow/master.html", line 16, in root
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 17, in root
{% include 'appbuilder/flash.html' %}
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/init.html", line 32, in root
<link href="{{url_for('appbuilder.static',filename='select2/select2.css')}}" rel="stylesheet">
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 37, in block_body
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 29, in block_content
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 56, in block_list_search
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 570, in __call__
return self._invoke(arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 574, in _invoke
rv = self._func(*arguments)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/lib.html", line 783, in macro
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 570, in __call__
return self._invoke(arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 574, in _invoke
rv = self._func(*arguments)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 51, in macro
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/widgets.py", line 115, in __call__
form_fields[col] = self.template_args["form"][col]()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/wtforms/fields/core.py", line 155, in __call__
return self.meta.render_field(self, kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/wtforms/meta.py", line 56, in render_field
return field.widget(field, **render_kw)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/fieldwidgets.py", line 176, in __call__
return super(Select2ManyWidget, self).__call__(field, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/wtforms/widgets/core.py", line 323, in __call__
for val, label, selected in field.iter_choices():
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/fields.py", line 208, in iter_choices
for pk, obj in self._get_object_list():
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/fields.py", line 128, in _get_object_list
objs = self.query_func()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/forms.py", line 139, in <lambda>
return lambda: self.datamodel.get_related_interface(col_name).query()[1]
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 536, in get_related_interface
return self.__class__(self.get_related_model(col_name), self.session)
TypeError: __init__() takes 2 positional arguments but 3 were given
```
| Wonder if this is related to my DT changes where I had to hack around FABs SQLA models
> Wonder if this is related to my DT changes where I had to hack around FABs SQLA models
Yeah, very likely
Verified this error occurred because of a bug in f8f0bec38 | 2020-04-06T16:41:29Z | [] | [] |
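The patch at the top of this report fixes it by letting the subclass accept and forward `session`. A stripped-down sketch of the failure mode, using simplified stand-ins rather than the real Flask-AppBuilder classes:
```
class SQLAInterface:
    """Stand-in for flask_appbuilder.models.sqla.interface.SQLAInterface."""

    def __init__(self, obj, session=None):
        self.obj = obj
        self.session = session

    def get_related_interface(self, related_model):
        # Re-instantiates *the subclass* with two positional arguments,
        # which is the call that fails in the traceback.
        return self.__class__(related_model, self.session)


class CustomSQLAInterface(SQLAInterface):
    # If this override accepted only `obj`, the re-instantiation above would
    # raise "TypeError: __init__() takes 2 positional arguments but 3 were
    # given"; mirroring the parent signature and forwarding `session` fixes it.
    def __init__(self, obj, session=None):
        super().__init__(obj, session=session)
```
The design point is simply that any override of `__init__` on a class Flask-AppBuilder re-instantiates must keep the parent's signature.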
Traceback (most recent call last):
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/views.py", line 553, in list
self.list_template, title=self.list_title, widgets=widgets
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/baseviews.py", line 281, in render_template
template, **dict(list(kwargs.items()) + list(self.extra_args.items()))
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/templating.py", line 140, in render_template
ctx.app,
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask/templating.py", line 120, in _render
rv = template.render(context)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 2, in top-level template code
{% import 'appbuilder/general/lib.html' as lib %}
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/environment.py", line 1005, in render
return concat(self.root_render_func(self.new_context(vars)))
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 17, in root
{% endblock %}
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/base.html", line 15, in root
File "/Users/kaxilnaik/Documents/Github/apache/airflow/airflow/www_rbac/templates/airflow/master.html", line 16, in root
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 17, in root
{% include 'appbuilder/flash.html' %}
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/init.html", line 32, in root
<link href="{{url_for('appbuilder.static',filename='select2/select2.css')}}" rel="stylesheet">
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/baselayout.html", line 37, in block_body
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 29, in block_content
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 56, in block_list_search
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 570, in __call__
return self._invoke(arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 574, in _invoke
rv = self._func(*arguments)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/lib.html", line 783, in macro
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 570, in __call__
return self._invoke(arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 574, in _invoke
rv = self._func(*arguments)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/templates/appbuilder/general/model/list.html", line 51, in macro
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/widgets.py", line 115, in __call__
form_fields[col] = self.template_args["form"][col]()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/wtforms/fields/core.py", line 155, in __call__
return self.meta.render_field(self, kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/wtforms/meta.py", line 56, in render_field
return field.widget(field, **render_kw)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/fieldwidgets.py", line 176, in __call__
return super(Select2ManyWidget, self).__call__(field, **kwargs)
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/wtforms/widgets/core.py", line 323, in __call__
for val, label, selected in field.iter_choices():
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/fields.py", line 208, in iter_choices
for pk, obj in self._get_object_list():
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/fields.py", line 128, in _get_object_list
objs = self.query_func()
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/forms.py", line 139, in <lambda>
return lambda: self.datamodel.get_related_interface(col_name).query()[1]
File "/Users/kaxilnaik/.virtualenvs/airflow_test_upgrade/lib/python3.7/site-packages/flask_appbuilder/models/sqla/interface.py", line 536, in get_related_interface
return self.__class__(self.get_related_model(col_name), self.session)
TypeError: __init__() takes 2 positional arguments but 3 were given
| 2,682 |
|||
apache/airflow | apache__airflow-8230 | 55d379c71ffd3b765d446fb12a339114f3b0c14f | diff --git a/airflow/models/chart.py b/airflow/models/chart.py
--- a/airflow/models/chart.py
+++ b/airflow/models/chart.py
@@ -21,6 +21,7 @@
from sqlalchemy.orm import relationship
from airflow.models.base import Base, ID_LEN
+from airflow.models.user import User
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils import timezone
@@ -41,7 +42,7 @@ class Chart(Base):
height = Column(Integer, default=600)
default_params = Column(String(5000), default="{}")
owner = relationship(
- "User", cascade=False, cascade_backrefs=False, backref='charts')
+ User, cascade=False, cascade_backrefs=False, backref='charts')
x_is_date = Column(Boolean, default=True)
iteration_no = Column(Integer, default=0)
last_modified = Column(UtcDateTime, default=timezone.utcnow)
| Airflow webserver not starting with SQLAlchemy==1.3.16
**Apache Airflow version**: 1.10.9
**Environment**: Ubuntu 18.04 LTS
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release):Ubuntu 18.04 LTS
**What happened**: airflow webserver error
airflow@airflow:~$ airflow webserver
[2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor
[2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py", line 75, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py", line 900, in webserver
app = cached_app_rbac(None) if settings.RBAC else cached_app(None)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 233, in cached_app
app = create_app(config, testing)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 103, in create_app
models.Chart, Session, name="Charts", category="Data Profiling"))
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 330, in __init__
menu_icon_value=menu_icon_value)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 818, in __init__
self._refresh_cache()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 913, in _refresh_cache
self._search_supported = self.init_search()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 581, in init_search
if tools.is_hybrid_property(self.model, name):
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 209, in is_hybrid_property
return last_name in get_hybrid_properties(last_model)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 190, in get_hybrid_properties
for key, prop in inspect(model).all_orm_descriptors.items()
File "/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py", line 72, in inspect
"available for object of type %s" % type_
sqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>
**What you expected to happen**: to start
**How to reproduce it**:
Install Airflow with pip3, and Postgres from Ubuntu (which is version 10).
| Thanks for opening your first issue here! Be sure to follow the issue template!
Actually, I got the same error with the SequentialExecutor as well
The same problem here: I'm on `apache-airflow==1.10.9` on Ubuntu 18.04. I tried to downgrade to `1.10.8`, but the problem remains.
I run `airflow==1.10.7`; it seems the issue is with a dependency. I tried downgrading `SQLAlchemy`, but that also does not resolve it.
I suspect flask
flask had a new release 1.1.2 5 days ago
i tried running airflow 1.9.0 with flask 1.1.1 i had this error:
[2020-04-08 10:10:31,797] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=32160
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 26, in <module>
from airflow.bin.cli import CLIFactory
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py", line 70, in <module>
from airflow.www.app import (cached_app, create_app)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 37, in <module>
from airflow.www.blueprints import routes
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/blueprints.py", line 25, in <module>
from airflow.www import utils as wwwutils
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/utils.py", line 39, in <module>
from flask_admin.model import filters
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/__init__.py", line 2, in <module>
from .base import BaseModelView
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 8, in <module>
from werkzeug import secure_filename
ImportError: cannot import name 'secure_filename'
I solved it by doing `pip3 install werkzeug==0.16.0`; after that I removed the package, but the webserver is still not starting, so probably more dependencies are involved.
3 weeks ago the same setup worked for me. I did some checks and I managed to start it with this configuration:
airflow 1.9.0
flask 1.1.1 (currently 1.1.2)
werkzeug 0.16.0
sqlalchemy 1.3.13 (currently sqlalchemy 1.3.16)
Downgrading sqlalchemy makes it work
I would not close the issue. It's still a problem, we shouldn't have to downgrade anything to make it work :/
I agree, it results from the dependencies as specified by Airflow requirements. It needs a bug fix.
Besides, when running python3.7, airflow 1.10.7 and sqlalchemy==1.3.13, it still does not work.
We're running airflow 1.10.9 and it worked until yesterday. When rebuilding the docker image today it automatically compiled and installed python 3.7.6 and airflow 1.10.9 with the required dependencies.
After rebuilding the docker image from scratch, the webserver failed with the bug described in the first post.
Performing an initial comparison with the working docker container for airflow-webserver, I found this:
boto3 1.12.34 --> 1.12.38
botocore 1.15.34 --> 1.15.38
certifi 2019.11.28 --> 2020.4.5.1
cryptography 2.8 --> 2.9
Flask 1.1.1 -> 1.1.2
psycopg2-binary 2.8.4 --> 2.8.5
SQLAlchemy 1.3.15 ---> 1.3.16
typing-extensions 3.7.4.1 --> 3.7.4.2
I'll try downgrading the packages to their original versions and check which package caused the issue.
I'll keep you posted via this bug report.
The problem seems to be SQLAlchemy version 1.3.16, which was released 8 hours ago. I downgraded to 1.3.15 and it works.
My setup is like this:
python 3.6.9
pip3 install apache-airflow[postgres]
pip3 uninstall SQLAlchemy (it says that it removed SQLAlchemy-1.3.16)
pip3 install SQLAlchemy==1.3.15
airflow webserver started without errors
Can confirm what @catalin0608 says.
just downgraded to SQLAlchemy 1.3.15 for Airflow 1.10.9 running on python 3.7.6. all good.
webserver just starts fine
What causes SQLAlchemy to upgrade as it's pinned in the requirements? `SQLAlchemy==1.3.15`
@marclamberti I closed the ticket by mistake.
As of last week It is possible to use the requirements.txt as constraint file to install airflow. You can see it here:
https://github.com/apache/airflow/blob/master/INSTALL
and
https://github.com/apache/airflow/blob/master/IMAGES.rst
For example:
```
pip install apache-airflow==1.10.9 --constraint requirements/requirements-python3.7.txt
```
Or directly from GitHub:
```
pip install apache-airflow[gcp]==1.10.9 \
--constraint https://raw.githubusercontent.com/apache/airflow/v1-10-test/requirements/requirements-python3.7.txt
```
Note that you have to choose the right python version.
This should theoretically solve all the problems with installation. Once we release 1.10.10 you will be able to directly use the version of requirements that you install airflow with:
```
pip install apache-airflow[gcp]==1.10.10 \
--constraint https://raw.githubusercontent.com/apache/airflow/1.10.10/requirements/requirements-python3.7.txt
```
But this will only work for 1.10.10+
@potiuk That's actually a great news :)
We have a solution to the problem, but did anyone create a ticket in the SQLAlchemy project? I'd love to do it, but I'm not deeply familiar with the problem.
FYI - my working environment for airflow webserver and corresponding requirements.txt
environment:
python: 3.7.6
airflow: 1.10.9
docker-base-image: openjdk:8-slim-stretch
requirements.txt:
alembic==1.4.2
amqp==2.5.2
apache-airflow==1.10.9
apispec==1.3.3
argcomplete==1.11.1
attrs==19.3.0
Babel==2.8.0
bcrypt==3.1.7
billiard==3.6.3.0
boto3==1.12.34
botocore==1.15.34
cached-property==1.5.1
cattrs==0.9.0
celery==4.4.2
certifi==2019.11.28
cffi==1.14.0
chardet==3.0.4
click==7.1.1
colorama==0.4.3
colorlog==4.0.2
configparser==3.5.3
croniter==0.3.31
cryptography==2.8
defusedxml==0.6.0
dill==0.3.1.1
docutils==0.15.2
Flask==1.1.1
Flask-Admin==1.5.4
Flask-AppBuilder==2.3.1
Flask-Babel==1.0.0
Flask-Bcrypt==0.7.1
Flask-Caching==1.3.3
Flask-JWT-Extended==3.24.1
Flask-Login==0.4.1
Flask-OpenID==1.2.5
Flask-SQLAlchemy==2.4.1
flask-swagger==0.2.13
Flask-WTF==0.14.3
flower==0.9.4
funcsigs==1.0.2
future==0.16.0
graphviz==0.13.2
gunicorn==19.10.0
humanize==0.5.1
idna==2.9
importlib-metadata==1.6.0
iso8601==0.1.12
itsdangerous==1.1.0
Jinja2==2.10.3
jmespath==0.9.5
json-merge-patch==0.2
jsonschema==3.2.0
kombu==4.6.8
lazy-object-proxy==1.4.3
lockfile==0.12.2
Mako==1.1.2
Markdown==2.6.11
MarkupSafe==1.1.1
marshmallow==2.21.0
marshmallow-enum==1.5.1
marshmallow-sqlalchemy==0.22.3
numpy==1.18.2
pandas==0.25.3
paramiko==2.7.1
pendulum==1.4.4
prison==0.1.3
psutil==5.7.0
psycopg2-binary==2.8.4
pyarrow==0.16.0
pycparser==2.20
Pygments==2.6.1
PyJWT==1.7.1
PyNaCl==1.3.0
pyrsistent==0.16.0
pysftp==0.2.9
python-daemon==2.1.2
python-dateutil==2.8.1
python-editor==1.0.4
python3-openid==3.1.0
pytz==2019.3
pytzdata==2019.3
PyYAML==5.3.1
readline==6.2.4.1
redis==3.4.1
requests==2.23.0
s3transfer==0.3.3
setproctitle==1.1.10
six==1.14.0
SQLAlchemy==1.3.15
SQLAlchemy-JSONField==0.9.0
SQLAlchemy-Utils==0.36.3
tabulate==0.8.7
tenacity==4.12.0
termcolor==1.1.0
text-unidecode==1.2
thrift==0.13.0
tornado==5.1.1
typing-extensions==3.7.4.1
tzlocal==1.5.1
unicodecsv==0.14.1
urllib3==1.25.8
vine==1.3.0
Werkzeug==0.16.1
WTForms==2.2.1
xlrd==1.2.0
zipp==3.1.0
zope.deprecation==4.4.0
As @potiuk said, the best way is to add the requirement in the pip install command so that the dependencies are frozen.
Just FYI. Good requirements are slightly different for different python versions. The "official" 1.10. requirements are here: https://github.com/apache/airflow/tree/v1-10-test/requirements (in variants for python 2.7, 3.5, 3.6, 3.7)
pinning SQLAlchemy==1.3.15 helps me
It's not clear if this is a bug in SQLA or in Flask-Admin https://github.com/flask-admin/flask-admin/issues/1976
Also: Does this problem only affect people using the "legacy" UI? i.e. if you set `AIRFLOW__WEBSERVER__RBAC` does this problem still happen?
Confirmed, the new RBAC webserver starts up fine. | 2020-04-09T11:57:17Z | [] | [] |
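For reference, the patch above sidesteps the failing introspection in the legacy (flask-admin) UI by handing `relationship()` the mapped class instead of its string name. A minimal, self-contained sketch of the two styles (table and column names are illustrative, not Airflow's real schema):
```
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    username = Column(String(50))


class Chart(Base):
    __tablename__ = "chart"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    # String target, resolved lazily through the declarative class registry:
    # owner = relationship("User", backref="charts")
    # Class target, the style the patch above switches to:
    owner = relationship(User, backref="charts")
```
Both styles are valid SQLAlchemy; passing the class simply avoids the lazy name resolution that the legacy UI's search-field code tripped over with 1.3.16.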
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py", line 75, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py", line 900, in webserver
app = cached_app_rbac(None) if settings.RBAC else cached_app(None)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 233, in cached_app
app = create_app(config, testing)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 103, in create_app
models.Chart, Session, name="Charts", category="Data Profiling"))
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 330, in __init__
menu_icon_value=menu_icon_value)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 818, in __init__
self._refresh_cache()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 913, in _refresh_cache
self._search_supported = self.init_search()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 581, in init_search
if tools.is_hybrid_property(self.model, name):
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 209, in is_hybrid_property
return last_name in get_hybrid_properties(last_model)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 190, in get_hybrid_properties
for key, prop in inspect(model).all_orm_descriptors.items()
File "/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py", line 72, in inspect
"available for object of type %s" % type_
sqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>
| 2,684 |
|||
apache/airflow | apache__airflow-8512 | 57c8c05839f66ed2909b1bee8ff6976432db82aa | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -598,6 +598,7 @@ def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version
'tzlocal>=1.4,<2.0.0',
'unicodecsv>=0.14.1',
'werkzeug<1.0.0',
+ 'WTforms<2.3.0', # TODO: Remove after https://github.com/dpgaspar/Flask-AppBuilder/issues/1356 is fixed and released.
]
| WTForms new release 2.3.0 breaks airflow 1.10.10
**Apache Airflow version**: 1.10.10
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):
**Environment**:
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release):
NAME="Ubuntu"
VERSION="16.04.3 LTS (Xenial Xerus)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 16.04.3 LTS"
VERSION_ID="16.04"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
VERSION_CODENAME=xenial
UBUNTU_CODENAME=xenial
- **Kernel** (e.g. `uname -a`): ubuntu 4.15.0-96-generic #97~16.04.1-Ubuntu SMP Wed Apr 1 03:03:31 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux **Install tools**: pip
- **Others**:
**What happened**:
airflow initdb
/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/airflow/configuration.py:652: DeprecationWarning: You have two airflow.cfg files: /home/sgu/airflow/airflow.cfg and /home/sgu/tmp20200221/airflow.cfg. Airflow used to look at ~/airflow/airflow.cfg, even when AIRFLOW_HOME was set to a different value. Airflow will now only read /home/sgu/tmp20200221/airflow.cfg, and you should remove the other file
category=DeprecationWarning,
Traceback (most recent call last):
File "/home/sgu/miniconda3/envs/tmp/bin/airflow", line 26, in <module>
from airflow.bin.cli import CLIFactory
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/airflow/bin/cli.py", line 71, in <module>
from airflow.www_rbac.app import cached_app as cached_app_rbac
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/airflow/www_rbac/app.py", line 28, in <module>
from flask_appbuilder import AppBuilder, SQLA
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/__init__.py", line 6, in <module>
from .base import AppBuilder # noqa: F401
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/base.py", line 8, in <module>
from .api.manager import OpenApiManager
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/api/manager.py", line 7, in <module>
from flask_appbuilder.baseviews import BaseView
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/baseviews.py", line 21, in <module>
from .forms import GeneralModelConverter
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/forms.py", line 17, in <module>
from .fieldwidgets import (
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/fieldwidgets.py", line 3, in <module>
from wtforms.widgets import html_params, HTMLString
ImportError: cannot import name 'HTMLString'
**What you expected to happen**:
**How to reproduce it**:
pip install apache-airflow
airflow initdb
**Anything else we need to know**:
wtforms just released 2.3.0 which breaks flask which breaks airflow.
https://wtforms.readthedocs.io/en/2.3.x/changes/#version-2-3-0
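The import that fails is `wtforms.widgets.HTMLString` (see the traceback in this report); the WTForms 2.3 changelog points at `markupsafe.Markup` as its replacement. Airflow's actual fix is the `setup.py` pin shown above; the snippet below is only an illustration of the kind of compatibility shim a downstream library could use, not something Airflow ships:
```
try:
    # WTForms < 2.3 re-exported HTMLString from wtforms.widgets.
    from wtforms.widgets import HTMLString
except ImportError:
    # WTForms >= 2.3: fall back to the MarkupSafe class it was based on.
    from markupsafe import Markup as HTMLString

# Both names mark a string as already-escaped HTML.
snippet = HTMLString("<b>already escaped</b>")
```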
| Thanks for opening your first issue here! Be sure to follow the issue template!
FYI, if you want to install airflow in a repeatable way: as of airflow 1.10.10 you have a way to install airflow repeatably no matter whether some packages released breaking changes.
I will make it more prominent in README/INSTALL now that we have it released, but this should give you an "always working" installation mechanism (note the python version in the link):
```
pip install apache-airflow[...EXTRAS_HERE..]==1.10.10 \
--constraint https://raw.githubusercontent.com/apache/airflow/1.10.10/requirements/requirements-python3.7.txt
```
I am looking into it at master and we will also fix it in 1.10.11 | 2020-04-22T09:59:56Z | [] | [] |
Traceback (most recent call last):
File "/home/sgu/miniconda3/envs/tmp/bin/airflow", line 26, in <module>
from airflow.bin.cli import CLIFactory
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/airflow/bin/cli.py", line 71, in <module>
from airflow.www_rbac.app import cached_app as cached_app_rbac
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/airflow/www_rbac/app.py", line 28, in <module>
from flask_appbuilder import AppBuilder, SQLA
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/__init__.py", line 6, in <module>
from .base import AppBuilder # noqa: F401
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/base.py", line 8, in <module>
from .api.manager import OpenApiManager
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/api/manager.py", line 7, in <module>
from flask_appbuilder.baseviews import BaseView
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/baseviews.py", line 21, in <module>
from .forms import GeneralModelConverter
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/forms.py", line 17, in <module>
from .fieldwidgets import (
File "/home/sgu/miniconda3/envs/tmp/lib/python3.6/site-packages/flask_appbuilder/fieldwidgets.py", line 3, in <module>
from wtforms.widgets import html_params, HTMLString
ImportError: cannot import name 'HTMLString'
| 2,688 |
|||
apache/airflow | apache__airflow-8671 | c717d12f47c604082afc106b7a4a1f71d91f73e2 | diff --git a/airflow/configuration.py b/airflow/configuration.py
--- a/airflow/configuration.py
+++ b/airflow/configuration.py
@@ -18,6 +18,7 @@
import copy
import logging
+import multiprocessing
import os
import pathlib
import re
@@ -180,12 +181,8 @@ def __init__(self, default_config=None, *args, **kwargs):
self.is_validated = False
def _validate(self):
- if (
- self.get("core", "executor") not in ('DebugExecutor', 'SequentialExecutor') and
- "sqlite" in self.get('core', 'sql_alchemy_conn')):
- raise AirflowConfigException(
- "error: cannot use sqlite with the {}".format(
- self.get('core', 'executor')))
+
+ self._validate_config_dependencies()
for section, replacement in self.deprecated_values.items():
for name, info in replacement.items():
@@ -204,6 +201,28 @@ def _validate(self):
self.is_validated = True
+ def _validate_config_dependencies(self):
+ """
+ Validate that config values aren't invalid given other config values
+ or system-level limitations and requirements.
+ """
+
+ if (
+ self.get("core", "executor") not in ('DebugExecutor', 'SequentialExecutor') and
+ "sqlite" in self.get('core', 'sql_alchemy_conn')):
+ raise AirflowConfigException(
+ "error: cannot use sqlite with the {}".format(
+ self.get('core', 'executor')))
+
+ if self.has_option('core', 'mp_start_method'):
+ mp_start_method = self.get('core', 'mp_start_method')
+ start_method_options = multiprocessing.get_all_start_methods()
+
+ if mp_start_method not in start_method_options:
+ raise AirflowConfigException(
+ "mp_start_method should not be " + mp_start_method +
+ ". Possible values are " + ", ".join(start_method_options))
+
def _using_old_value(self, old, current_value):
return old.search(current_value) is not None
diff --git a/airflow/jobs/scheduler_job.py b/airflow/jobs/scheduler_job.py
--- a/airflow/jobs/scheduler_job.py
+++ b/airflow/jobs/scheduler_job.py
@@ -54,12 +54,13 @@
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
+from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
-class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin):
+class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
@@ -181,8 +182,11 @@ def start(self):
"""
Launch the process and start processing the DAG.
"""
- self._parent_channel, _child_channel = multiprocessing.Pipe()
- self._process = multiprocessing.Process(
+ start_method = self._get_multiprocessing_start_method()
+ context = multiprocessing.get_context(start_method)
+
+ self._parent_channel, _child_channel = context.Pipe()
+ self._process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
@@ -1527,14 +1531,6 @@ def _execute(self):
self.log.info("Processing each file at most %s times", self.num_runs)
- def processor_factory(file_path, failure_callback_requests):
- return DagFileProcessorProcess(
- file_path=file_path,
- pickle_dags=pickle_dags,
- dag_id_white_list=self.dag_ids,
- failure_callback_requests=failure_callback_requests
- )
-
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
@@ -1543,8 +1539,10 @@ def processor_factory(file_path, failure_callback_requests):
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
self.num_runs,
- processor_factory,
+ type(self)._create_dag_file_processor,
processor_timeout,
+ self.dag_ids,
+ pickle_dags,
async_mode)
try:
@@ -1584,6 +1582,18 @@ def processor_factory(file_path, failure_callback_requests):
self.processor_agent.end()
self.log.info("Exited execute loop")
+ @staticmethod
+ def _create_dag_file_processor(file_path, failure_callback_requests, dag_ids, pickle_dags):
+ """
+ Creates DagFileProcessorProcess instance.
+ """
+ return DagFileProcessorProcess(
+ file_path=file_path,
+ pickle_dags=pickle_dags,
+ dag_id_white_list=dag_ids,
+ failure_callback_requests=failure_callback_requests
+ )
+
def _run_scheduler_loop(self):
"""
The actual scheduler loop. The main steps in the loop are:
diff --git a/airflow/utils/dag_processing.py b/airflow/utils/dag_processing.py
--- a/airflow/utils/dag_processing.py
+++ b/airflow/utils/dag_processing.py
@@ -39,7 +39,6 @@
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
-from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.models import errors
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance
from airflow.settings import STORE_SERIALIZED_DAGS
@@ -47,6 +46,7 @@
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
+from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
@@ -283,7 +283,7 @@ class FailureCallbackRequest(NamedTuple):
msg: str
-class DagFileProcessorAgent(LoggingMixin):
+class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
@@ -303,6 +303,10 @@ class DagFileProcessorAgent(LoggingMixin):
:type processor_factory: (str, str, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
+ :param dag_ids: if specified, only schedule tasks with these DAG IDs
+ :type dag_ids: list[str]
+ :param pickle_dags: whether to pickle DAGs.
+ :type: pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
@@ -312,6 +316,8 @@ def __init__(self,
max_runs,
processor_factory,
processor_timeout,
+ dag_ids,
+ pickle_dags,
async_mode):
super().__init__()
self._file_path_queue = []
@@ -319,6 +325,8 @@ def __init__(self,
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
+ self._dag_ids = dag_ids
+ self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
@@ -335,8 +343,11 @@ def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
- self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
- self._process = multiprocessing.Process(
+ mp_start_method = self._get_multiprocessing_start_method()
+ context = multiprocessing.get_context(mp_start_method)
+
+ self._parent_signal_conn, child_signal_conn = context.Pipe()
+ self._process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
@@ -344,7 +355,9 @@ def start(self):
self._processor_factory,
self._processor_timeout,
child_signal_conn,
- self._async_mode,
+ self._dag_ids,
+ self._pickle_dags,
+ self._async_mode
)
)
self._process.start()
@@ -412,6 +425,8 @@ def _run_processor_manager(dag_directory,
processor_factory,
processor_timeout,
signal_conn,
+ dag_ids,
+ pickle_dags,
async_mode):
# Make this process start as a new process group - that makes it easy
@@ -437,6 +452,8 @@ def _run_processor_manager(dag_directory,
processor_factory,
processor_timeout,
signal_conn,
+ dag_ids,
+ pickle_dags,
async_mode)
processor_manager.start()
@@ -548,6 +565,10 @@ class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instanc
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
+ :param dag_ids: if specified, only schedule tasks with these DAG IDs
+ :type dag_ids: list[str]
+ :param pickle_dags: whether to pickle DAGs.
+ :type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
@@ -561,6 +582,8 @@ def __init__(self,
],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
+ dag_ids: List[str],
+ pickle_dags: bool,
async_mode: bool = True):
super().__init__()
self._file_paths: List[str] = []
@@ -569,6 +592,8 @@ def __init__(self,
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
+ self._pickle_dags = pickle_dags
+ self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[datetime] = None
@@ -1064,7 +1089,12 @@ def start_new_processes(self):
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
callback_to_execute_for_file = self._callback_to_execute[file_path]
- processor = self._processor_factory(file_path, callback_to_execute_for_file)
+ processor = self._processor_factory(
+ file_path,
+ callback_to_execute_for_file,
+ self._dag_ids,
+ self._pickle_dags)
+
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
@@ -1134,6 +1164,7 @@ def _find_zombies(self, session):
if not self._last_zombie_query_time or \
(now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
# to avoid circular imports
+ from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
diff --git a/airflow/utils/mixins.py b/airflow/utils/mixins.py
new file mode 100644
--- /dev/null
+++ b/airflow/utils/mixins.py
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import multiprocessing
+
+from airflow.configuration import conf
+
+
+class MultiprocessingStartMethodMixin:
+ """
+ Convenience class to add support for different types of multiprocessing.
+ """
+ def _get_multiprocessing_start_method(self):
+ """
+ Determine method of creating new processes by checking if the
+ mp_start_method is set in configs, else, it uses the OS default.
+ """
+ if conf.has_option('core', 'mp_start_method'):
+ return conf.get('core', 'mp_start_method')
+
+ return multiprocessing.get_start_method()
| [AIRFLOW-6529] Pickle error occurs when the scheduler tries to run on macOS.
When we try to run the scheduler on macOS, we get a serialization error like the following.
```
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2020-01-10 19:54:41,974] {executor_loader.py:59} INFO - Using executor SequentialExecutor
[2020-01-10 19:54:41,983] {scheduler_job.py:1462} INFO - Starting the scheduler
[2020-01-10 19:54:41,984] {scheduler_job.py:1469} INFO - Processing each file at most -1 times
[2020-01-10 19:54:41,984] {scheduler_job.py:1472} INFO - Searching for files in /Users/sarutak/airflow/dags
[2020-01-10 19:54:42,025] {scheduler_job.py:1474} INFO - There are 27 files in /Users/sarutak/airflow/dags
[2020-01-10 19:54:42,025] {scheduler_job.py:1527} INFO - Resetting orphaned tasks for active dag runs
[2020-01-10 19:54:42,059] {scheduler_job.py:1500} ERROR - Exception when executing execute_helper
Traceback (most recent call last):
File "/Users/sarutak/work/oss/airflow-env/master-python3.8.1/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1498, in _execute
self._execute_helper()
File "/Users/sarutak/work/oss/airflow-env/master-python3.8.1/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1531, in _execute_helper
self.processor_agent.start()
File "/Users/sarutak/work/oss/airflow-env/master-python3.8.1/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 348, in start
self._process.start()
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/context.py", line 283, in _Popen
return Popen(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'SchedulerJob._execute.<locals>.processor_factory'
```
The reason is that the scheduler tries to start its subprocesses via multiprocessing in spawn mode, and that mode pickles the objects it hands to the child process. In this case it tries to pickle the `processor_factory` inner function, which cannot be pickled.
Actually, as of Python 3.8, spawn is the default start method on macOS.
The solution I propose is to pull the function out of the enclosing method.
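For illustration, here is a minimal, self-contained sketch (not Airflow code) of why spawn mode rejects an inner function while a module-level one works:

```python
import multiprocessing
import sys


def module_level_target(msg):
    # Module-level functions can be pickled, so they work with "spawn".
    print(msg)


def main():
    def local_target(msg):  # defined inside another function -> not picklable
        print(msg)

    ctx = multiprocessing.get_context("spawn")

    ok = ctx.Process(target=module_level_target, args=("works",))
    ok.start()
    ok.join()

    # "spawn" pickles the Process object in the parent, so this fails with
    # AttributeError: Can't pickle local object 'main.<locals>.local_target'
    broken = ctx.Process(target=local_target, args=("fails",))
    try:
        broken.start()
    except AttributeError as exc:
        print(exc, file=sys.stderr)


if __name__ == "__main__":
    main()
```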
---
Issue link: [AIRFLOW-6529](https://issues.apache.org/jira/browse/AIRFLOW-6529)
- [x] Description above provides context of the change
- [x] Commit message/PR title starts with `[AIRFLOW-NNNN]`. AIRFLOW-NNNN = JIRA ID<sup>*</sup>
- [x] Unit tests coverage for changes (not needed for documentation changes)
- [x] Commits follow "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)"
- [x] Relevant documentation is updated including usage instructions.
- [x] I will engage committers as explained in [Contribution Workflow Example](https://github.com/apache/airflow/blob/master/CONTRIBUTING.rst#contribution-workflow-example).
<sup>*</sup> For document-only changes commit message can start with `[AIRFLOW-XXXX]`.
---
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/master/UPDATING.md).
Read the [Pull Request Guidelines](https://github.com/apache/airflow/blob/master/CONTRIBUTING.rst#pull-request-guidelines) for more information.
| 2020-05-01T15:58:00Z | [] | [] |
Traceback (most recent call last):
File "/Users/sarutak/work/oss/airflow-env/master-python3.8.1/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1498, in _execute
self._execute_helper()
File "/Users/sarutak/work/oss/airflow-env/master-python3.8.1/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1531, in _execute_helper
self.processor_agent.start()
File "/Users/sarutak/work/oss/airflow-env/master-python3.8.1/lib/python3.8/site-packages/airflow/utils/dag_processing.py", line 348, in start
self._process.start()
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/context.py", line 283, in _Popen
return Popen(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "/opt/python/3.8.1/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'SchedulerJob._execute.<locals>.processor_factory'
| 2,694 |
||||
apache/airflow | apache__airflow-8787 | 2bd3e760deaed9c59508a90718d08cdf90cd928f | diff --git a/airflow/providers/apache/spark/example_dags/example_spark_dag.py b/airflow/providers/apache/spark/example_dags/example_spark_dag.py
--- a/airflow/providers/apache/spark/example_dags/example_spark_dag.py
+++ b/airflow/providers/apache/spark/example_dags/example_spark_dag.py
@@ -48,7 +48,6 @@
jdbc_to_spark_job = SparkJDBCOperator(
cmd_type='jdbc_to_spark',
jdbc_table="foo",
- spark_conf={},
spark_jars="${SPARK_HOME}/jars/postgresql-42.2.12.jar",
jdbc_driver="org.postgresql.Driver",
metastore_table="bar",
@@ -60,7 +59,6 @@
spark_to_jdbc_job = SparkJDBCOperator(
cmd_type='spark_to_jdbc',
jdbc_table="foo",
- spark_conf={},
spark_jars="${SPARK_HOME}/jars/postgresql-42.2.12.jar",
jdbc_driver="org.postgresql.Driver",
metastore_table="bar",
diff --git a/airflow/providers/apache/spark/hooks/spark_jdbc.py b/airflow/providers/apache/spark/hooks/spark_jdbc.py
--- a/airflow/providers/apache/spark/hooks/spark_jdbc.py
+++ b/airflow/providers/apache/spark/hooks/spark_jdbc.py
@@ -147,7 +147,7 @@ def __init__(self,
super().__init__(*args, **kwargs)
self._name = spark_app_name
self._conn_id = spark_conn_id
- self._conf = spark_conf
+ self._conf = spark_conf or {}
self._py_files = spark_py_files
self._files = spark_files
self._jars = spark_jars
| Spark JDBC Hook fails if spark_conf is not specified
**Apache Airflow version**: 1.10.10
**What happened**:
In SparkJDBCHook, the `spark_conf` parameter defaults to None; if it is kept like that, it raises an error:
```
Traceback (most recent call last):
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 983, in _run_raw_task
result = task_copy.execute(context=context)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/operators/spark_jdbc_operator.py", line 211, in execute
self._hook.submit_jdbc_job()
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_jdbc_hook.py", line 243, in submit_jdbc_job
"/spark_jdbc_script.py")
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_submit_hook.py", line 383, in submit
spark_submit_cmd = self._build_spark_submit_command(application)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_submit_hook.py", line 254, in _build_spark_submit_command
for key in self._conf:
TypeError: 'NoneType' object is not iterable
```
**What you expected to happen**:
Following the same behaviour as SparkSubmitHook, `spark_conf` should default to an empty dict `{}`:
```
self._conf = conf or {}
```
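For illustration only, a minimal sketch (a hypothetical `build_conf_args` helper, not the hook's real command builder) of why the `or {}` normalisation matters:

```python
def build_conf_args(conf=None):
    # Without normalisation, iterating over None raises
    # "TypeError: 'NoneType' object is not iterable" -- the error shown above.
    conf = conf or {}
    args = []
    for key, value in conf.items():
        args += ["--conf", "{}={}".format(key, value)]
    return args


print(build_conf_args())                                 # []
print(build_conf_args({"spark.executor.memory": "2g"}))  # ['--conf', 'spark.executor.memory=2g']
```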
**How to reproduce it**:
Create a DAG with SparkJDBCOperator and don't specify the parameter `spark_conf`
```
spark_to_jdbc_job = SparkJDBCOperator(
cmd_type='spark_to_jdbc',
jdbc_table="foo",
spark_jars="${SPARK_HOME}/jars/postgresql-42.2.12.jar",
jdbc_driver="org.postgresql.Driver",
metastore_table="bar",
save_mode="append",
task_id="spark_to_jdbc_job"
)
```
**Anything else we need to know**:
I am happy to implement this change.
| 2020-05-08T12:30:25Z | [] | [] |
Traceback (most recent call last):
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 983, in _run_raw_task
result = task_copy.execute(context=context)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/operators/spark_jdbc_operator.py", line 211, in execute
self._hook.submit_jdbc_job()
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_jdbc_hook.py", line 243, in submit_jdbc_job
"/spark_jdbc_script.py")
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_submit_hook.py", line 383, in submit
spark_submit_cmd = self._build_spark_submit_command(application)
File "/Users/rbottega/Documents/airflow_latest/env/lib/python3.7/site-packages/airflow/contrib/hooks/spark_submit_hook.py", line 254, in _build_spark_submit_command
for key in self._conf:
TypeError: 'NoneType' object is not iterable
| 2,696 |
||||
apache/airflow | apache__airflow-9779 | 1de78e8f97f48f8f4abd167a0120ffab8af6127a | diff --git a/airflow/www/utils.py b/airflow/www/utils.py
--- a/airflow/www/utils.py
+++ b/airflow/www/utils.py
@@ -321,8 +321,8 @@ def wrapped_markdown(s, css_class=None):
return None
return Markup(
- '<div class="rich_doc {css_class}" >' + markdown.markdown(s) + "</div>"
- ).format(css_class=css_class)
+ '<div class="rich_doc {css_class}" >'.format(css_class=css_class) + markdown.markdown(s) + "</div>"
+ )
def get_attr_renderer():
| JSON notation in Airflow DAG comments causing KeyError
**Apache Airflow version**: 1.10.11 (code is working fine in 1.10.10)
**Environment**: Python 3.7
- **Cloud provider or hardware configuration**:
- **OS** (e.g. from /etc/os-release): Catalina 10.15.5
- **Kernel** (e.g. `uname -a`):
- **Install tools**:
- **Others**:
**What happened**:
KeyError: '"randomKey"'
**What you expected to happen**:
Expected the block comment on the top of the DAG to be copied as is into the DAG description box in the UI
**How to reproduce it**:
Add the following code to the top of your DAG
"""
This is a test dag. this is the description of it. This is how you can trigger it with CLI.
airflow trigger_dag --conf '{"randomKey":30}' airflow-test-dag
--conf options:
randomKey:<INT> - Optional
"""
**Anything else we need to know**: Works fine with Airflow 1.10.10 and Python 3.7.
**UI Logs:**
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/flask_admin/base.py", line 69, in inner
return self._run_view(f, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_admin/base.py", line 368, in _run_view
return fn(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_login/utils.py", line 258, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 380, in view_func
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 286, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 1749, in graph
doc_md = wrapped_markdown(getattr(dag, 'doc_md', None), css_class='dag-doc')
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 258, in wrapped_markdown
).format(css_class=css_class)
File "/usr/local/lib/python3.7/site-packages/markupsafe/__init__.py", line 213, in format
return self.__class__(formatter.vformat(self, args, kwargs))
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 190, in vformat
result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 230, in _vformat
obj, arg_used = self.get_field(field_name, args, kwargs)
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 295, in get_field
obj = self.get_value(first, args, kwargs)
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 252, in get_value
return kwargs[key]
File "/usr/local/lib/python3.7/site-packages/markupsafe/__init__.py", line 249, in __getitem__
return self._kwargs[key]
KeyError: '"randomKey"'
```
| @prakshalj0512 what version of flask do you use?
```
Flask 1.1.2
Flask-Admin 1.5.4
Flask-AppBuilder 2.3.0
Flask-Babel 1.0.0
Flask-Caching 1.3.3
Flask-JWT-Extended 3.24.1
Flask-Login 0.4.1
Flask-OpenID 1.2.5
Flask-RESTful 0.3.8
Flask-SQLAlchemy 2.4.1
Flask-WTF 0.14.3
```
Can you show us your DAG please?
https://github.com/teamclairvoyant/airflow-maintenance-dags/blob/master/db-cleanup/airflow-db-cleanup.py
Reference Lines 176-177
```
if hasattr(dag, 'doc_md'):
    dag.doc_md = __doc__
```
I was able to reproduce it, but it looks like it can be easily fixed if you remove the following from the DAG's docstring (https://github.com/teamclairvoyant/airflow-maintenance-dags/blob/63ddbfb19d53fa7b44343dc0525cf1b60f16fed6/db-cleanup/airflow-db-cleanup.py#L6-L9):
```
airflow trigger_dag --conf '{"maxDBEntryAgeInDays":30}' airflow-db-cleanup
```
Yes, I narrowed it down to the curly braces that were causing the issue. Not sure what's causing them to be interpreted as part of JSON as opposed to the comment.
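For illustration, a minimal sketch of the brace problem using markupsafe directly (the `doc_html` and `css_class` values are made up; this is not the Airflow view code itself):

```python
from markupsafe import Markup

css_class = "dag-doc"
doc_html = "<p>airflow trigger_dag --conf '{\"randomKey\":30}' airflow-test-dag</p>"

# Broken order: the user-supplied text is concatenated *before* .format(), so its
# braces are parsed as format fields and .format() raises KeyError: '"randomKey"'.
try:
    Markup('<div class="rich_doc {css_class}" >' + doc_html + "</div>").format(css_class=css_class)
except KeyError as exc:
    print("KeyError:", exc)

# Fixed order (what the patch above does): format only the template first, then
# append the rendered markdown, so the user's braces are never interpreted.
safe = Markup('<div class="rich_doc {css_class}" >'.format(css_class=css_class) + doc_html + "</div>")
print(safe)
```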
Isn't that the issue with DAG serialization of the comment field? It looks awfully like it.
> Isn't that the issue with DAG serialization of the comment field? It looks awfully like it.
No, I could reproduce it without DAG Serialization.
Right I know what it is and the problem is also in master. Fix is coming. | 2020-07-12T13:06:57Z | [] | [] |
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/flask_admin/base.py", line 69, in inner
return self._run_view(f, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_admin/base.py", line 368, in _run_view
return fn(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_login/utils.py", line 258, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 380, in view_func
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 286, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 1749, in graph
doc_md = wrapped_markdown(getattr(dag, 'doc_md', None), css_class='dag-doc')
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 258, in wrapped_markdown
).format(css_class=css_class)
File "/usr/local/lib/python3.7/site-packages/markupsafe/__init__.py", line 213, in format
return self.__class__(formatter.vformat(self, args, kwargs))
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 190, in vformat
result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 230, in _vformat
obj, arg_used = self.get_field(field_name, args, kwargs)
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 295, in get_field
obj = self.get_value(first, args, kwargs)
File "/usr/local/Cellar/python/3.7.8/Frameworks/Python.framework/Versions/3.7/lib/python3.7/string.py", line 252, in get_value
return kwargs[key]
File "/usr/local/lib/python3.7/site-packages/markupsafe/__init__.py", line 249, in __getitem__
return self._kwargs[key]
KeyError: '"randomKey"'
| 2,724 |
|||
celery/celery | celery__celery-1206 | 707fdb39d057bfc5179626113fb77aeca56a6ac6 | diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -96,7 +96,10 @@ def _get_connection(self):
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
args = [self.mongodb_host]
- kwargs = {'max_pool_size': self.mongodb_max_pool_size}
+ kwargs = {
+ 'max_pool_size': self.mongodb_max_pool_size,
+ 'ssl': self.app.conf.BROKER_USE_SSL
+ }
if isinstance(self.mongodb_host, string_t) \
and not self.mongodb_host.startswith('mongodb://'):
args.append(self.mongodb_port)
| MongoDB and BROKER_USE_SSL=True
I've recently started using mongodb with BROKER_USE_SSL=True, and it doesn't seem to work: Celery keeps trying to reconnect with a "Re-establishing connection" message. BROKER_USE_SSL=False works well.
``` python
[2013-02-21 14:57:45,708: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...
[2013-02-21 14:57:45,710: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.
[2013-02-21 14:57:45,714: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
Traceback (most recent call last):
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 392, in start
self.reset_connection()
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 741, in reset_connection
self.connection, on_decode_error=self.on_decode_error,
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py", line 291, in __init__
queues or self.app.amqp.queues.consume_from.values(), **kw
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 338, in __init__
self.revive(self.channel)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 350, in revive
self.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 360, in declare
queue.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 471, in declare
self.queue_declare(nowait, passive=False)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 497, in queue_declare
nowait=nowait)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py", line 398, in queue_declare
return queue, self._size(queue), 0
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 76, in _size
return self.client.messages.find({'queue': queue}).count()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 204, in client
self._client = self._open()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 133, in _open
mongoconn = Connection(host=hostname)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py", line 180, in __init__
max_pool_size, document_class, tz_aware, _connect, **kwargs)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py", line 269, in __init__
raise ConnectionFailure(str(e))
ConnectionFailure: [Errno 104] Connection reset by peer
[2013-02-21 14:57:45,716: DEBUG/MainProcess] consumer: Re-establishing connection to the broker...
[2013-02-21 14:57:45,718: INFO/MainProcess] consumer: Connected to mongodb://localhost%3A27017%2Fdata/data.
[2013-02-21 14:57:45,721: ERROR/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
```
The problem seems to be generated by this line:
https://github.com/celery/kombu/blob/master/kombu/transport/mongodb.py#L135
which should pass the `ssl=True` parameter for SSL connections.
I know this is a kombu component, but the setting (BROKER_USE_SSL) that leads to this problem is part of the celery library.
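As a sketch of the fix's effect (pymongo-2.x-era API as used here; it assumes a local mongod, and parameter names differ in newer pymongo releases): unless the SSL flag reaches the client kwargs, the driver speaks plain TCP to an SSL-only server and the connection gets reset.

```python
import pymongo

BROKER_USE_SSL = True  # stands in for app.conf.BROKER_USE_SSL

# Forwarding the flag is all the backend/transport needs to do; without it the
# TLS handshake never happens and pymongo reports "Connection reset by peer".
client = pymongo.MongoClient("localhost", 27017, ssl=BROKER_USE_SSL)
```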
| 2013-02-23T09:59:42Z | [] | [] |
Traceback (most recent call last):
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 392, in start
self.reset_connection()
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/worker/consumer.py", line 741, in reset_connection
self.connection, on_decode_error=self.on_decode_error,
File "/stuff/eggs/celery-3.0.13-py2.7.egg/celery/app/amqp.py", line 291, in __init__
queues or self.app.amqp.queues.consume_from.values(), **kw
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 338, in __init__
self.revive(self.channel)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 350, in revive
self.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/messaging.py", line 360, in declare
queue.declare()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 471, in declare
self.queue_declare(nowait, passive=False)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/entity.py", line 497, in queue_declare
nowait=nowait)
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/virtual/__init__.py", line 398, in queue_declare
return queue, self._size(queue), 0
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 76, in _size
return self.client.messages.find({'queue': queue}).count()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 204, in client
self._client = self._open()
File "/stuff/eggs/kombu-2.5.4-py2.7.egg/kombu/transport/mongodb.py", line 133, in _open
mongoconn = Connection(host=hostname)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/connection.py", line 180, in __init__
max_pool_size, document_class, tz_aware, _connect, **kwargs)
File "/stuff/eggs/pymongo-2.4.2-py2.7-linux-x86_64.egg/pymongo/mongo_client.py", line 269, in __init__
raise ConnectionFailure(str(e))
ConnectionFailure: [Errno 104] Connection reset by peer
| 2,733 |
||||
celery/celery | celery__celery-1769 | 3c4860d2208ae07fc1f5f07d7e9ae6c79919e9c4 | diff --git a/celery/apps/worker.py b/celery/apps/worker.py
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -315,6 +315,9 @@ def on_SIGINT(worker):
def _reload_current_worker():
+ platforms.close_open_fds([
+ sys.__stdin__, sys.__stdout__, sys.__stderr__,
+ ])
os.execv(sys.executable, [sys.executable] + sys.argv)
| Sending SIGHUP leaks file handles
When the Celery master process receives SIGHUP, it leaks all of its previously open file handles when calling exec. This is a regression introduced in 3.1 by 118b300fcad4e6ffb0178fc00cf9fe26075101a5 (originally fixed in 803655b79ccb0403f47cfcd2cfa5a6ed66301cbc for #1270).
This additionally causes Celery to crash after enough HUPs (if the open file limit is larger than 1024):
```
ERROR celery.bootsteps Error on stopping Pool: ValueError('filedescriptor out of range in select()',)
Traceback (most recent call last):
File "celery/bootsteps.py", line 155, in send_all
fun(parent, *args)
File "celery/bootsteps.py", line 377, in stop
return self.obj.stop()
File "celery/concurrency/base.py", line 119, in stop
self.on_stop()
File "celery/concurrency/prefork.py", line 140, in on_stop
self._pool.join()
File "billiard/pool.py", line 1523, in join
stop_if_not_current(self._result_handler)
File "billiard/pool.py", line 148, in stop_if_not_current
thread.stop(timeout)
File "billiard/pool.py", line 493, in stop
self.on_stop_not_started()
File "celery/concurrency/asynpool.py", line 301, in on_stop_not_started
join_exited_workers(shutdown=True)
File "billiard/pool.py", line 1109, in _join_exited_workers
self.process_flush_queues(worker)
File "celery/concurrency/asynpool.py", line 1082, in process_flush_queues
readable, _, again = _select(fds, None, fds, timeout=0.01)
File "celery/concurrency/asynpool.py", line 141, in _select
r, w, e = select.select(readers, writers, err, timeout)
ValueError: filedescriptor out of range in select()
```
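A Linux-specific sketch (not Celery code) of the underlying mechanism: any descriptor left inheritable survives `os.execv`, so every re-exec adds one more open fd, which is why closing the open descriptors before the exec fixes the leak:

```python
import os
import sys


def open_fds():
    # Descriptor numbers currently open in this process (Linux-only: /proc).
    return sorted(int(fd) for fd in os.listdir("/proc/self/fd"))


# Each generation opens one descriptor and deliberately leaves it inheritable
# (no FD_CLOEXEC), so it survives the exec below and the list keeps growing.
fd = os.open(os.devnull, os.O_RDONLY)
os.set_inheritable(fd, True)
print("generation", os.environ.get("GEN", "0"), "open fds:", open_fds())

generation = int(os.environ.get("GEN", "0"))
if generation < 3:
    os.environ["GEN"] = str(generation + 1)
    os.execv(sys.executable, [sys.executable] + sys.argv)
```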
| 2014-01-03T23:56:52Z | [] | [] |
Traceback (most recent call last):
File "celery/bootsteps.py", line 155, in send_all
fun(parent, *args)
File "celery/bootsteps.py", line 377, in stop
return self.obj.stop()
File "celery/concurrency/base.py", line 119, in stop
self.on_stop()
File "celery/concurrency/prefork.py", line 140, in on_stop
self._pool.join()
File "billiard/pool.py", line 1523, in join
stop_if_not_current(self._result_handler)
File "billiard/pool.py", line 148, in stop_if_not_current
thread.stop(timeout)
File "billiard/pool.py", line 493, in stop
self.on_stop_not_started()
File "celery/concurrency/asynpool.py", line 301, in on_stop_not_started
join_exited_workers(shutdown=True)
File "billiard/pool.py", line 1109, in _join_exited_workers
self.process_flush_queues(worker)
File "celery/concurrency/asynpool.py", line 1082, in process_flush_queues
readable, _, again = _select(fds, None, fds, timeout=0.01)
File "celery/concurrency/asynpool.py", line 141, in _select
r, w, e = select.select(readers, writers, err, timeout)
ValueError: filedescriptor out of range in select()
| 2,734 |
||||
celery/celery | celery__celery-1834 | 59e44ae6300e5b39b3306bc2cdc76a0b85b3d418 | diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -501,7 +501,7 @@ def verify_process_alive(proc):
if proc._is_alive() and proc in waiting_to_start:
assert proc.outqR_fd in fileno_to_outq
assert fileno_to_outq[proc.outqR_fd] is proc
- assert proc.outqR_fd in hub.readers
+ assert proc.outqR_fd in hub.readers, "%s.outqR_fd=%s not in hub.readers !" % (proc, proc.outqR_fd)
error('Timed out waiting for UP message from %r', proc)
os.kill(proc.pid, 9)
@@ -570,6 +570,15 @@ def on_process_down(proc):
if inq:
busy_workers.discard(inq)
hub_remove(proc.sentinel)
+ waiting_to_start.discard(proc)
+ self._active_writes.discard(proc.inqW_fd)
+ hub_remove(proc.inqW_fd)
+ hub_remove(proc.outqR_fd)
+ if proc.synqR_fd:
+ hub_remove(proc.synqR_fd)
+ if proc.synqW_fd:
+ self._active_writes.discard(proc.synqW_fd)
+ hub_remove(proc.synqW_fd)
self.on_process_down = on_process_down
def _create_write_handlers(self, hub,
@@ -966,7 +975,7 @@ def on_process_alive(self, pid):
try:
proc = next(w for w in self._pool if w.pid == pid)
except StopIteration:
- # process already exited :( this will be handled elsewhere.
+ logger.warning("process with pid=%s already exited :( - handling this elsewhere ...", pid)
return
assert proc.inqW_fd not in self._fileno_to_inq
assert proc.inqW_fd not in self._all_inqueues
| "assert proc.outqR_fd in hub.readers" AssertionError
```
[2014-01-13 15:06:53,047] pid=33970/MainProcess - ERROR - celery.worker - Unrecoverable error: AssertionError()
Traceback (most recent call last):
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/__init__.py", line 206, in start
self.blueprint.start(self)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start
step.start(parent)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 373, in start
return self.obj.start()
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/consumer.py", line 270, in start
blueprint.start(self)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start
step.start(parent)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/consumer.py", line 786, in start
c.loop(*c.loop_args())
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/loops.py", line 71, in asynloop
next(loop)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/hub.py", line 288, in create_loop
poll_timeout = fire_timers(propagate=propagate) if scheduled else 1
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/hub.py", line 151, in fire_timers
entry()
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/timer.py", line 64, in __call__
return self.fun(*self.args, **self.kwargs)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/concurrency/asynpool.py", line 504, in verify_process_alive
assert proc.outqR_fd in hub.readers
AssertionError
[2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Closing Hub...
[2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Closing Pool...
[2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Closing Consumer...
[2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Worker: Stopping Consumer...
[2014-01-13 15:06:53,048] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Connection...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Events...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Mingle...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Tasks...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Control...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Gossip...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing Heart...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Closing event loop...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping event loop...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping Heart...
[2014-01-13 15:06:53,049] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping Gossip...
[2014-01-13 15:06:53,050] pid=33970/MainProcess - DEBUG - celery.bootsteps - | Consumer: Stopping Control...
```
I don't have an isolated test case yet. Does the cause look like something obvious?
| Tested with:
- celery/kombu@ffa90945bf06ba8b9269b4a36019baad0ac57793
- celery/billiard@c29c4f7adbd0f7f4544c05fb9777800616e89d2f
- celery/celery@ceaf7aba36eae78af852eb5ca703c81091b52f23
[I too](https://github.com/celery/kombu/issues/305) am getting a similar issue. Which transport are you using?
It was redis. I will post more details when I get the chance
I am getting this issues with rabbitmq/amqp.
I'm getting it too, on the Redis backend. Celery Beat continues and the worker restarts, but it seems to be idle (no output from tasks in the log). By contrast, when I stop the entire instance, do a flushall on redis and start celery again, it starts working again.
The problem code is in https://github.com/celery/celery/blob/master/celery/concurrency/asynpool.py#L501
Do you guys use CELERYD_MAX_TASKS_PER_CHILD with a low value?
Yes! In fact I set it to 1, otherwise I had massive memory leaks under heavy loads (lots of workers, big tasks, memory not released to the OS, causing memory use to creep up until the OOM killer kicks in), etc.
We have it set to 1000.
Yep it is set to 2500 for us.
| 2014-01-30T13:06:46Z | [] | [] |
Traceback (most recent call last):
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/__init__.py", line 206, in start
self.blueprint.start(self)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start
step.start(parent)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 373, in start
return self.obj.start()
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/consumer.py", line 270, in start
blueprint.start(self)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/bootsteps.py", line 123, in start
step.start(parent)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/consumer.py", line 786, in start
c.loop(*c.loop_args())
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/worker/loops.py", line 71, in asynloop
next(loop)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/hub.py", line 288, in create_loop
poll_timeout = fire_timers(propagate=propagate) if scheduled else 1
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/hub.py", line 151, in fire_timers
entry()
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/kombu/async/timer.py", line 64, in __call__
return self.fun(*self.args, **self.kwargs)
File "/home/ionel/projects/core/.ve/local/lib/python2.7/site-packages/celery/concurrency/asynpool.py", line 504, in verify_process_alive
assert proc.outqR_fd in hub.readers
AssertionError
| 2,735 |