Compare commits


1 Commit

Author:  Philip Meulengracht
SHA1:    dca9bbfac9
Message: Merge 684f4d25c8 into d209e197f8
Date:    2024-11-22 16:16:24 +01:00
63 changed files with 216 additions and 524 deletions

View File

@ -37,7 +37,7 @@ jobs:
VALIDATE_GITHUB_ACTIONS: true
- name: Check that tabs are not used in Python code
run: sh -c '! git grep -P "\\t" -- src/ukify/ukify.py test/integration-test-wrapper.py'
run: sh -c '! git grep -P "\\t" -- src/ukify/ukify.py'
- name: Install ruff and mypy
run: |
@ -47,14 +47,14 @@ jobs:
- name: Run mypy
run: |
python3 -m mypy --version
python3 -m mypy src/ukify/ukify.py test/integration-test-wrapper.py
python3 -m mypy src/ukify/ukify.py
- name: Run ruff check
run: |
ruff --version
ruff check src/ukify/ukify.py test/integration-test-wrapper.py
ruff check src/ukify/ukify.py
- name: Run ruff format
run: |
ruff --version
ruff format --check src/ukify/ukify.py test/integration-test-wrapper.py
ruff format --check src/ukify/ukify.py

View File

@ -105,7 +105,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: systemd/mkosi@0825cca8084674ec8fa27502134b1bc601f79e0c
- uses: systemd/mkosi@8976a0abb19221e65300222f2d33067970cca0f1
# Freeing up disk space with rm -rf can take multiple minutes. Since we don't need the extra free space
# immediately, we remove the files in the background. However, we first move them to a different location
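
The comment above relies on a common CI trick: renaming a directory is nearly instant, so the paths are reclaimed right away while the slow rm -rf finishes in the background. A minimal sketch of that pattern, with hypothetical paths rather than the workflow's actual list:

    # mv returns immediately; the expensive deletion runs detached so the
    # following workflow steps do not have to wait for it.
    mkdir -p /tmp/trash
    mv /opt/hostedtoolcache /usr/local/lib/android /tmp/trash/ || true
    rm -rf /tmp/trash &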

NEWS (3 lines changed)
View File

@ -764,9 +764,6 @@ CHANGES WITH 257 in spe:
other cases EnterNamespace= might be an suitable approach to acquire
symbolized backtraces.)
Special thanks to Nick Owens for bringing attention to and testing
fixes for issue #34516.
Contributions from: 12paper, A. Wilcox, Abderrahim Kitouni,
Adrian Vovk, Alain Greppin, Allison Karlitskaya, Alyssa Ross,
Anders Jonsson, Andika Triwidada, Andres Beltran, Anouk Ceyssens,

View File

@ -295,10 +295,6 @@ sensor:modalias:acpi:MXC6655*:dmi:*:svnCHUWIInnovationAndTechnology*:pnHi10X:*
sensor:modalias:acpi:KIOX000A*:dmi:*:svnCHUWIInnovationAndTechnology*:pnHi10X:*
ACCEL_MOUNT_MATRIX=0, -1, 0; -1, 0, 0; 0, 0, 1
# Chuwi Hi10 X1
sensor:modalias:acpi:NSA2513*:dmi:*:svnCHUWIInnovationAndTechnology*:pnHi10X1:*
ACCEL_MOUNT_MATRIX=0, 1, 0; -1, 0, 0; 0, 0, 1
# Chuwi Hi10 Go
sensor:modalias:acpi:MXC6655*:dmi:*:svnCHUWIINNOVATIONLIMITED:pnHi10Go:*
ACCEL_MOUNT_MATRIX=-1, 0, 0; 0,-1, 0; 0, 0, 1
@ -957,15 +953,6 @@ sensor:modalias:acpi:MXC6655*:dmi:*:svnDefaultstring*:pnP612F:*
sensor:modalias:acpi:SMO8500*:dmi:*:svnPEAQ:pnPEAQPMMC1010MD99187:*
ACCEL_MOUNT_MATRIX=-1, 0, 0; 0, 1, 0; 0, 0, 1
#########################################
# Pine64
#########################################
# PineTab2
sensor:modalias:of:NaccelerometerT_null_Csilan,sc7a20:*
ACCEL_MOUNT_MATRIX=0, 0, -1; 1, 0, 0; 0, -1, 0
#########################################
# Pipo
#########################################

View File

@ -1,18 +1,12 @@
#!/bin/bash
# SPDX-License-Identifier: LGPL-2.1-or-later
if command -v flatpak-spawn >/dev/null; then
SPAWN=(flatpak-spawn --host)
else
SPAWN=()
fi
MKOSI_CONFIG="$("${SPAWN[@]}" --host mkosi --json summary | jq -r .Images[-1])"
MKOSI_CONFIG="$(mkosi --json summary | jq -r .Images[-1])"
DISTRIBUTION="$(jq -r .Distribution <<< "$MKOSI_CONFIG")"
RELEASE="$(jq -r .Release <<< "$MKOSI_CONFIG")"
ARCH="$(jq -r .Architecture <<< "$MKOSI_CONFIG")"
exec "${SPAWN[@]}" mkosi \
exec mkosi \
--incremental=strict \
--build-sources-ephemeral=no \
--format=none \

View File

@ -38,8 +38,9 @@ SignExpectedPcr=yes
[Content]
ExtraTrees=
mkosi.extra.common
mkosi.crt:/usr/lib/verity.d/mkosi.crt # sysext verification key
mkosi.leak-sanitizer-suppressions:/usr/lib/systemd/leak-sanitizer-suppressions
mkosi.coredump-journal-storage.conf:/usr/lib/systemd/coredump.conf.d/10-coredump-journal-storage.conf
%O/minimal-0.root-%a.raw:/usr/share/minimal_0.raw
%O/minimal-0.root-%a-verity.raw:/usr/share/minimal_0.verity
%O/minimal-0.root-%a-verity-sig.raw:/usr/share/minimal_0.verity.sig

View File

@ -6,12 +6,10 @@ ToolsTreeDistribution=arch
[Build]
ToolsTreePackages=
cryptsetup
github-cli
libcap
libmicrohttpd
python-jinja
python-pytest
ruff
shellcheck
tpm2-tss
util-linux-libs

View File

@ -16,4 +16,3 @@ ToolsTreePackages=
tpm2-tss-devel
python3-jinja2
python3-pytest
shellcheck

View File

@ -6,7 +6,6 @@ ToolsTreeDistribution=|ubuntu
[Build]
ToolsTreePackages=
gh
libblkid-dev
libcap-dev
libcryptsetup-dev
@ -17,4 +16,3 @@ ToolsTreePackages=
libtss2-dev
python3-jinja2
python3-pytest
shellcheck

View File

@ -5,5 +5,4 @@ ToolsTreeDistribution=fedora
[Build]
ToolsTreePackages=
gh
ruff

View File

@ -5,7 +5,6 @@ ToolsTreeDistribution=opensuse
[Build]
ToolsTreePackages=
gh
pkgconfig(blkid)
pkgconfig(libcap)
pkgconfig(libcryptsetup)
@ -17,4 +16,3 @@ ToolsTreePackages=
tss2-devel
python3-jinja2
python3-pytest
ShellCheck

View File

@ -13,7 +13,6 @@ Environment=
[Content]
Packages=
clang-devel
compiler-rt
gdb
git-core

View File

@ -15,7 +15,6 @@ Environment=
[Content]
Packages=
apt
clangd
erofs-utils
git-core
libclang-rt-dev

View File

@ -12,7 +12,6 @@ Environment=
[Content]
Packages=
clang
diffutils
erofs-utils
gcc-c++

View File

@ -6,7 +6,9 @@ Include=
%D/mkosi.sanitizers
[Content]
ExtraTrees=%D/mkosi.extra.common
ExtraTrees=
%D/mkosi.leak-sanitizer-suppressions:/usr/lib/systemd/leak-sanitizer-suppressions
%D/mkosi.coredump-journal-storage.conf:/usr/lib/systemd/coredump.conf.d/10-coredump-journal-storage.conf
Packages=
findutils

View File

@ -57,8 +57,6 @@ wrap=(
delv
dhcpd
dig
dnf
dnf5
dmsetup
dnsmasq
findmnt
@ -95,7 +93,7 @@ wrap=(
)
for bin in "${wrap[@]}"; do
if ! mkosi-chroot bash -c "command -v $bin" >/dev/null; then
if ! mkosi-chroot command -v "$bin" >/dev/null; then
continue
fi
@ -105,7 +103,7 @@ for bin in "${wrap[@]}"; do
enable_lsan=0
fi
target="$(mkosi-chroot bash -c "command -v $bin")"
target="$(mkosi-chroot command -v "$bin")"
mv "$BUILDROOT/$target" "$BUILDROOT/$target.orig"

View File

@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2024-11-06 14:42+0000\n"
"PO-Revision-Date: 2024-11-23 10:38+0000\n"
"PO-Revision-Date: 2024-11-20 19:13+0000\n"
"Last-Translator: Léane GRASSER <leane.grasser@proton.me>\n"
"Language-Team: French <https://translate.fedoraproject.org/projects/systemd/"
"main/fr/>\n"
@ -1258,7 +1258,7 @@ msgstr ""
#: src/sysupdate/org.freedesktop.sysupdate1.policy:75
msgid "Manage optional features"
msgstr "Gérer les fonctionnalités facultatives"
msgstr "Gérer les fonctionnalités en option"
#: src/sysupdate/org.freedesktop.sysupdate1.policy:76
msgid "Authentication is required to manage optional features"

View File

@ -21,7 +21,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
#define AUTOFS_PROTO_SUBVERSION 6
#define AUTOFS_PROTO_SUBVERSION 5
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed

View File

@ -1121,9 +1121,6 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
* in sync with the definitions below.
*/
enum bpf_link_type {
BPF_LINK_TYPE_UNSPEC = 0,
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
@ -2854,7 +2851,7 @@ union bpf_attr {
* **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
* **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
* **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
* **TCP_BPF_RTO_MIN**, **TCP_BPF_SOCK_OPS_CB_FLAGS**.
* **TCP_BPF_RTO_MIN**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports the following *optname*\ s:
* **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
@ -5522,12 +5519,11 @@ union bpf_attr {
* **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
*
* void *bpf_kptr_xchg(void *dst, void *ptr)
* void *bpf_kptr_xchg(void *map_value, void *ptr)
* Description
* Exchange kptr at pointer *dst* with *ptr*, and return the old value.
* *dst* can be map value or local kptr. *ptr* can be NULL, otherwise
* it must be a referenced pointer which will be released when this helper
* is called.
* Exchange kptr at pointer *map_value* with *ptr*, and return the
* old value. *ptr* can be NULL, otherwise it must be a referenced
* pointer which will be released when this helper is called.
* Return
* The old value of kptr (which can be NULL). The returned pointer
* if not NULL, is a reference which must be released using its
@ -6050,6 +6046,11 @@ enum {
BPF_F_MARK_ENFORCE = (1ULL << 6),
};
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
enum {
BPF_F_INGRESS = (1ULL << 0),
};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
enum {
BPF_F_TUNINFO_IPV6 = (1ULL << 0),
@ -6196,12 +6197,10 @@ enum {
BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
};
/* Flags for bpf_redirect and bpf_redirect_map helpers */
/* Flags for bpf_redirect_map helper */
enum {
BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
BPF_F_BROADCAST = (1ULL << 3),
BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
};
#define __bpf_md_ptr(type, name) \
@ -7081,7 +7080,6 @@ enum {
TCP_BPF_SYN = 1005, /* Copy the TCP header */
TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */
};
enum {
@ -7514,13 +7512,4 @@ struct bpf_iter_num {
__u64 __opaque[1];
} __attribute__((aligned(8)));
/*
* Flags to control BPF kfunc behaviour.
* - BPF_F_PAD_ZEROS: Pad destination buffer with zeros. (See the respective
* helper documentation for details.)
*/
enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};
#endif /* __LINUX_BPF_H__ */

View File

@ -28,23 +28,6 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
#if !defined(__ASSEMBLY__)
/*
* Missing __asm__ support
*
* __BIT128() would not work in the __asm__ code, as it shifts an
* 'unsigned __init128' data type as direct representation of
* 128 bit constants is not supported in the gcc compiler, as
* they get silently truncated.
*
* TODO: Please revisit this implementation when gcc compiler
* starts representing 128 bit constants directly like long
* and unsigned long etc. Subsequently drop the comment for
* GENMASK_U128() which would then start supporting __asm__ code.
*/
#define _BIT128(x) ((unsigned __int128)(1) << (x))
#endif
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))

View File

@ -2531,20 +2531,4 @@ struct ethtool_link_settings {
* __u32 map_lp_advertising[link_mode_masks_nwords];
*/
};
/**
* enum phy_upstream - Represents the upstream component a given PHY device
* is connected to, as in what is on the other end of the MII bus. Most PHYs
* will be attached to an Ethernet MAC controller, but in some cases, there's
* an intermediate PHY used as a media-converter, which will driver another
* MII interface as its output.
* @PHY_UPSTREAM_MAC: Upstream component is a MAC (a switch port,
* or ethernet controller)
* @PHY_UPSTREAM_PHY: Upstream component is a PHY (likely a media converter)
*/
enum phy_upstream {
PHY_UPSTREAM_MAC,
PHY_UPSTREAM_PHY,
};
#endif /* _LINUX_ETHTOOL_H */

View File

@ -67,7 +67,6 @@ enum {
FRA_IP_PROTO, /* ip proto */
FRA_SPORT_RANGE, /* sport */
FRA_DPORT_RANGE, /* dport */
FRA_DSCP, /* dscp */
__FRA_MAX
};

View File

@ -230,8 +230,8 @@ struct tpacket_hdr_v1 {
* ts_first_pkt:
* Is always the time-stamp when the block was opened.
* Case a) ZERO packets
* No packets to deal with but at least you know
* the time-interval of this block.
* No packets to deal with but atleast you know the
* time-interval of this block.
* Case b) Non-zero packets
* Use the ts of the first packet in the block.
*
@ -265,8 +265,7 @@ enum tpacket_versions {
- struct tpacket_hdr
- pad to TPACKET_ALIGNMENT=16
- struct sockaddr_ll
- Gap, chosen so that packet data (Start+tp_net) aligns to
TPACKET_ALIGNMENT=16
- Gap, chosen so that packet data (Start+tp_net) alignes to TPACKET_ALIGNMENT=16
- Start+tp_mac: [ Optional MAC header ]
- Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
- Pad to align to TPACKET_ALIGNMENT=16

View File

@ -141,7 +141,7 @@ struct in_addr {
*/
#define IP_PMTUDISC_INTERFACE 4
/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
* fragmented if they exceed the interface mtu
* fragmented if they exeed the interface mtu
*/
#define IP_PMTUDISC_OMIT 5

View File

@ -140,6 +140,25 @@
#endif /* _NETINET_IN_H */
/* Coordinate with glibc netipx/ipx.h header. */
#if defined(__NETIPX_IPX_H)
#define __UAPI_DEF_SOCKADDR_IPX 0
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 0
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 0
#define __UAPI_DEF_IPX_CONFIG_DATA 0
#define __UAPI_DEF_IPX_ROUTE_DEF 0
#else /* defined(__NETIPX_IPX_H) */
#define __UAPI_DEF_SOCKADDR_IPX 1
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
#define __UAPI_DEF_IPX_CONFIG_DATA 1
#define __UAPI_DEF_IPX_ROUTE_DEF 1
#endif /* defined(__NETIPX_IPX_H) */
/* Definitions for xattr.h */
#if defined(_SYS_XATTR_H)
#define __UAPI_DEF_XATTR 0
@ -221,6 +240,23 @@
#define __UAPI_DEF_IP6_MTUINFO 1
#endif
/* Definitions for ipx.h */
#ifndef __UAPI_DEF_SOCKADDR_IPX
#define __UAPI_DEF_SOCKADDR_IPX 1
#endif
#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
#endif
#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
#endif
#ifndef __UAPI_DEF_IPX_CONFIG_DATA
#define __UAPI_DEF_IPX_CONFIG_DATA 1
#endif
#ifndef __UAPI_DEF_IPX_ROUTE_DEF
#define __UAPI_DEF_IPX_ROUTE_DEF 1
#endif
/* Definitions for xattr.h */
#ifndef __UAPI_DEF_XATTR
#define __UAPI_DEF_XATTR 1

View File

@ -436,7 +436,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
* @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
* @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
* @NFTA_SET_ELEM_TIMEOUT: timeout value, zero means never times out (NLA_U64)
* @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64)
* @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
* @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
@ -1694,7 +1694,7 @@ enum nft_flowtable_flags {
*
* @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
* @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
* @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_NESTED)
* @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
* @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)

View File

@ -16,15 +16,10 @@ struct nhmsg {
struct nexthop_grp {
__u32 id; /* nexthop id - must exist */
__u8 weight; /* weight of this nexthop */
__u8 weight_high; /* high order bits of weight */
__u8 resvd1;
__u16 resvd2;
};
static __inline__ __u16 nexthop_grp_weight(const struct nexthop_grp *entry)
{
return ((entry->weight_high << 8) | entry->weight) + 1;
}
enum {
NEXTHOP_GRP_TYPE_MPATH, /* hash-threshold nexthop group
* default type if not specified
@ -38,9 +33,6 @@ enum {
#define NHA_OP_FLAG_DUMP_STATS BIT(0)
#define NHA_OP_FLAG_DUMP_HW_STATS BIT(1)
/* Response OP_FLAGS. */
#define NHA_OP_FLAG_RESP_GRP_RESVD_0 BIT(31) /* Dump clears resvd fields. */
enum {
NHA_UNSPEC,
NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */

View File

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once
/* Root namespace inode numbers, as per include/linux/proc_ns.h in the kernel source tree, since v3.8:
* https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=98f842e675f96ffac96e6c50315790912b2812be */
#define PROC_IPC_INIT_INO ((ino_t) UINT32_C(0xEFFFFFFF))
#define PROC_UTS_INIT_INO ((ino_t) UINT32_C(0xEFFFFFFE))
#define PROC_USER_INIT_INO ((ino_t) UINT32_C(0xEFFFFFFD))
#define PROC_PID_INIT_INO ((ino_t) UINT32_C(0xEFFFFFFC))
#define PROC_CGROUP_INIT_INO ((ino_t) UINT32_C(0xEFFFFFFB))
#define PROC_TIME_INIT_INO ((ino_t) UINT32_C(0xEFFFFFFA))

View File

@ -12,7 +12,6 @@
#include "fileio.h"
#include "missing_fs.h"
#include "missing_magic.h"
#include "missing_namespace.h"
#include "missing_sched.h"
#include "missing_syscall.h"
#include "mountpoint-util.h"
@ -24,17 +23,17 @@
#include "user-util.h"
const struct namespace_info namespace_info[_NAMESPACE_TYPE_MAX + 1] = {
[NAMESPACE_CGROUP] = { "cgroup", "ns/cgroup", CLONE_NEWCGROUP, PROC_CGROUP_INIT_INO },
[NAMESPACE_IPC] = { "ipc", "ns/ipc", CLONE_NEWIPC, PROC_IPC_INIT_INO },
[NAMESPACE_NET] = { "net", "ns/net", CLONE_NEWNET, 0 },
[NAMESPACE_CGROUP] = { "cgroup", "ns/cgroup", CLONE_NEWCGROUP, },
[NAMESPACE_IPC] = { "ipc", "ns/ipc", CLONE_NEWIPC, },
[NAMESPACE_NET] = { "net", "ns/net", CLONE_NEWNET, },
/* So, the mount namespace flag is called CLONE_NEWNS for historical
* reasons. Let's expose it here under a more explanatory name: "mnt".
* This is in-line with how the kernel exposes namespaces in /proc/$PID/ns. */
[NAMESPACE_MOUNT] = { "mnt", "ns/mnt", CLONE_NEWNS, 0 },
[NAMESPACE_PID] = { "pid", "ns/pid", CLONE_NEWPID, PROC_PID_INIT_INO },
[NAMESPACE_USER] = { "user", "ns/user", CLONE_NEWUSER, PROC_USER_INIT_INO },
[NAMESPACE_UTS] = { "uts", "ns/uts", CLONE_NEWUTS, PROC_UTS_INIT_INO },
[NAMESPACE_TIME] = { "time", "ns/time", CLONE_NEWTIME, PROC_TIME_INIT_INO },
[NAMESPACE_MOUNT] = { "mnt", "ns/mnt", CLONE_NEWNS, },
[NAMESPACE_PID] = { "pid", "ns/pid", CLONE_NEWPID, },
[NAMESPACE_USER] = { "user", "ns/user", CLONE_NEWUSER, },
[NAMESPACE_UTS] = { "uts", "ns/uts", CLONE_NEWUTS, },
[NAMESPACE_TIME] = { "time", "ns/time", CLONE_NEWTIME, },
{ /* Allow callers to iterate over the array without using _NAMESPACE_TYPE_MAX. */ },
};
@ -480,28 +479,6 @@ int namespace_open_by_type(NamespaceType type) {
return fd;
}
int namespace_is_init(NamespaceType type) {
int r;
assert(type >= 0);
assert(type <= _NAMESPACE_TYPE_MAX);
if (namespace_info[type].root_inode == 0)
return -EBADR; /* Cannot answer this question */
const char *p = pid_namespace_path(0, type);
struct stat st;
r = RET_NERRNO(stat(p, &st));
if (r == -ENOENT)
/* If the /proc/ns/<type> API is not around in /proc/ then ns is off in the kernel and we are in the init ns */
return proc_mounted() == 0 ? -ENOSYS : true;
if (r < 0)
return r;
return st.st_ino == namespace_info[type].root_inode;
}
int is_our_namespace(int fd, NamespaceType request_type) {
int clone_flag;
@ -554,24 +531,20 @@ int is_idmapping_supported(const char *path) {
userns_fd = userns_acquire(uid_map, gid_map);
if (ERRNO_IS_NEG_NOT_SUPPORTED(userns_fd) || ERRNO_IS_NEG_PRIVILEGE(userns_fd))
return false;
if (userns_fd == -ENOSPC) {
log_debug_errno(userns_fd, "Failed to acquire new user namespace, user.max_user_namespaces seems to be exhausted or maybe even zero, assuming ID-mapping is not supported: %m");
return false;
}
if (userns_fd < 0)
return log_debug_errno(userns_fd, "Failed to acquire new user namespace for checking if '%s' supports ID-mapping: %m", path);
return log_debug_errno(userns_fd, "ID-mapping supported namespace acquire failed for '%s' : %m", path);
dir_fd = RET_NERRNO(open(path, O_RDONLY | O_CLOEXEC | O_NOFOLLOW));
if (ERRNO_IS_NEG_NOT_SUPPORTED(dir_fd))
return false;
if (dir_fd < 0)
return log_debug_errno(dir_fd, "Failed to open '%s', cannot determine if ID-mapping is supported: %m", path);
return log_debug_errno(dir_fd, "ID-mapping supported open failed for '%s' : %m", path);
mount_fd = RET_NERRNO(open_tree(dir_fd, "", AT_EMPTY_PATH | OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC));
if (ERRNO_IS_NEG_NOT_SUPPORTED(mount_fd) || ERRNO_IS_NEG_PRIVILEGE(mount_fd) || mount_fd == -EINVAL)
return false;
if (mount_fd < 0)
return log_debug_errno(mount_fd, "Failed to open mount tree '%s', cannot determine if ID-mapping is supported: %m", path);
return log_debug_errno(mount_fd, "ID-mapping supported open_tree failed for '%s' : %m", path);
r = RET_NERRNO(mount_setattr(mount_fd, "", AT_EMPTY_PATH,
&(struct mount_attr) {
@ -581,7 +554,7 @@ int is_idmapping_supported(const char *path) {
if (ERRNO_IS_NEG_NOT_SUPPORTED(r) || ERRNO_IS_NEG_PRIVILEGE(r) || r == -EINVAL)
return false;
if (r < 0)
return log_debug_errno(r, "Failed to set mount attribute to '%s', cannot determine if ID-mapping is supported: %m", path);
return log_debug_errno(r, "ID-mapping supported setattr failed for '%s' : %m", path);
return true;
}

View File

@ -24,7 +24,6 @@ extern const struct namespace_info {
const char *proc_name;
const char *proc_path;
unsigned int clone_flag;
ino_t root_inode;
} namespace_info[_NAMESPACE_TYPE_MAX + 1];
int pidref_namespace_open(
@ -75,8 +74,6 @@ int parse_userns_uid_range(const char *s, uid_t *ret_uid_shift, uid_t *ret_uid_r
int namespace_open_by_type(NamespaceType type);
int namespace_is_init(NamespaceType type);
int is_our_namespace(int fd, NamespaceType type);
int is_idmapping_supported(const char *path);

View File

@ -585,14 +585,6 @@ static int running_in_cgroupns(void) {
if (!cg_ns_supported())
return false;
r = namespace_is_init(NAMESPACE_CGROUP);
if (r < 0)
log_debug_errno(r, "Failed to test if in root cgroup namespace, ignoring: %m");
else if (r > 0)
return false;
// FIXME: We really should drop the heuristics below.
r = cg_all_unified();
if (r < 0)
return r;
@ -653,16 +645,6 @@ static int running_in_cgroupns(void) {
}
}
static int running_in_pidns(void) {
int r;
r = namespace_is_init(NAMESPACE_PID);
if (r < 0)
return log_debug_errno(r, "Failed to test if in root PID namespace, ignoring: %m");
return !r;
}
static Virtualization detect_container_files(void) {
static const struct {
const char *file_path;
@ -808,21 +790,12 @@ check_files:
r = running_in_cgroupns();
if (r > 0) {
log_debug("Running in a cgroup namespace, assuming unknown container manager.");
v = VIRTUALIZATION_CONTAINER_OTHER;
goto finish;
}
if (r < 0)
log_debug_errno(r, "Failed to detect cgroup namespace: %m");
/* Finally, the root pid namespace has an hardcoded inode number of 0xEFFFFFFC since kernel 3.8, so
* if all else fails we can check the inode number of our pid namespace and compare it. */
if (running_in_pidns() > 0) {
log_debug("Running in a pid namespace, assuming unknown container manager.");
v = VIRTUALIZATION_CONTAINER_OTHER;
goto finish;
}
/* If none of that worked, give up, assume no container manager. */
v = VIRTUALIZATION_NONE;
goto finish;
@ -890,14 +863,6 @@ int running_in_userns(void) {
_cleanup_free_ char *line = NULL;
int r;
r = namespace_is_init(NAMESPACE_USER);
if (r < 0)
log_debug_errno(r, "Failed to test if in root user namespace, ignoring: %m");
else if (r > 0)
return false;
// FIXME: We really should drop the heuristics below.
r = userns_has_mapping("/proc/self/uid_map");
if (r != 0)
return r;
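
The comment in the hunk above spells out the fallback heuristic this change removes: since kernel 3.8 the initial PID namespace has the fixed inode number 0xEFFFFFFC (PROC_PID_INIT_INO, also visible in the deleted missing_namespace.h earlier in this diff), so comparing the inode of /proc/self/ns/pid against that constant reveals whether a process runs in the init namespace. A minimal shell sketch of the check, illustrative only and not the removed systemd helper:

    # The init PID namespace inode is 0xEFFFFFFC (4026531836).
    if [ "$(stat -Lc %i /proc/self/ns/pid)" = "$((0xEFFFFFFC))" ]; then
        echo "running in the initial PID namespace"
    else
        echo "running in a PID namespace, likely a container"
    fi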

View File

@ -1048,6 +1048,9 @@ static void device_enumerate(Manager *m) {
_cleanup_set_free_ Set *ready_units = NULL, *not_ready_units = NULL;
Device *d;
if (device_is_processed(dev) <= 0)
continue;
if (device_setup_units(m, dev, &ready_units, &not_ready_units) < 0)
continue;

View File

@ -98,11 +98,16 @@ static int parse_proc_cmdline_item(const char *key, const char *value, void *dat
}
}
else if (streq(key, "fastboot") && !value)
#if HAVE_SYSV_COMPAT
else if (streq(key, "fastboot") && !value) {
log_warning("Please pass 'fsck.mode=skip' rather than 'fastboot' on the kernel command line.");
arg_skip = true;
else if (streq(key, "forcefsck") && !value)
} else if (streq(key, "forcefsck") && !value) {
log_warning("Please pass 'fsck.mode=force' rather than 'forcefsck' on the kernel command line.");
arg_force = true;
}
#endif
return 0;
}

View File

@ -75,10 +75,6 @@ static int curl_glue_socket_callback(CURL *curl, curl_socket_t s, int action, vo
return 0;
}
/* Don't configure io event source anymore when the event loop is dead already. */
if (g->event && sd_event_get_state(g->event) == SD_EVENT_FINISHED)
return 0;
r = hashmap_ensure_allocated(&g->ios, &trivial_hash_ops);
if (r < 0) {
log_oom();

View File

@ -16,7 +16,7 @@ int varlink_get_peer_pidref(sd_varlink *v, PidRef *ret) {
int pidfd = sd_varlink_get_peer_pidfd(v);
if (pidfd < 0) {
if (!ERRNO_IS_NEG_NOT_SUPPORTED(pidfd) && pidfd != -EINVAL)
if (!ERRNO_IS_NEG_NOT_SUPPORTED(pidfd))
return pidfd;
pid_t pid;

View File

@ -101,19 +101,18 @@ static int help(int argc, char *argv[], void *userdata) {
" -j Same as --json=pretty on tty, --json=short otherwise\n"
" --append=PATH Load specified JSON signature, and append new signature to it\n"
"\n%3$sUKI PE Section Options:%4$s %3$sUKI PE Section%4$s\n"
" --linux=PATH Path to Linux kernel image file %7$s .linux\n"
" --osrel=PATH Path to os-release file %7$s .osrel\n"
" --cmdline=PATH Path to file with kernel command line %7$s .cmdline\n"
" --initrd=PATH Path to initrd image file %7$s .initrd\n"
" --ucode=PATH Path to microcode image file %7$s .ucode\n"
" --splash=PATH Path to splash bitmap file %7$s .splash\n"
" --dtb=PATH Path to DeviceTree file %7$s .dtb\n"
" --dtbauto=PATH Path to DeviceTree file for auto selection %7$s .dtbauto\n"
" --uname=PATH Path to 'uname -r' file %7$s .uname\n"
" --sbat=PATH Path to SBAT file %7$s .sbat\n"
" --pcrpkey=PATH Path to public key for PCR signatures %7$s .pcrpkey\n"
" --profile=PATH Path to profile file %7$s .profile\n"
" --hwids=PATH Path to HWIDs file %7$s .hwids\n"
" --linux=PATH Path to Linux kernel image file %7$s .linux\n"
" --osrel=PATH Path to os-release file %7$s .osrel\n"
" --cmdline=PATH Path to file with kernel command line %7$s .cmdline\n"
" --initrd=PATH Path to initrd image file %7$s .initrd\n"
" --ucode=PATH Path to microcode image file %7$s .ucode\n"
" --splash=PATH Path to splash bitmap file %7$s .splash\n"
" --dtb=PATH Path to DeviceTree file %7$s .dtb\n"
" --uname=PATH Path to 'uname -r' file %7$s .uname\n"
" --sbat=PATH Path to SBAT file %7$s .sbat\n"
" --pcrpkey=PATH Path to public key for PCR signatures %7$s .pcrpkey\n"
" --profile=PATH Path to profile file %7$s .profile\n"
" --hwids=PATH Path to HWIDs file %7$s .hwids\n"
"\nSee the %2$s for details.\n",
program_invocation_short_name,
link,

View File

@ -2280,9 +2280,10 @@ static int copy_devnode_one(const char *dest, const char *node, bool ignore_mkno
r = path_extract_directory(from, &parent);
if (r < 0)
return log_error_errno(r, "Failed to extract directory from %s: %m", from);
r = userns_mkdir(dest, parent, 0755, 0, 0);
if (r < 0)
return log_error_errno(r, "Failed to create directory %s: %m", parent);
if (!path_equal(parent, "/dev/")) {
if (userns_mkdir(dest, parent, 0755, 0, 0) < 0)
return log_error_errno(r, "Failed to create directory %s: %m", parent);
}
if (mknod(to, st.st_mode, st.st_rdev) < 0) {
r = -errno; /* Save the original error code. */
@ -4653,7 +4654,7 @@ static int nspawn_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t r
ucred = CMSG_FIND_DATA(&msghdr, SOL_SOCKET, SCM_CREDENTIALS, struct ucred);
if (!ucred || ucred->pid != inner_child_pid) {
log_debug("Received notify message from process that is not the payload's PID 1. Ignoring.");
log_debug("Received notify message without valid credentials. Ignoring.");
return 0;
}

View File

@ -36,9 +36,14 @@ static int parse_proc_cmdline_item(const char *key, const char *value, void *dat
arg_skip = true;
else
log_warning("Invalid quotacheck.mode= value, ignoring: %s", value);
}
} else if (streq(key, "forcequotacheck") && !value)
#if HAVE_SYSV_COMPAT
else if (streq(key, "forcequotacheck") && !value) {
log_warning("Please use 'quotacheck.mode=force' rather than 'forcequotacheck' on the kernel command line. Proceeding anyway.");
arg_force = true;
}
#endif
return 0;
}

View File

@ -98,17 +98,15 @@ static int delete_dm(DeviceMapper *m) {
assert(major(m->devnum) != 0);
assert(m->path);
fd = open(m->path, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
if (fd < 0)
log_debug_errno(errno, "Failed to open DM block device %s for syncing, ignoring: %m", m->path);
else {
(void) sync_with_progress(fd);
fd = safe_close(fd);
}
fd = open("/dev/mapper/control", O_RDWR|O_CLOEXEC);
if (fd < 0)
return log_debug_errno(errno, "Failed to open /dev/mapper/control: %m");
return -errno;
_cleanup_close_ int block_fd = open(m->path, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
if (block_fd < 0)
log_debug_errno(errno, "Failed to open DM block device %s for syncing, ignoring: %m", m->path);
else
(void) sync_with_progress(block_fd);
return RET_NERRNO(ioctl(fd, DM_DEV_REMOVE, &(struct dm_ioctl) {
.version = {

View File

@ -211,8 +211,10 @@ static int sync_making_progress(unsigned long long *prev_dirty) {
continue;
errno = 0;
if (sscanf(line, "%*s %llu %*s", &ull) != 1)
return log_warning_errno(errno_or_else(EIO), "Failed to parse /proc/meminfo field: %m");
if (sscanf(line, "%*s %llu %*s", &ull) != 1) {
log_warning_errno(errno_or_else(EIO), "Failed to parse /proc/meminfo field, ignoring: %m");
return false;
}
val += ull;
}
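
This helper decides whether writeback is still making progress by summing dirty-page counters from /proc/meminfo between iterations. A rough shell analogue of that reading step, assuming the relevant counters are Dirty and Writeback:

    # Sum the Dirty and Writeback counters (in kB) from /proc/meminfo; a value
    # that stops shrinking between samples means sync is no longer progressing.
    awk '/^(Dirty|Writeback):/ { sum += $2 } END { print sum " kB" }' /proc/meminfo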

View File

@ -213,23 +213,9 @@ TEST(idmapping_supported) {
assert_se(is_idmapping_supported("/etc") >= 0);
}
TEST(namespace_is_init) {
int r;
for (NamespaceType t = 0; t < _NAMESPACE_TYPE_MAX; t++) {
r = namespace_is_init(t);
if (r == -EBADR)
log_info_errno(r, "In root namespace of type '%s': don't know", namespace_info[t].proc_name);
else {
ASSERT_OK(r);
log_info("In root namespace of type '%s': %s", namespace_info[t].proc_name, yes_no(r));
}
}
}
static int intro(void) {
if (!have_namespaces())
return log_tests_skipped("Don't have namespace support or lacking privileges");
return log_tests_skipped("Don't have namespace support");
return EXIT_SUCCESS;
}

View File

@ -467,7 +467,7 @@ class SignTool:
raise NotImplementedError()
@staticmethod
def from_string(name: str) -> type['SignTool']:
def from_string(name) -> type['SignTool']:
if name == 'pesign':
return PeSign
elif name == 'sbsign':

View File

@ -3,7 +3,6 @@
integration_tests += [
integration_test_template + {
'name' : fs.name(meson.current_source_dir()),
'coredump-exclude-regex' : '/(bash|python3.[0-9]+|systemd-executor)$',
'cmdline' : integration_test_template['cmdline'] + [
'''

View File

@ -4,7 +4,6 @@ integration_tests += [
integration_test_template + {
'name' : fs.name(meson.current_source_dir()),
'unit' : files('TEST-16-EXTEND-TIMEOUT.service'),
'coredump-exclude-regex' : '/(bash|sleep)$',
},
]

View File

@ -4,6 +4,5 @@ integration_tests += [
integration_test_template + {
'name' : fs.name(meson.current_source_dir()),
'vm' : true,
'coredump-exclude-regex' : '/(sleep|udevadm)$',
},
]

View File

@ -3,6 +3,5 @@
integration_tests += [
integration_test_template + {
'name' : fs.name(meson.current_source_dir()),
'coredump-exclude-regex' : '/(sleep|bash|systemd-notify)$',
},
]

View File

@ -4,7 +4,5 @@ integration_tests += [
integration_test_template + {
'name' : fs.name(meson.current_source_dir()),
'priority' : 10,
# TODO: Remove when https://github.com/systemd/systemd/issues/35335 is fixed.
'coredump-exclude-regex' : '/systemd-localed',
},
]

View File

@ -5,7 +5,6 @@ integration_tests += [
'name' : fs.name(meson.current_source_dir()),
'storage': 'persistent',
'vm' : true,
'coredump-exclude-regex' : '/(test-usr-dump|test-dump|bash)$',
},
]

View File

@ -1,18 +1,19 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""Test wrapper command for driving integration tests."""
'''Test wrapper command for driving integration tests.
'''
import argparse
import json
import os
import re
import shlex
import subprocess
import sys
import textwrap
from pathlib import Path
EMERGENCY_EXIT_DROPIN = """\
[Unit]
Wants=emergency-exit.service
@ -33,60 +34,7 @@ ExecStart=false
"""
def process_coredumps(args: argparse.Namespace, journal_file: Path) -> bool:
# Collect executable paths of all coredumps and filter out the expected ones.
if args.coredump_exclude_regex:
exclude_regex = re.compile(args.coredump_exclude_regex)
else:
exclude_regex = None
result = subprocess.run(
[
args.mkosi,
'--directory', os.fspath(args.meson_source_dir),
'--extra-search-path', os.fspath(args.meson_build_dir),
'sandbox',
'coredumpctl',
'--file', journal_file,
'--json=short',
],
stdout=subprocess.PIPE,
text=True,
) # fmt: skip
# coredumpctl returns a non-zero exit status if there are no coredumps.
if result.returncode != 0:
return False
coredumps = json.loads(result.stdout)
coredumps = [
coredump for coredump in coredumps if not exclude_regex or not exclude_regex.search(coredump['exe'])
]
if not coredumps:
return False
subprocess.run(
[
args.mkosi,
'--directory', os.fspath(args.meson_source_dir),
'--extra-search-path', os.fspath(args.meson_build_dir),
'sandbox',
'coredumpctl',
'--file', journal_file,
'--no-pager',
'info',
*(coredump['exe'] for coredump in coredumps),
],
check=True,
) # fmt: skip
return True
def main() -> None:
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--mkosi', required=True)
parser.add_argument('--meson-source-dir', required=True, type=Path)
@ -98,44 +46,34 @@ def main() -> None:
parser.add_argument('--slow', action=argparse.BooleanOptionalAction)
parser.add_argument('--vm', action=argparse.BooleanOptionalAction)
parser.add_argument('--exit-code', required=True, type=int)
parser.add_argument('--coredump-exclude-regex', required=True)
parser.add_argument('mkosi_args', nargs='*')
parser.add_argument('mkosi_args', nargs="*")
args = parser.parse_args()
if not bool(int(os.getenv('SYSTEMD_INTEGRATION_TESTS', '0'))):
print(
f'SYSTEMD_INTEGRATION_TESTS=1 not found in environment, skipping {args.name}',
file=sys.stderr,
)
if not bool(int(os.getenv("SYSTEMD_INTEGRATION_TESTS", "0"))):
print(f"SYSTEMD_INTEGRATION_TESTS=1 not found in environment, skipping {args.name}", file=sys.stderr)
exit(77)
if args.slow and not bool(int(os.getenv('SYSTEMD_SLOW_TESTS', '0'))):
print(
f'SYSTEMD_SLOW_TESTS=1 not found in environment, skipping {args.name}',
file=sys.stderr,
)
if args.slow and not bool(int(os.getenv("SYSTEMD_SLOW_TESTS", "0"))):
print(f"SYSTEMD_SLOW_TESTS=1 not found in environment, skipping {args.name}", file=sys.stderr)
exit(77)
if args.vm and bool(int(os.getenv('TEST_NO_QEMU', '0'))):
print(f'TEST_NO_QEMU=1, skipping {args.name}', file=sys.stderr)
if args.vm and bool(int(os.getenv("TEST_NO_QEMU", "0"))):
print(f"TEST_NO_QEMU=1, skipping {args.name}", file=sys.stderr)
exit(77)
for s in os.getenv('TEST_SKIP', '').split():
for s in os.getenv("TEST_SKIP", "").split():
if s in args.name:
print(f'Skipping {args.name} due to TEST_SKIP', file=sys.stderr)
print(f"Skipping {args.name} due to TEST_SKIP", file=sys.stderr)
exit(77)
keep_journal = os.getenv('TEST_SAVE_JOURNAL', 'fail')
shell = bool(int(os.getenv('TEST_SHELL', '0')))
keep_journal = os.getenv("TEST_SAVE_JOURNAL", "fail")
shell = bool(int(os.getenv("TEST_SHELL", "0")))
if shell and not sys.stderr.isatty():
print(
'--interactive must be passed to meson test to use TEST_SHELL=1',
file=sys.stderr,
)
print(f"--interactive must be passed to meson test to use TEST_SHELL=1", file=sys.stderr)
exit(1)
name = args.name + (f'-{i}' if (i := os.getenv('MESON_TEST_ITERATION')) else '')
name = args.name + (f"-{i}" if (i := os.getenv("MESON_TEST_ITERATION")) else "")
dropin = textwrap.dedent(
"""\
@ -146,14 +84,14 @@ def main() -> None:
if not shell:
dropin += textwrap.dedent(
"""
f"""
[Unit]
SuccessAction=exit
SuccessActionExitStatus=123
"""
)
if os.getenv('TEST_MATCH_SUBTEST'):
if os.getenv("TEST_MATCH_SUBTEST"):
dropin += textwrap.dedent(
f"""
[Service]
@ -161,7 +99,7 @@ def main() -> None:
"""
)
if os.getenv('TEST_MATCH_TESTCASE'):
if os.getenv("TEST_MATCH_TESTCASE"):
dropin += textwrap.dedent(
f"""
[Service]
@ -169,9 +107,7 @@ def main() -> None:
"""
)
journal_file = (args.meson_build_dir / (f'test/journal/{name}.journal')).absolute()
journal_file.unlink(missing_ok=True)
journal_file = None
if not sys.stderr.isatty():
dropin += textwrap.dedent(
"""
@ -179,6 +115,9 @@ def main() -> None:
FailureAction=exit
"""
)
journal_file = (args.meson_build_dir / (f"test/journal/{name}.journal")).absolute()
journal_file.unlink(missing_ok=True)
elif not shell:
dropin += textwrap.dedent(
"""
@ -197,93 +136,87 @@ def main() -> None:
*(['--forward-journal', journal_file] if journal_file else []),
*(
[
'--credential', f'systemd.extra-unit.emergency-exit.service={shlex.quote(EMERGENCY_EXIT_SERVICE)}', # noqa: E501
'--credential', f'systemd.unit-dropin.emergency.target={shlex.quote(EMERGENCY_EXIT_DROPIN)}',
'--credential',
f"systemd.extra-unit.emergency-exit.service={shlex.quote(EMERGENCY_EXIT_SERVICE)}",
'--credential',
f"systemd.unit-dropin.emergency.target={shlex.quote(EMERGENCY_EXIT_DROPIN)}",
]
if not sys.stderr.isatty()
else []
),
'--credential', f'systemd.unit-dropin.{args.unit}={shlex.quote(dropin)}',
'--credential',
f"systemd.unit-dropin.{args.unit}={shlex.quote(dropin)}",
'--runtime-network=none',
'--runtime-scratch=no',
*args.mkosi_args,
'--qemu-firmware',
args.firmware,
*(['--qemu-kvm', 'no'] if int(os.getenv('TEST_NO_KVM', '0')) else []),
'--qemu-firmware', args.firmware,
*(['--qemu-kvm', 'no'] if int(os.getenv("TEST_NO_KVM", "0")) else []),
'--kernel-command-line-extra',
' '.join(
[
'systemd.hostname=H',
f'SYSTEMD_UNIT_PATH=/usr/lib/systemd/tests/testdata/{args.name}.units:/usr/lib/systemd/tests/testdata/units:',
*([f'systemd.unit={args.unit}'] if not shell else []),
'systemd.mask=systemd-networkd-wait-online.service',
*(
[
'systemd.mask=serial-getty@.service',
'systemd.show_status=error',
'systemd.crash_shell=0',
'systemd.crash_action=poweroff',
]
if not sys.stderr.isatty()
else []
),
]
),
' '.join([
'systemd.hostname=H',
f"SYSTEMD_UNIT_PATH=/usr/lib/systemd/tests/testdata/{args.name}.units:/usr/lib/systemd/tests/testdata/units:",
*([f"systemd.unit={args.unit}"] if not shell else []),
'systemd.mask=systemd-networkd-wait-online.service',
*(
[
"systemd.mask=serial-getty@.service",
"systemd.show_status=error",
"systemd.crash_shell=0",
"systemd.crash_action=poweroff",
]
if not sys.stderr.isatty()
else []
),
]),
'--credential', f"journal.storage={'persistent' if sys.stderr.isatty() else args.storage}",
*(['--runtime-build-sources=no'] if not sys.stderr.isatty() else []),
'qemu' if args.vm or os.getuid() != 0 else 'boot',
] # fmt: skip
]
result = subprocess.run(cmd)
# On Debian/Ubuntu we get a lot of random QEMU crashes. Retry once, and then skip if it fails again.
if args.vm and result.returncode == 247 and args.exit_code != 247:
if journal_file:
journal_file.unlink(missing_ok=True)
journal_file.unlink(missing_ok=True)
result = subprocess.run(cmd)
if args.vm and result.returncode == 247 and args.exit_code != 247:
print(
f'Test {args.name} failed due to QEMU crash (error 247), ignoring',
file=sys.stderr,
)
print(f"Test {args.name} failed due to QEMU crash (error 247), ignoring", file=sys.stderr)
exit(77)
coredumps = process_coredumps(args, journal_file)
if keep_journal == '0' or (
keep_journal == 'fail' and result.returncode in (args.exit_code, 77) and not coredumps
):
if journal_file and (keep_journal == "0" or (result.returncode in (args.exit_code, 77) and keep_journal == "fail")):
journal_file.unlink(missing_ok=True)
if shell or (result.returncode in (args.exit_code, 77) and not coredumps):
if shell or result.returncode in (args.exit_code, 77):
exit(0 if shell or result.returncode == args.exit_code else 77)
ops = []
if journal_file:
ops = []
if os.getenv('GITHUB_ACTIONS'):
id = os.environ['GITHUB_RUN_ID']
iteration = os.environ['GITHUB_RUN_ATTEMPT']
j = json.loads(
subprocess.run(
[
args.mkosi,
'--directory', os.fspath(args.meson_source_dir),
'--json',
'summary',
],
stdout=subprocess.PIPE,
text=True,
).stdout
) # fmt: skip
distribution = j['Images'][-1]['Distribution']
release = j['Images'][-1]['Release']
artifact = f'ci-mkosi-{id}-{iteration}-{distribution}-{release}-failed-test-journals'
ops += [f'gh run download {id} --name {artifact} -D ci/{artifact}']
journal_file = Path(f'ci/{artifact}/test/journal/{name}.journal')
if os.getenv("GITHUB_ACTIONS"):
id = os.environ["GITHUB_RUN_ID"]
iteration = os.environ["GITHUB_RUN_ATTEMPT"]
j = json.loads(
subprocess.run(
[
args.mkosi,
"--directory", os.fspath(args.meson_source_dir),
"--json",
"summary",
],
stdout=subprocess.PIPE,
text=True,
).stdout
)
distribution = j["Images"][-1]["Distribution"]
release = j["Images"][-1]["Release"]
artifact = f"ci-mkosi-{id}-{iteration}-{distribution}-{release}-failed-test-journals"
ops += [f"gh run download {id} --name {artifact} -D ci/{artifact}"]
journal_file = Path(f"ci/{artifact}/test/journal/{name}.journal")
ops += [f'journalctl --file {journal_file} --no-hostname -o short-monotonic -u {args.unit} -p info']
ops += [f"journalctl --file {journal_file} --no-hostname -o short-monotonic -u {args.unit} -p info"]
print("Test failed, relevant logs can be viewed with: \n\n" f"{(' && '.join(ops))}\n", file=sys.stderr)
print("Test failed, relevant logs can be viewed with: \n\n"
f"{(' && '.join(ops))}\n", file=sys.stderr)
# 0 also means we failed so translate that to a non-zero exit code to mark the test as failed.
exit(result.returncode or 1)

View File

@ -297,7 +297,6 @@ integration_test_template = {
'qemu-args' : [],
'exit-code' : 123,
'vm' : false,
'coredump-exclude-regex' : '',
}
testdata_subdirs = [
'auxv',
@ -392,7 +391,6 @@ foreach integration_test : integration_tests
'--storage', integration_test['storage'],
'--firmware', integration_test['firmware'],
'--exit-code', integration_test['exit-code'].to_string(),
'--coredump-exclude-regex', integration_test['coredump-exclude-regex'],
]
if 'unit' in integration_test

View File

@ -248,7 +248,6 @@ Bridge=mybridge
[Match]
Name=mybridge
[Network]
IPv6AcceptRA=no
DNS=192.168.250.1
Address=192.168.250.33/24
Gateway=192.168.250.1
@ -541,7 +540,6 @@ MACAddress=12:34:56:78:9a:bc
[Match]
Name=dummy0
[Network]
IPv6AcceptRA=no
Address=192.168.42.100/24
DNS=192.168.42.1
Domains= ~company
@ -575,7 +573,6 @@ MACAddress=12:34:56:78:9a:bc
self.write_network('50-myvpn.network', '''[Match]
Name=dummy0
[Network]
IPv6AcceptRA=no
Address=192.168.42.100/24
DNS=192.168.42.1
Domains= ~company ~.
@ -930,7 +927,6 @@ cat <<EOF >/run/systemd/network/50-test.network
Name={ifr}
[Network]
IPv6AcceptRA=no
Address=192.168.5.1/24
{addr6}
DHCPServer=yes
@ -964,13 +960,10 @@ exec $(systemctl cat systemd-networkd.service | sed -n '/^ExecStart=/ {{ s/^.*=/
# wait until devices got created
for _ in range(50):
if subprocess.run(['ip', 'link', 'show', 'dev', self.if_router],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode == 0:
out = subprocess.check_output(['ip', 'a', 'show', 'dev', self.if_router])
if b'state UP' in out and b'scope global' in out:
break
time.sleep(0.1)
else:
subprocess.call(['ip', 'link', 'show', 'dev', self.if_router])
self.fail('Timed out waiting for {ifr} created.'.format(ifr=self.if_router))
def shutdown_iface(self):
'''Remove test interface and stop DHCP server'''
@ -1010,7 +1003,6 @@ MACAddress=12:34:56:78:9a:bc
[Match]
Name=dummy0
[Network]
IPv6AcceptRA=no
Address=192.168.42.100/24
DNS=192.168.42.1
Domains= one two three four five six seven eight nine ten
@ -1040,7 +1032,6 @@ MACAddress=12:34:56:78:9a:bc
[Match]
Name=dummy0
[Network]
IPv6AcceptRA=no
Address=192.168.42.100/24
DNS=192.168.42.1
''')
@ -1113,12 +1104,7 @@ class MatchClientTest(unittest.TestCase, NetworkdTestingUtilities):
def test_basic_matching(self):
"""Verify the Name= line works throughout this class."""
self.add_veth_pair('test_if1', 'fake_if2')
self.write_network('50-test.network', '''\
[Match]
Name=test_*
[Network]
IPv6AcceptRA=no
''')
self.write_network('50-test.network', "[Match]\nName=test_*\n[Network]")
subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
self.assert_link_states(test_if1='managed', fake_if2='unmanaged')
@ -1129,13 +1115,11 @@ IPv6AcceptRA=no
mac = '00:01:02:03:98:99'
self.add_veth_pair('test_veth', 'test_peer',
['addr', mac], ['addr', mac])
self.write_network('50-no-veth.network', '''\
self.write_network('50-no-veth.network', """\
[Match]
MACAddress={}
Name=!nonexistent *peer*
[Network]
IPv6AcceptRA=no
'''.format(mac))
[Network]""".format(mac))
subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
self.assert_link_states(test_veth='managed', test_peer='unmanaged')

View File

@ -1,71 +0,0 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
# shellcheck disable=SC2317
set -ex
set -o pipefail
# This is a reproducer of issue #35329,
# which is a regression caused by 405be62f05d76f1845f347737b5972158c79dd3e.
IFNAME=udevtestnetif
at_exit() {
set +e
systemctl stop testsleep.service
rm -f /run/udev/udev.conf.d/timeout.conf
rm -f /run/udev/rules.d/99-testsuite.rules
# Forcibly kills sleep command invoked by the udev rule before restarting,
# otherwise systemctl restart below will takes longer.
killall -KILL sleep
systemctl restart systemd-udevd.service
ip link del "$IFNAME"
}
trap at_exit EXIT
udevadm settle
mkdir -p /run/udev/udev.conf.d/
cat >/run/udev/udev.conf.d/timeout.conf <<EOF
event_timeout=1h
EOF
mkdir -p /run/udev/rules.d/
cat >/run/udev/rules.d/99-testsuite.rules <<EOF
SUBSYSTEM=="net", ACTION=="change", KERNEL=="${IFNAME}", OPTIONS="log_level=debug", RUN+="/usr/bin/sleep 1000"
EOF
systemctl restart systemd-udevd.service
ip link add "$IFNAME" type dummy
IFINDEX=$(ip -json link show "$IFNAME" | jq '.[].ifindex')
udevadm wait --timeout 10 "/sys/class/net/${IFNAME}"
# Check if the database file is created.
[[ -e "/run/udev/data/n${IFINDEX}" ]]
systemd-run \
-p After="sys-subsystem-net-devices-${IFNAME}.device" \
-p BindsTo="sys-subsystem-net-devices-${IFNAME}.device" \
-u testsleep.service \
sleep 1h
timeout 10 bash -c 'until systemctl is-active testsleep.service; do sleep .5; done'
udevadm trigger "/sys/class/net/${IFNAME}"
timeout 30 bash -c "until grep -F 'ID_PROCESSING=1' /run/udev/data/n${IFINDEX}; do sleep .5; done"
for _ in {1..3}; do
systemctl daemon-reexec
systemctl is-active testsleep.service
done
for _ in {1..3}; do
systemctl daemon-reload
systemctl is-active testsleep.service
done
# Check if the reexec and reload have finished during processing the event.
grep -F 'ID_PROCESSING=1' "/run/udev/data/n${IFINDEX}"
exit 0

View File

@ -6,14 +6,6 @@ set -o pipefail
# shellcheck source=test/units/test-control.sh
. "$(dirname "$0")"/test-control.sh
if systemd-detect-virt --quiet --container; then
# This comes from the selinux package and tries to write
# some files under sysfs, which will be read-only in a container,
# so mask it. It's not our tmpfiles.d file anyway.
mkdir -p /run/tmpfiles.d/
ln -s /dev/null /run/tmpfiles.d/selinux-policy.conf
fi
run_subtests
touch /testok

View File

@ -5,7 +5,3 @@ set -o pipefail
SYSTEMD_IN_CHROOT=1 systemd-detect-virt --chroot
(! SYSTEMD_IN_CHROOT=0 systemd-detect-virt --chroot)
if ! systemd-detect-virt -c; then
unshare --mount-proc --fork --user --pid systemd-detect-virt --container
fi

View File

@ -13,12 +13,11 @@
d /run/lock 0755 root root -
L /var/lock - - - - ../run/lock
{% if HAVE_SYSV_COMPAT %}
{% if CREATE_LOG_DIRS %}
L$ /var/log/README - - - - ../..{{DOC_DIR}}/README.logs
{% endif %}
{% if HAVE_SYSV_COMPAT %}
# /run/lock/subsys is used for serializing SysV service execution, and
# hence without use on SysV-less systems.
d /run/lock/subsys 0755 root root -