Mirror of https://github.com/systemd/systemd (synced 2025-11-14 14:24:45 +01:00)

Compare commits: 7547c3e06a...f8293452b6 (8 commits)
| SHA1 |
|---|
| f8293452b6 |
| 61fffbfa58 |
| 89e74275ad |
| 2db3e403ae |
| f0fdb69a8d |
| 6d510012b7 |
| 9d6c34a297 |
| 057fe0a6ca |
NEWS (8 changed lines)

@@ -2,6 +2,14 @@ systemd System and Service Manager
 
 CHANGES WITH 259 in spe:
 
+        Announcements of Future Feature Removals and Incompatible Changes:
+
+        * The parsing of RootImageOptions= and the mount image parameters of
+          ExtensionImages= and MountImages= will be changed so that the last
+          duplicated definition for a given partition wins and is applied,
+          rather than the first, to keep these options coherent with other
+          unit settings.
+
         * The cgroup2 file system is now mounted with the
           "memory_hugetlb_accounting" mount option, supported since kernel 6.6.
           This means that HugeTLB memory usage is now counted towards the
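For illustration, a minimal sketch of what the announced RootImageOptions= change means in practice. The unit name, drop-in path, and image path are made up; only the last-entry-wins ordering comes from the NEWS entry above.

    # Hypothetical drop-in: the "root" partition appears twice in RootImageOptions=.
    # With the announced change the last entry ("ro") is the one applied, not the
    # first ("rw").
    mkdir -p /run/systemd/system/myapp.service.d
    printf '%s\n' '[Service]' \
        'RootImage=/var/lib/machines/myapp.raw' \
        'RootImageOptions=root:rw root:ro' \
        >/run/systemd/system/myapp.service.d/10-image.conf
    systemctl daemon-reload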
meson.build (19 changed lines)

@@ -1534,10 +1534,19 @@ conf.set('DEFAULT_DNSSEC_MODE',
          'DNSSEC_' + default_dnssec.underscorify().to_upper())
 conf.set_quoted('DEFAULT_DNSSEC_MODE_STR', default_dnssec)
 
+have = get_option('importd').require(
+        conf.get('HAVE_LIBCURL') == 1 and
+        conf.get('HAVE_OPENSSL') == 1 and
+        conf.get('HAVE_ZLIB') == 1 and
+        conf.get('HAVE_XZ') == 1,
+        error_message : 'curl, openssl, zlib and xz required').allowed()
+conf.set10('ENABLE_IMPORTD', have)
+
 have = get_option('sysupdate').require(
+        conf.get('ENABLE_IMPORTD') == 1 and
         conf.get('HAVE_OPENSSL') == 1 and
         conf.get('HAVE_LIBFDISK') == 1,
-        error_message : 'fdisk and openssl required').allowed()
+        error_message : 'systemd-importd, fdisk, and openssl required').allowed()
 conf.set10('ENABLE_SYSUPDATE', have)
 
 have2 = get_option('sysupdated')

@@ -1556,14 +1565,6 @@ conf.set10('ENABLE_SYSUPDATED', have2)
 
 conf.set10('ENABLE_STORAGETM', get_option('storagetm'))
 
-have = get_option('importd').require(
-        conf.get('HAVE_LIBCURL') == 1 and
-        conf.get('HAVE_OPENSSL') == 1 and
-        conf.get('HAVE_ZLIB') == 1 and
-        conf.get('HAVE_XZ') == 1,
-        error_message : 'curl, openssl/grypt, zlib and xz required').allowed()
-conf.set10('ENABLE_IMPORTD', have)
-
 have = get_option('homed').require(
         conf.get('HAVE_OPENSSL') == 1 and
         conf.get('HAVE_LIBFDISK') == 1 and
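For illustration, hedged configure invocations showing the option dependency introduced above: sysupdate now additionally requires importd to be enabled. The build directory name is arbitrary and the library availability is assumed.

    # Assuming curl, openssl, zlib, xz and fdisk are all available:
    meson setup build -Dimportd=enabled -Dsysupdate=enabled    # allowed
    meson setup build -Dimportd=disabled -Dsysupdate=enabled   # now rejected with
                                                               # 'systemd-importd, fdisk, and openssl required'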
@@ -187,6 +187,18 @@ static int connect_journal_socket(
         return r;
 }
 
+static bool exec_output_forward_to_console(ExecOutput o) {
+        return IN_SET(o,
+                      EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
+                      EXEC_OUTPUT_KMSG_AND_CONSOLE);
+}
+
+static bool exec_output_forward_to_kmsg(ExecOutput o) {
+        return IN_SET(o,
+                      EXEC_OUTPUT_KMSG,
+                      EXEC_OUTPUT_KMSG_AND_CONSOLE);
+}
+
 static int connect_logger_as(
                 const ExecContext *context,
                 const ExecParameters *params,

@@ -231,8 +243,8 @@ static int connect_logger_as(
                         context->syslog_priority,
                         !!context->syslog_level_prefix,
                         false,
-                        exec_output_is_kmsg(output),
-                        exec_output_is_terminal(output)) < 0)
+                        exec_output_forward_to_kmsg(output),
+                        exec_output_forward_to_console(output)) < 0)
                 return -errno;
 
         return move_fd(TAKE_FD(fd), nfd, false);
@@ -1237,7 +1249,10 @@ static int exec_context_get_tty_for_pam(const ExecContext *context, char **ret)
                 return 1;
         }
 
-        if (!IN_SET(context->std_input, EXEC_INPUT_TTY, EXEC_INPUT_TTY_FAIL, EXEC_INPUT_TTY_FORCE)) {
+        /* Do not implicitly configure TTY unless TTYPath= or StandardInput=tty is specified. See issue
+         * #39334. Note, exec_context_tty_path() returns "/dev/console" when TTYPath= is unspecified, hence
+         * explicitly check context->tty_path here. */
+        if (!context->tty_path && !exec_input_is_terminal(context->std_input)) {
                 *ret = NULL;
                 return 0;
         }
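A hypothetical spot-check of the behaviour changed above, not part of the diff and assuming a 'login' PAM stack is installed:

    # After the change, a PAM-enabled service that sets neither TTYPath= nor
    # StandardInput=tty should no longer have a TTY handed to its PAM session,
    # while a unit with an explicit TTYPath= still should.
    systemd-run --wait -p PAMName=login true                        # no TTY for PAM expected
    systemd-run --wait -p PAMName=login -p TTYPath=/dev/tty9 true   # TTY passed to PAM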
@@ -470,19 +470,6 @@ static inline bool exec_input_is_terminal(ExecInput i) {
                       EXEC_INPUT_TTY_FAIL);
 }
 
-static inline bool exec_output_is_terminal(ExecOutput o) {
-        return IN_SET(o,
-                      EXEC_OUTPUT_TTY,
-                      EXEC_OUTPUT_KMSG_AND_CONSOLE,
-                      EXEC_OUTPUT_JOURNAL_AND_CONSOLE);
-}
-
-static inline bool exec_output_is_kmsg(ExecOutput o) {
-        return IN_SET(o,
-                      EXEC_OUTPUT_KMSG,
-                      EXEC_OUTPUT_KMSG_AND_CONSOLE);
-}
-
 static inline bool exec_context_has_tty(const ExecContext *context) {
         assert(context);
 
@@ -509,7 +509,7 @@ static SD_VARLINK_DEFINE_STRUCT_TYPE(
         SD_VARLINK_DEFINE_FIELD(Model, SD_VARLINK_STRING, SD_VARLINK_NULLABLE),
         SD_VARLINK_FIELD_COMMENT("DNS servers configured for this interface"),
         SD_VARLINK_DEFINE_FIELD_BY_TYPE(DNS, DNS, SD_VARLINK_ARRAY|SD_VARLINK_NULLABLE),
-        /* FIXME: DNR Addresses fied (json array or arrays) is incompatible with Varlink type system */
+        /* FIXME: DNR Addresses field (json array or arrays) is incompatible with Varlink type system */
         SD_VARLINK_FIELD_COMMENT("Discovery of Network-designated Resolvers (RFC9463)"),
         SD_VARLINK_DEFINE_FIELD(DNR, SD_VARLINK_OBJECT, SD_VARLINK_ARRAY|SD_VARLINK_NULLABLE),
         SD_VARLINK_FIELD_COMMENT("NTP servers configured for this interface"),
@@ -18,6 +18,7 @@ TMP_DIR=$(mktemp -d)
 # service stdout will not contain _SYSTEMD_INVOCATION_ID field.
 SAVED_LOG_LEVEL=$(systemctl log-level)
 systemctl log-level info
+journalctl --rotate
 
 # Note, if the service exits extremely fast, journald cannot find the source of the
 # stream. Hence, we need to call 'journalctl --sync' before service exits.
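A hypothetical illustration of the pattern the comment above refers to, not part of the test and with a made-up unit name: flush stream (stdout/stderr) output into the journal, then read it back.

    journalctl --sync
    journalctl -u short-lived.service -o cat --no-pager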
@@ -75,16 +75,16 @@ check_elapse_timestamp() {
 systemctl restart "$UNIT_NAME.timer"
 check_elapse_timestamp
 
-# Bump the system date to 1 minute after the original calendar timer would've expired (without any random
-# delay!) - systemd should recalculate the next elapse timestamp with a new randomized delay, but it should
-# use the original inactive exit timestamp as a "base", so the final timestamp should not end up beyond the
-# original calendar timestamp + randomized delay range.
+# Bump the system date to exactly the original calendar timer time (without any random delay!) - systemd
+# should recalculate the next elapse timestamp with a new randomized delay, but it should use the original
+# inactive exit timestamp as a "base", so the final timestamp should not end up beyond the original calendar
+# timestamp + randomized delay range.
 #
 # Similarly, do the same check after doing daemon-reload, as that also forces systemd to recalculate the next
 # elapse timestamp (this goes through a slightly different codepath that actually contained the original
 # issue).
 : "Next elapse timestamp after time jump"
-date -s "tomorrow 00:11"
+date -s "tomorrow 00:10"
 check_elapse_timestamp
 
 : "Next elapse timestamp after daemon-reload"
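A hypothetical spot-check, not part of the test, for inspecting the recalculated elapse timestamps after the time jump above, reusing the timer unit referenced earlier in the script:

    systemctl show "$UNIT_NAME.timer" \
        --property=NextElapseUSecRealtime --property=LastTriggerUSec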