Mirror of https://github.com/hardkernel/linux.git
Merge 5.15.130 into android14-5.15-lts
Changes in 5.15.130
	ACPI: thermal: Drop nocrt parameter
	module: Expose module_init_layout_section()
	arm64: module-plts: inline linux/moduleloader.h
	arm64: module: Use module_init_layout_section() to spot init sections
	ARM: module: Use module_init_layout_section() to spot init sections
	rcu: Prevent expedited GP from enabling tick on offline CPU
	rcu-tasks: Fix IPI failure handling in trc_wait_for_one_reader
	rcu-tasks: Wait for trc_read_check_handler() IPIs
	rcu-tasks: Add trc_inspect_reader() checks for exiting critical section
	Linux 5.15.130

Change-Id: Ibf4ab4fbe70e9099f930096f34094fe8d1ec23bb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Documentation/admin-guide/kernel-parameters.txt
@@ -5818,10 +5818,6 @@
 			-1: disable all critical trip points in all thermal zones
 			<degrees C>: override all critical trip points
 
-	thermal.nocrt=	[HW,ACPI]
-			Set to disable actions on ACPI thermal zone
-			critical and hot trip points.
-
 	thermal.off=	[HW,ACPI]
 			1: disable ACPI thermal control
 
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 129
+SUBLEVEL = 130
 EXTRAVERSION =
 NAME = Trick or Treat
 
arch/arm/kernel/module-plts.c
@@ -256,7 +256,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		/* sort by type and symbol index */
 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
 
-		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+		if (!module_init_layout_section(secstrings + dstsec->sh_name))
 			core_plts += count_plts(syms, dstsec->sh_addr, rels,
 						numrels, s->sh_info);
 		else
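The dropped check classified init sections purely by the ".init" name prefix, which can disagree with how the module loader actually lays sections out: with CONFIG_MODULE_UNLOAD disabled, .exit sections are placed in init memory as well, so PLT counts could be attributed to the wrong region. The newly exported helper, whose opening lines appear in the kernel/module.c hunk below, is essentially the following (the final return line is inferred from the 5.15 sources):

	/* Matches get_offset()'s layout decision instead of a bare name test. */
	bool module_init_layout_section(const char *sname)
	{
	#ifndef CONFIG_MODULE_UNLOAD
		if (module_exit_section(sname))		/* .exit* also lands in init memory */
			return true;
	#endif
		return module_init_section(sname);	/* .init* proper */
	}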
arch/arm64/kernel/module-plts.c
@@ -7,6 +7,7 @@
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/sort.h>
 
 static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
@@ -342,7 +343,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (nents)
 			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
 
-		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
+		if (!module_init_layout_section(secstrings + dstsec->sh_name))
 			core_plts += count_plts(syms, rels, numrels,
 						sechdrs[i].sh_info, dstsec);
 		else
drivers/acpi/thermal.c
@@ -59,10 +59,6 @@ static int tzp;
 module_param(tzp, int, 0444);
 MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.");
 
-static int nocrt;
-module_param(nocrt, int, 0);
-MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points.");
-
 static int off;
 module_param(off, int, 0);
 MODULE_PARM_DESC(off, "Set to disable ACPI thermal support.");
@@ -1132,7 +1128,7 @@ static int thermal_nocrt(const struct dmi_system_id *d) {
 
 	pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
 		  d->ident);
-	nocrt = 1;
+	crt = -1;
 	return 0;
 }
 static int thermal_tzp(const struct dmi_system_id *d) {
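Booting with thermal.crt=-1 already disables every critical trip point, so the dedicated nocrt parameter was redundant; the DMI quirk callback above therefore switches from nocrt = 1 to crt = -1. For context, such quirks are registered through a dmi_system_id table and dmi_check_system(); a minimal sketch follows, in which the vendor, product, and ident strings are hypothetical placeholders:

	#include <linux/dmi.h>

	static const struct dmi_system_id thermal_dmi_table[] __initconst = {
		{
			.callback = thermal_nocrt,	/* now sets crt = -1 */
			.ident = "Example Board",	/* placeholder ident */
			.matches = {
				DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
			},
		},
		{}	/* terminating entry */
	};

During driver init, dmi_check_system(thermal_dmi_table) runs the callback for each matching entry.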
include/linux/moduleloader.h
@@ -39,6 +39,11 @@ bool module_init_section(const char *name);
  */
 bool module_exit_section(const char *name);
 
+/* Describes whether within_module_init() will consider this an init section
+ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
+ */
+bool module_init_layout_section(const char *sname);
+
 /*
  * Apply the given relocation to the (simplified) ELF. Return -error
  * or 0.
kernel/module.c
@@ -2450,7 +2450,7 @@ static long get_offset(struct module *mod, unsigned int *size,
 	return ret;
 }
 
-static bool module_init_layout_section(const char *sname)
+bool module_init_layout_section(const char *sname)
 {
 #ifndef CONFIG_MODULE_UNLOAD
 	if (module_exit_section(sname))
kernel/rcu/tasks.h
@@ -951,7 +951,7 @@ reset_ipi:
 static bool trc_inspect_reader(struct task_struct *t, void *arg)
 {
 	int cpu = task_cpu(t);
-	bool in_qs = false;
+	int nesting;
 	bool ofl = cpu_is_offline(cpu);
 
 	if (task_curr(t)) {
@@ -971,18 +971,18 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		n_heavy_reader_updates++;
 		if (ofl)
 			n_heavy_reader_ofl_updates++;
-		in_qs = true;
+		nesting = 0;
 	} else {
 		// The task is not running, so C-language access is safe.
-		in_qs = likely(!t->trc_reader_nesting);
+		nesting = t->trc_reader_nesting;
 	}
 
-	// Mark as checked so that the grace-period kthread will
-	// remove it from the holdout list.
-	t->trc_reader_checked = true;
-
-	if (in_qs)
-		return true;  // Already in quiescent state, done!!!
+	// If not exiting a read-side critical section, mark as checked
+	// so that the grace-period kthread will remove it from the
+	// holdout list.
+	t->trc_reader_checked = nesting >= 0;
+	if (nesting <= 0)
+		return !nesting;  // If in QS, done, otherwise try again later.
 
 	// The task is in a read-side critical section, so set up its
 	// state so that it will awaken the grace-period kthread upon exit
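Replacing the in_qs boolean with the raw nesting count lets the inspector distinguish a third state: a task caught while exiting a read-side critical section has a negative trc_reader_nesting and must be neither marked checked nor reported quiescent. A compact restatement of the verdict (an illustrative pure function, not kernel code; the nesting > 0 case actually falls through to arm an exit-time report):

	/* nesting > 0:  inside a critical section -> wait for its exit;
	 * nesting == 0: quiescent                 -> checked, done;
	 * nesting < 0:  exiting                   -> not checked, retry later. */
	static bool quiescent_verdict(int nesting, bool *checked)
	{
		*checked = nesting >= 0;	/* mirrors: t->trc_reader_checked = nesting >= 0; */
		return nesting == 0;		/* mirrors: return !nesting; for nesting <= 0 */
	}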
@@ -1041,9 +1041,11 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
 			// Just in case there is some other reason for
 			// failure than the target CPU being offline.
+			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
+				  __func__, cpu);
 			rcu_tasks_trace.n_ipis_fails++;
 			per_cpu(trc_ipi_to_cpu, cpu) = false;
-			t->trc_ipi_to_cpu = cpu;
+			t->trc_ipi_to_cpu = -1;
 		}
 	}
 }
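Previously the failure path stored the CPU number in t->trc_ipi_to_cpu even though no IPI had actually been queued, so later code could wait for a handler that would never run. Writing -1 restores the invariant that the field names a CPU only while an IPI is genuinely in flight. An illustrative helper stating that invariant (the name note_ipi_result is hypothetical):

	static void note_ipi_result(struct task_struct *t, int cpu, int err)
	{
		if (err) {
			/* Send failed: roll back both markers. */
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		} else {
			/* IPI queued: the handler resets this to -1 when it runs. */
			t->trc_ipi_to_cpu = cpu;
		}
	}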
@@ -1164,14 +1166,28 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 	}
 }
 
+static void rcu_tasks_trace_empty_fn(void *unused)
+{
+}
+
 /* Wait for grace period to complete and provide ordering. */
 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 {
+	int cpu;
 	bool firstreport;
 	struct task_struct *g, *t;
 	LIST_HEAD(holdouts);
 	long ret;
 
+	// Wait for any lingering IPI handlers to complete. Note that
+	// if a CPU has gone offline or transitioned to userspace in the
+	// meantime, all IPI handlers should have been drained beforehand.
+	// Yes, this assumes that CPUs process IPIs in order. If that ever
+	// changes, there will need to be a recheck and/or timed wait.
+	for_each_online_cpu(cpu)
+		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
+			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
+
 	// Remove the safety count.
 	smp_mb__before_atomic();  // Order vs. earlier atomics
 	atomic_dec(&trc_n_readers_need_end);
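The drain added to rcu_tasks_trace_postgp() relies on CPUs handling IPIs in order: a synchronous (wait=1) empty IPI queued behind a still-pending trc_read_check_handler() cannot return until that earlier handler has finished, so afterwards no handler can still be touching holdout state. The same idiom in isolation (helper names here are illustrative):

	static void empty_fn(void *unused)
	{
		/* Intentionally empty: only its completion matters. */
	}

	static void drain_ipis_on(int cpu)
	{
		/* Acquire pairs with the handler's release store of the flag. */
		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
			smp_call_function_single(cpu, empty_fn, NULL, 1);	/* wait=1 */
	}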
kernel/rcu/tree_exp.h
@@ -604,7 +604,10 @@ static void synchronize_rcu_expedited_wait(void)
 				if (rdp->rcu_forced_tick_exp)
 					continue;
 				rdp->rcu_forced_tick_exp = true;
-				tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
+				preempt_disable();
+				if (cpu_online(cpu))
+					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
+				preempt_enable();
 			}
 		}
 		j = READ_ONCE(jiffies_till_first_fqs);
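Unconditionally setting the tick dependency could race with CPU hotplug and poke a CPU that had just gone offline. Checking cpu_online() with preemption disabled closes the window, because the hotplug machinery cannot complete the offline transition while such a section is running. The pattern in general form (the helper name if_online is illustrative):

	static void if_online(int cpu, void (*fn)(int cpu))
	{
		preempt_disable();		/* holds off completion of CPU offlining */
		if (cpu_online(cpu))
			fn(cpu);		/* e.g. set a tick dependency */
		preempt_enable();
	}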