mirror of
https://github.com/hardkernel/linux.git
synced 2026-03-31 18:23:00 +09:00
PD#SWPL-17246 Problem: sync the code from mainline. Solution: sync the code from mainline. 7c03859983c2 OSS vulnerability found in [boot.img]:[linux_kernel] (CVE-2018-12232) Risk:[] [1/1] ba89a3d9c791 OSS vulnerability found in [boot.img]:[linux_kernel] (CVE-2019-8912) Risk:[] [1/1] c434d0530610 Android Security Bulletin - November 2019-11 - Kernel components binder driver - CVE-2019-2214 [1/1] ff8d9012fbd4 Android Security Bulletin - November 2019-11 - Kernel components ext4 filesystem - CVE-2019-11833 [1/1] 3c52e964495e cec: store msg after bootup from st [1/2] 94198a56ee10 lcd: support tcon vac and demura data [2/2] 1add1a008a03 vout: spi: porting lcd driver and SPI to Linux [1/1] 3e8d7b0e5f97 hdmirx: add hpd recovery logic when input clk is unstable [1/1] f92e7ba21c62 ppmgr: Add 10bit, dolby and HDR video rotation. [1/1] dab2cc37cd95 dvb: fix dmx2 interrupt bug [1/1] 9d31efae4a55 dv: add dv target output mode [1/1] e86eb9d1b5c5 hdmirx: add rx phy tdr enable control [1/1] 8ea66f645bf6 dts: enable spi for gva [1/1] baf6e74528ef drm: add drm support for tm2 [1/1] Verify: verify by newton Change-Id: I9415060a4b39895b5d624117271a72fc6a1fd187 Signed-off-by: Luan Yuan <luan.yuan@amlogic.com>
332 lines
7.8 KiB
C
332 lines
7.8 KiB
C
/*
|
|
* drivers/amlogic/cpu_hotplug/cpu_hotplug.c
|
|
*
|
|
* Copyright (C) 2017 Amlogic, Inc. All rights reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License as published by
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
* (at your option) any later version.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
* more details.
|
|
*
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/init.h>
|
|
#include <linux/cpufreq.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/jiffies.h>
|
|
#include <linux/kernel_stat.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/hrtimer.h>
|
|
#include <linux/tick.h>
|
|
#include <linux/ktime.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/err.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/kthread.h>
|
|
#include <linux/sched/rt.h>
|
|
#include <linux/notifier.h>
|
|
#include "linux/amlogic/cpu_hotplug.h"
|
|
#include <linux/delay.h>
|
|
|
|
#define MAX_CLUSTRS 2

/*
 * Per-cluster CPU hotplug bookkeeping.  A single static instance (hpg)
 * holds all module state; hpg.mutex serializes access to it.
 */
struct cpu_hotplug_s {
	/* Number of clusters actually detected (<= MAX_CLUSTRS),
	 * computed in cpu_hotplug_cpumask_init(). */
	int clusters;
	/* Pending request per cluster: CPU_HOTPLUG_PLUG / CPU_HOTPLUG_UNPLUG /
	 * CPU_HOTPLUG_NONE; consumed and cleared by cpu_hotplug_thread(). */
	unsigned int flgs[MAX_CLUSTRS];
	/* Upper bound on online CPUs per cluster (sysfs / cpufreq writable). */
	unsigned int max_num[MAX_CLUSTRS];
	/* Governor-requested online CPU count per cluster. */
	unsigned int gov_num[MAX_CLUSTRS];
	/* Mask of all CPUs belonging to each cluster (from topology). */
	cpumask_t cpumask[MAX_CLUSTRS];
	/* Total CPU count per cluster (weight of cpumask[]). */
	unsigned int cpunum[MAX_CLUSTRS];
	/* Lower bound on online CPUs per cluster; set to 1 at init. */
	unsigned int min_num[MAX_CLUSTRS];
	/* Worker thread performing the actual device_online()/offline(). */
	struct task_struct *hotplug_thread;
	/* Idle placeholder task per cluster; its cpus_allowed mask and
	 * pi_lock are (ab)used by cpu_hotplug_thread() to pick an
	 * offline target via select_cpu_for_hotplug(). */
	struct task_struct *null_thread[MAX_CLUSTRS];
	/* Protects all of the above. */
	struct mutex mutex;
};

static struct cpu_hotplug_s hpg;
int cpu_hotplug_cpumask_init(void)
|
|
{
|
|
int cpu, clstr;
|
|
|
|
hpg.clusters = 0;
|
|
for (clstr = 0; clstr < MAX_CLUSTRS; clstr++) {
|
|
hpg.cpunum[clstr] = 0;
|
|
cpumask_clear(&hpg.cpumask[clstr]);
|
|
}
|
|
|
|
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
|
|
clstr = topology_physical_package_id(cpu);
|
|
if (clstr < 0)
|
|
continue;
|
|
clstr &= (MAX_CLUSTRS - 1);
|
|
cpumask_set_cpu(cpu, &hpg.cpumask[clstr]);
|
|
if (hpg.clusters < clstr + 1)
|
|
hpg.clusters = clstr + 1;
|
|
hpg.cpunum[clstr]++;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
|
|
unsigned int cpu_hotplug_get_max(unsigned int clustr)
|
|
{
|
|
if (clustr >= hpg.clusters)
|
|
return 0;
|
|
return hpg.max_num[clustr];
|
|
}
|
|
unsigned int cpu_hotplug_get_min(int clustr)
|
|
{
|
|
return hpg.min_num[clustr];
|
|
}
|
|
|
|
int cpu_num_online(int clustr)
|
|
{
|
|
cpumask_t mask;
|
|
|
|
cpumask_and(&mask, &hpg.cpumask[clustr], cpu_online_mask);
|
|
return cpumask_weight(&mask);
|
|
}
|
|
|
|
void cpu_hotplug_set_max(unsigned int num, int clustr)
|
|
{
|
|
unsigned int cpu_online;
|
|
|
|
if (clustr > hpg.clusters) {
|
|
dev_err(NULL, " %s <:%d %d>\n", __func__, num, clustr);
|
|
return;
|
|
}
|
|
|
|
if (!mutex_trylock(&hpg.mutex))
|
|
return;
|
|
if (num > hpg.cpunum[clustr])
|
|
num = hpg.cpunum[clustr];
|
|
if (hpg.max_num[clustr] == num) {
|
|
mutex_unlock(&hpg.mutex);
|
|
return;
|
|
}
|
|
|
|
cpu_online = cpu_num_online(clustr);
|
|
hpg.max_num[clustr] = num;
|
|
if (num < cpu_online) {
|
|
hpg.flgs[clustr] = CPU_HOTPLUG_UNPLUG;
|
|
if (hpg.hotplug_thread)
|
|
wake_up_process(hpg.hotplug_thread);
|
|
} else if (num > cpu_online) {
|
|
if (cpu_online < hpg.gov_num[clustr]) {
|
|
hpg.flgs[clustr] = CPU_HOTPLUG_PLUG;
|
|
if (hpg.hotplug_thread)
|
|
wake_up_process(hpg.hotplug_thread);
|
|
}
|
|
}
|
|
mutex_unlock(&hpg.mutex);
|
|
}
|
|
|
|
/*
 * cpufreq-facing alias: cap cluster @c at @n online CPUs.
 * Delegates directly to cpu_hotplug_set_max().
 */
void cpufreq_set_max_cpu_num(unsigned int n, unsigned int c)
{
	cpu_hotplug_set_max(n, c);
}
/*
 * Governor entry point: request @num online CPUs on cluster @clustr and
 * record @flg (CPU_HOTPLUG_PLUG/UNPLUG) for the worker thread.
 * @mask is currently unused but kept for interface compatibility.
 *
 * Returns 0 on success, -1 for an invalid cluster, num == 0, or an
 * unchanged target.
 *
 * Fixes: bounds-check the cluster index FIRST — the original read
 * hpg.gov_num[clustr] before validating, and its '>' test still let
 * clustr == hpg.clusters (one past the arrays) and negative values
 * through, both out-of-bounds accesses.
 */
int cpu_hotplug_gov(int clustr, int num, int flg, cpumask_t *mask)
{
	if (clustr < 0 || clustr >= hpg.clusters)
		return -1;
	/* NOTE(review): this unlocked read of gov_num is a pre-existing
	 * benign race kept for behavior compatibility; it is re-clamped
	 * under the mutex below. */
	if (!num || hpg.gov_num[clustr] == num)
		return -1;

	mutex_lock(&hpg.mutex);
	if (num > hpg.cpunum[clustr])
		num = hpg.cpunum[clustr];
	hpg.gov_num[clustr] = num;
	hpg.flgs[clustr] = flg;
	if (hpg.hotplug_thread)
		wake_up_process(hpg.hotplug_thread);
	mutex_unlock(&hpg.mutex);
	return 0;
}
/*
 * Worker thread that performs the actual CPU plug/unplug work.
 *
 * Loop: take hpg.mutex (trylock, retrying after a short sleep so a
 * contended caller is never blocked), scan every cluster's pending flag,
 * perform the requested transition, then sleep until woken by
 * cpu_hotplug_set_max()/cpu_hotplug_gov().
 *
 * __ref: calls cpu-hotplug device helpers; annotation suppresses
 * section-mismatch warnings for that.
 */
static int __ref cpu_hotplug_thread(void *data)
{
	unsigned int clustr, cpu, flg, online;
	int target, cnt;
	unsigned long flags;

	while (1) {
		if (kthread_should_stop())
			break;
		/* Avoid blocking: if the mutex is busy, back off briefly. */
		if (!mutex_trylock(&hpg.mutex)) {
			usleep_range(1000, 2000);
			continue;
		}
		for (clustr = 0; clustr < hpg.clusters; clustr++) {
			if (!hpg.flgs[clustr])
				continue;
			/* Consume the pending request for this cluster. */
			flg = hpg.flgs[clustr];
			hpg.flgs[clustr] = 0;
			if (flg == CPU_HOTPLUG_PLUG) {
				/* Bring CPUs online until both the governor
				 * target and the max limit are reached. */
				for_each_cpu(cpu, &hpg.cpumask[clustr]) {
					if (cpu_online(cpu))
						continue;
					online = cpu_num_online(clustr);
					if (online >= hpg.gov_num[clustr] ||
					    online >= hpg.max_num[clustr])
						break;
					device_online(get_cpu_device(cpu));
					/* Let the null thread run there so it
					 * stays a valid unplug-selection aid. */
					cpumask_set_cpu(cpu,
						&hpg.null_thread[clustr]->cpus_allowed);
				}
			} else if (flg == CPU_HOTPLUG_UNPLUG) {
				cnt = 0;
				while ((online = cpu_num_online(clustr)) > 0) {
					if (online <= hpg.gov_num[clustr] &&
					    online <= hpg.max_num[clustr])
						break;
					/* Safety valve: cap iterations so a
					 * never-offlinable CPU can't spin us. */
					if (cnt++ > 20)
						break;
					/* pi_lock is held around the scheduler
					 * query on the null thread, as the
					 * wakeup path would do. */
					raw_spin_lock_irqsave(
						&hpg.null_thread[clustr]->pi_lock,
						flags);
					/* Start from the cluster's second CPU;
					 * the first is never offlined. */
					target = cpumask_next(
						cpumask_first(&hpg.cpumask[clustr]),
						&hpg.cpumask[clustr]);
					/* Ask the scheduler which CPU it would
					 * pick for the (idle) null thread and
					 * offline that one. */
					target = select_cpu_for_hotplug(
						hpg.null_thread[clustr],
						target, SD_BALANCE_WAKE, 0);
					raw_spin_unlock_irqrestore(
						&hpg.null_thread[clustr]->pi_lock,
						flags);
					/* Skip targets outside this cluster. */
					if (!cpumask_test_cpu(target,
						&hpg.cpumask[clustr])) {
						goto clear_cpu;
					}
					/* Never offline the boot cluster's
					 * first CPU or an already-off CPU. */
					if (!cpu_online(target) ||
					    (cpumask_first(hpg.cpumask) == target &&
					     clustr == 0))
						goto clear_cpu;
					device_offline(get_cpu_device(target));
clear_cpu:
					/* Either way, stop the null thread from
					 * being placed on this CPU again. */
					cpumask_clear_cpu(target,
						&hpg.null_thread[clustr]->cpus_allowed);
				}
			}
		}
		mutex_unlock(&hpg.mutex);
		/* Sleep until the next wake_up_process() from a requester. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		set_current_state(TASK_RUNNING);
	}
	return 1;
}
static int do_null_task(void *data)
|
|
{
|
|
while (1) {
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
schedule();
|
|
set_current_state(TASK_RUNNING);
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
static ssize_t show_hotplug_max_cpus(struct kobject *kobj,
|
|
struct attribute *attr, char *buf)
|
|
{
|
|
unsigned int max = 0;
|
|
unsigned int c = 0;
|
|
|
|
for (c = 0; c < hpg.clusters; c++)
|
|
max |= cpu_hotplug_get_max(c) << (c * 8);
|
|
return sprintf(buf, "%u\n", max);
|
|
}
|
|
|
|
/*
 * sysfs store: parse an unsigned integer carrying one byte per cluster
 * (same packing as show_hotplug_max_cpus) and apply each byte as that
 * cluster's max CPU limit.
 *
 * Fix: check the kstrtouint() result.  The original ignored it, so a
 * malformed write went on to read the uninitialized 'input' (undefined
 * behavior) and apply garbage limits.  Now the parse error (negative
 * errno) is propagated to the writer.
 */
static ssize_t store_hotplug_max_cpus(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	unsigned int input;
	unsigned int max;
	unsigned int c;
	int ret;

	ret = kstrtouint(buf, 0, &input);
	if (ret)
		return ret;

	for (c = 0; c < hpg.clusters; c++) {
		max = input & 0xff;
		cpu_hotplug_set_max(max, c);
		input = input >> 8;
	}
	return count;
}
define_one_global_rw(hotplug_max_cpus);
|
|
|
|
|
|
/*
 * Module init: discover cluster topology, spawn one null placeholder
 * thread per cluster, seed per-cluster limits, start the FIFO-priority
 * hotplug worker thread, and expose the hotplug_max_cpus sysfs file.
 *
 * Fixes:
 *  - kthread_create() returns ERR_PTR() on failure, never NULL; the
 *    original '!ptr' test could pass an error pointer to cpumask_copy().
 *  - The null-thread error path returned with hpg.mutex still held.
 *  - The sysfs_create_file() result is no longer silently ignored.
 */
static int __init cpu_hotplug_init(void)
{
	int clstr;
	int err;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	mutex_init(&hpg.mutex);
	mutex_lock(&hpg.mutex);
	cpu_hotplug_cpumask_init();
	for (clstr = 0; clstr < hpg.clusters; clstr++) {
		hpg.null_thread[clstr] = kthread_create(do_null_task,
							NULL, "null");
		if (IS_ERR(hpg.null_thread[clstr])) {
			err = PTR_ERR(hpg.null_thread[clstr]);
			hpg.null_thread[clstr] = NULL;
			mutex_unlock(&hpg.mutex);
			return err;
		}
		/* Confine the null thread to this cluster, excluding the
		 * cluster's first CPU (which is never offlined). */
		cpumask_copy(&hpg.null_thread[clstr]->cpus_allowed,
			     &hpg.cpumask[clstr]);
		cpumask_clear_cpu(cpumask_first(&hpg.cpumask[clstr]),
				  &hpg.null_thread[clstr]->cpus_allowed);

		wake_up_process(hpg.null_thread[clstr]);
		/* Defaults: allow all CPUs, keep at least one online. */
		hpg.max_num[clstr] = hpg.cpunum[clstr];
		hpg.gov_num[clstr] = hpg.cpunum[clstr];
		hpg.flgs[clstr] = CPU_HOTPLUG_NONE;
		hpg.min_num[clstr] = 1;
	}

	/* NOTE(review): "cpu_hogplug_thread" is a pre-existing typo in the
	 * visible thread name; kept as-is to avoid changing what tools see. */
	hpg.hotplug_thread = kthread_create(cpu_hotplug_thread,
					    NULL, "cpu_hogplug_thread");
	if (IS_ERR(hpg.hotplug_thread)) {
		err = PTR_ERR(hpg.hotplug_thread);
		hpg.hotplug_thread = NULL;
		mutex_unlock(&hpg.mutex);
		return err;
	}

	sched_setscheduler_nocheck(hpg.hotplug_thread, SCHED_FIFO, &param);
	/* Hold a reference so exit can kthread_stop() + put safely. */
	get_task_struct(hpg.hotplug_thread);
	mutex_unlock(&hpg.mutex);
	wake_up_process(hpg.hotplug_thread);

	err = sysfs_create_file(&cpu_subsys.dev_root->kobj,
				&hotplug_max_cpus.attr);
	if (err)
		pr_warn("%s: failed to create sysfs file (%d)\n",
			__func__, err);

	return 0;
}
/*
 * Module exit: tear down in reverse order of init.
 *
 * Fixes:
 *  - Remove the hotplug_max_cpus sysfs file created in init (the
 *    original leaked it across module unload).
 *  - NULL-guard the thread pointers so a partially failed init does not
 *    crash here (kthread_stop(NULL) is not allowed).
 */
static void __exit cpu_hotplug_exit(void)
{
	unsigned int c;

	sysfs_remove_file(&cpu_subsys.dev_root->kobj,
			  &hotplug_max_cpus.attr);

	for (c = 0; c < hpg.clusters; c++) {
		if (hpg.null_thread[c])
			kthread_stop(hpg.null_thread[c]);
	}
	if (hpg.hotplug_thread) {
		kthread_stop(hpg.hotplug_thread);
		put_task_struct(hpg.hotplug_thread);
	}
}
MODULE_DESCRIPTION("amlogic cpu hotplug");
|
|
MODULE_LICENSE("GPL v2");
|
|
|
|
module_init(cpu_hotplug_init);
|
|
module_exit(cpu_hotplug_exit);
|
|
|