mirror of https://github.com/hardkernel/linux.git
Merge tag 'v3.8.13.25' of git://kernel.ubuntu.com/ubuntu/linux into odroid-3.8.y
v3.8.13.25
Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 13
-EXTRAVERSION = .24
+EXTRAVERSION = .25
 NAME = Remoralised Urchins Update

 # *DOCUMENTATION*
@@ -70,7 +70,7 @@
 	ipu: ipu@18000000 {
 		#crtc-cells = <1>;
 		compatible = "fsl,imx53-ipu";
-		reg = <0x18000000 0x080000000>;
+		reg = <0x18000000 0x08000000>;
 		interrupts = <11 10>;
 	};
@@ -164,8 +164,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		const typeof(*(p)) __user *__tmp_p = (p);		\
		register const typeof(*(p)) __r2 asm("r2") = (x);	\
-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(

	pdev = platform_device_alloc("mx3-camera", 0);
	if (!pdev)
-		goto err;
+		return ERR_PTR(-ENOMEM);

	pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
	if (!pdev->dev.dma_mask)
@@ -418,7 +418,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
	.clkdm_name	= "dpll4_clkdm",
 };

-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+			dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);

 static struct clk dpll4_m5x2_ck_3630 = {
	.name		= "dpll4_m5x2_ck",
@@ -79,7 +79,9 @@ CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)

 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)

-KBUILD_CPPFLAGS	+= -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS	+= -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS	+= -Iarch/$(ARCH)
 KBUILD_CFLAGS	+= -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP		= $(CC) -E $(KBUILD_CFLAGS)
@@ -309,11 +309,16 @@ n:
 *	ld	rY,ADDROFF(name)(rX)
 */
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis     reg,(expr)@highest;		\
	ori     reg,reg,(expr)@higher;		\
	rldicr  reg,reg,32,31;			\
-	oris    reg,reg,(expr)@h;		\
+	oris    reg,reg,(expr)@__AS_ATHIGH;	\
	ori     reg,reg,(expr)@l;

 #define LOAD_REG_ADDR(reg,name)			\
@@ -51,6 +51,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
 {
+	ptep_clear_flush(vma, addr, ptep);
 }

 static inline int huge_pte_none(pte_t pte)
@@ -20,6 +20,8 @@
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>

+int sysctl_ldt16 = 0;
+
 #ifdef CONFIG_SMP
 static void flush_ldt(void *current_mm)
 {
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
	 * IRET leaking the high bits of the kernel stack address.
	 */
 #ifdef CONFIG_X86_64
-	if (!ldt_info.seg_32bit) {
+	if (!ldt_info.seg_32bit && !sysctl_ldt16) {
		error = -EINVAL;
		goto out_unlock;
	}
@@ -41,6 +41,7 @@ enum {
 #ifdef CONFIG_X86_64
 #define vdso_enabled			sysctl_vsyscall32
 #define arch_setup_additional_pages	syscall32_setup_pages
+extern int sysctl_ldt16;
 #endif

 /*
@@ -380,6 +381,13 @@ static ctl_table abi_table2[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
+	{
+		.procname	= "ldt16",
+		.data		= &sysctl_ldt16,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
	{}
 };
@@ -397,6 +397,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
		},
	},
+	/*
+	 * Without this this EEEpc exports a non working WMI interface, with
+	 * this it exports a working "good old" eeepc_laptop interface, fixing
+	 * both brightness control, and rfkill not working.
+	 */
+	{
+	.callback = dmi_enable_osi_linux,
+	.ident = "Asus EEE PC 1015PX",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+		},
+	},
	{}
 };
@@ -16,9 +16,13 @@
	char *tmp;						\
								\
	tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);	\
-	sprintf(tmp, format, param);				\
-	strcat(str, tmp);					\
-	kfree(tmp);						\
+	if (likely(tmp)) {					\
+		sprintf(tmp, format, param);			\
+		strcat(str, tmp);				\
+		kfree(tmp);					\
+	} else {						\
+		strcat(str, "kmalloc failure in SPRINTFCAT");	\
+	}							\
 }

 static void report_jump_idx(u32 status, char *outstr)
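The SPRINTFCAT rework above is the standard unchecked-kmalloc fix: the temporary buffer is only written and freed when the allocation succeeded, and the failure path degrades to a fixed message instead of dereferencing NULL. A minimal userspace sketch of the same shape, using illustrative names (appendf, max_alloc) rather than the driver's macro:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append formatted text to dst, tolerating allocation failure --
 * the same shape as the SPRINTFCAT fix above. */
static void appendf(char *dst, const char *fmt, unsigned int param, size_t max_alloc)
{
    char *tmp = malloc(strlen(fmt) + max_alloc);

    if (tmp) {
        sprintf(tmp, fmt, param);
        strcat(dst, tmp);
        free(tmp);
    } else {
        strcat(dst, "allocation failure");  /* degrade gracefully */
    }
}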
@@ -224,12 +224,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,

 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-	u32 activation;
-
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = __raw_readl(XOR_ACTIVATION(chan));
-	activation |= 0x1;
-	__raw_writel(activation, XOR_ACTIVATION(chan));
+
+	/* writel ensures all descriptors are flushed before activation */
+	writel(BIT(0), XOR_ACTIVATION(chan));
 }

 static char mv_chan_is_busy(struct mv_xor_chan *chan)
@@ -173,6 +173,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
		}
	}

+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
	if (!found)
		return false;
@@ -459,28 +459,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)

		radeon_vm_init(rdev, &fpriv->vm);

-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
+
+			/* map the ib pool buffer read only into
+			 * virtual address space */
+			bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+						 rdev->ring_tmp_bo.bo);
+			r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+						  RADEON_VM_PAGE_READABLE |
+						  RADEON_VM_PAGE_SNOOPED);
+
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
		}

-		/* map the ib pool buffer read only into
-		 * virtual address space */
-		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-					 rdev->ring_tmp_bo.bo);
-		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-					  RADEON_VM_PAGE_READABLE |
-					  RADEON_VM_PAGE_SNOOPED);
-
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
-		}
-
		file_priv->driver_priv = fpriv;
	}
	return 0;
@@ -505,13 +506,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
		struct radeon_bo_va *bo_va;
		int r;

-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (!r) {
-			bo_va = radeon_vm_bo_find(&fpriv->vm,
-						  rdev->ring_tmp_bo.bo);
-			if (bo_va)
-				radeon_vm_bo_rmv(rdev, bo_va);
-			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (!r) {
+				bo_va = radeon_vm_bo_find(&fpriv->vm,
+							  rdev->ring_tmp_bo.bo);
+				if (bo_va)
+					radeon_vm_bo_rmv(rdev, bo_va);
+				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			}
		}

		radeon_vm_fini(rdev, &fpriv->vm);
@@ -578,22 +578,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
-		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.start << PAGE_SHIFT;
-		if ((offset + size) > rdev->mc.visible_vram_size) {
-			/* hurrah the memory is not visible ! */
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			r = ttm_bo_validate(bo, &rbo->placement, false, false);
-			if (unlikely(r != 0))
-				return r;
-			offset = bo->mem.start << PAGE_SHIFT;
-			/* this should not happen */
-			if ((offset + size) > rdev->mc.visible_vram_size)
-				return -EINVAL;
-		}
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* hurrah the memory is not visible ! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &rbo->placement, false, false);
+	} else if (unlikely(r != 0)) {
+		return r;
	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > rdev->mc.visible_vram_size)
+		return -EINVAL;
+
	return 0;
 }
@@ -162,7 +162,7 @@ static ssize_t store_hyst(struct device *dev,
	if (retval < 0)
		goto fail;

-	hyst = val - retval * 1000;
+	hyst = retval * 1000 - val;
	hyst = DIV_ROUND_CLOSEST(hyst, 1000);
	if (hyst < 0 || hyst > 255) {
		retval = -ERANGE;
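The sign flip above matters because the chip stores hysteresis as whole degrees below the limit; a quick worked example with illustrative values (not taken from the driver) shows why the old expression always fell outside the 0..255 range check:

#include <stdio.h>

int main(void)
{
	long limit_mc = 85 * 1000; /* limit read back, scaled to millidegrees */
	long val_mc = 75000;       /* requested hysteresis point, millidegrees */

	/* fixed order: limit minus requested, then DIV_ROUND_CLOSEST(x, 1000) */
	long hyst = (limit_mc - val_mc + 500) / 1000;

	printf("register value = %ld\n", hyst); /* 10, within 0..255 */
	/* old order gave 75000 - 85000 = -10000 -> -10 -> -ERANGE */
	return 0;
}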
@@ -295,7 +295,7 @@ static int emc1403_detect(struct i2c_client *client,
	}

	id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
-	if (id != 0x01)
+	if (id < 0x01 || id > 0x04)
		return -ENODEV;

	return 0;
@@ -358,6 +358,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
	dw_writel(dev, ic_con, DW_IC_CON);

+	/* enforce disabled interrupts (due to HW issues) */
+	i2c_dw_disable_int(dev);
+
	/* Enable the adapter */
	dw_writel(dev, 1, DW_IC_ENABLE);
@@ -541,6 +541,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,

	ret = -EINVAL;
	for (i = 0; i < num; i++) {
+		/* This HW can't send STOP after address phase */
+		if (msgs[i].len == 0) {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+
		/*-------------- spin lock -----------------*/
		spin_lock_irqsave(&priv->lock, flags);

@@ -605,7 +611,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,

 static u32 rcar_i2c_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	/* This HW can't do SMBUS_QUICK and NOSTART */
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 }

 static const struct i2c_algorithm rcar_i2c_algo = {
@@ -1209,10 +1209,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);

-	i2c->suspended = 0;
	clk_prepare_enable(i2c->clk);
	s3c24xx_i2c_init(i2c);
	clk_disable_unprepare(i2c->clk);
+	i2c->suspended = 0;

	return 0;
 }
@@ -1530,7 +1530,7 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
		},
-		.driver_data = (int []){1024, 5056, 2058, 4832},
+		.driver_data = (int []){1024, 5112, 2024, 4832},
	},
	{
		/* Lenovo ThinkPad L540 */
@@ -1540,6 +1540,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
		},
		.driver_data = (int []){1024, 5112, 2024, 4832},
	},
+	{
+		/* Lenovo ThinkPad W540 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W540"),
+		},
+		.driver_data = (int []){1024, 5112, 2024, 4832},
+	},
	{
		/* Lenovo Yoga S1 */
		.matches = {
@@ -4004,7 +4004,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
	iommu_flush_dte(iommu, devid);
	if (devid != alias) {
		irq_lookup_table[alias] = table;
-		set_dte_irq_entry(devid, table);
+		set_dte_irq_entry(alias, table);
		iommu_flush_dte(iommu, alias);
	}
@@ -18,7 +18,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -44,6 +43,7 @@ struct convert_context {
	unsigned int idx_out;
	sector_t cc_sector;
	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };

 /*
@@ -105,15 +105,7 @@ struct iv_lmk_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
 */
 struct crypt_config {
	struct dm_dev *dev;
@@ -143,12 +135,6 @@ struct crypt_config {
	sector_t iv_offset;
	unsigned int iv_size;

-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
@@ -184,11 +170,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -738,16 +719,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }

 /*
@@ -756,7 +736,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->cc_pending, 1);
@@ -768,7 +747,7 @@ static int crypt_convert(struct crypt_config *cc,

		atomic_inc(&ctx->cc_pending);

-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
@@ -777,7 +756,7 @@ static int crypt_convert(struct crypt_config *cc,
			INIT_COMPLETION(ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

@@ -877,6 +856,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
+	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);

	return io;
@@ -902,6 +882,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
	if (!atomic_dec_and_test(&io->io_pending))
		return;

+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
@@ -1327,8 +1309,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;

	ti->private = NULL;

@@ -1340,13 +1320,6 @@ static void crypt_dtr(struct dm_target *ti)
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
	crypt_free_tfms(cc);

	if (cc->bs)
@@ -1365,9 +1338,6 @@ static void crypt_dtr(struct dm_target *ti)
	if (cc->dev)
		dm_put_device(ti, cc->dev);

-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

@@ -1422,13 +1392,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
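The dm-crypt hunks above collapse the per-CPU crypt_cpu state into the per-request convert_context, so the in-flight cipher request travels with the I/O rather than with whichever CPU happens to run it. A minimal sketch of that ownership move, with illustrative types rather than the driver's real ones:

#include <stddef.h>

/* Before: one request slot per CPU; an I/O migrating between CPUs
 * could observe or clobber another I/O's request. */
struct percpu_state { void *req; };

/* After: the slot lives in the per-I/O context, as in the hunks above. */
struct convert_ctx {
	unsigned int idx_in, idx_out;
	void *req; /* owned by this I/O from allocation to completion */
};

static void *alloc_req(struct convert_ctx *ctx, void *(*pool_alloc)(void))
{
	if (!ctx->req)
		ctx->req = pool_alloc(); /* lazily bind the request to the I/O */
	return ctx->req;
}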
@@ -7305,8 +7305,10 @@ void md_do_sync(struct md_thread *thread)
	/* just incase thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
-	if (mddev->ro) /* never try to sync a read-only array */
+	if (mddev->ro) {/* never try to sync a read-only array */
+		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
+	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
@@ -8441,7 +8443,8 @@ static int md_notify_reboot(struct notifier_block *this,
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
-			mddev->safemode = 2;
+			if (mddev->persistent)
+				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
@@ -937,7 +937,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
	 * windows that fall outside that.
	 */
	for (i = 0; i < N_WIN_SIZES; i++) {
-		struct ov7670_win_size *win = &ov7670_win_sizes[index];
+		struct ov7670_win_size *win = &ov7670_win_sizes[i];
		if (info->min_width && win->width < info->min_width)
			continue;
		if (info->min_height && win->height < info->min_height)
@@ -92,6 +92,7 @@ static long media_device_enum_entities(struct media_device *mdev,
	struct media_entity *ent;
	struct media_entity_desc u_ent;

+	memset(&u_ent, 0, sizeof(u_ent));
	if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
		return -EFAULT;
@@ -178,7 +178,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)

	f_ref = 2UL * priv->cfg->clock / r_val;
	n_val = div_u64_rem(f_vco, f_ref, &k_val);
-	k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
+	k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);

	ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
	if (ret < 0)
@@ -331,8 +331,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
	if (ret < 0)
		goto err;

-	ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
-			fc2580_if_filter_lut[i].mul / 1000000000);
+	ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
+			fc2580_if_filter_lut[i].mul, 1000000000));
	if (ret < 0)
		goto err;
@@ -22,6 +22,7 @@
 #define FC2580_PRIV_H

 #include "fc2580.h"
+#include <linux/math64.h>

 struct fc2580_reg_val {
	u8 reg;
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {

 static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
+	if (get_user(kp->type, &up->type))
+		return -EFAULT;
+
	switch (kp->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us

 static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
-			get_user(kp->type, &up->type))
-			return -EFAULT;
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+		return -EFAULT;
	return __get_v4l2_format32(kp, up);
 }

 static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-			copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
-			return -EFAULT;
+			copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+			return -EFAULT;
	return __get_v4l2_format32(&kp->format, &up->format);
 }
@@ -555,7 +555,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
	struct sja1000_priv *priv;
	struct peak_pci_chan *chan;
-	struct net_device *dev;
+	struct net_device *dev, *prev_dev;
	void __iomem *cfg_base, *reg_base;
	u16 sub_sys_id, icr;
	int i, err, channels;
@@ -691,11 +691,13 @@ failure_remove_channels:
	writew(0x0, cfg_base + PITA_ICR + 2);

	chan = NULL;
-	for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-		unregister_sja1000dev(dev);
-		free_sja1000dev(dev);
+	for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
		priv = netdev_priv(dev);
		chan = priv->priv;
+		prev_dev = chan->prev_dev;
+
+		unregister_sja1000dev(dev);
+		free_sja1000dev(dev);
	}

	/* free any PCIeC resources too */
@@ -729,10 +731,12 @@ static void peak_pci_remove(struct pci_dev *pdev)

	/* Loop over all registered devices */
	while (1) {
+		struct net_device *prev_dev = chan->prev_dev;
+
		dev_info(&pdev->dev, "removing device %s\n", dev->name);
		unregister_sja1000dev(dev);
		free_sja1000dev(dev);
-		dev = chan->prev_dev;
+		dev = prev_dev;

		if (!dev) {
			/* do that only for first channel */
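The peak_pci loops above are the classic save-before-free traversal fix: the link to the next element must be read before the current node (and the private data it hangs off) is freed. A generic sketch of the pattern, with illustrative names rather than the driver's structures:

#include <stdlib.h>

struct chan {
	struct chan *prev_dev; /* link to the previously registered device */
};

/* Safe variant, as in the fixed loops above: read the link first,
 * then release the node. Reading node->prev_dev after free(node)
 * would be a use-after-free. */
static void teardown(struct chan *head)
{
	struct chan *node = head;

	while (node) {
		struct chan *prev = node->prev_dev; /* save link first */
		free(node);                         /* then release */
		node = prev;
	}
}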
@@ -1119,6 +1119,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
		phyid = be32_to_cpup(parp+1);
		mdio = of_find_device_by_node(mdio_node);
+		if (!mdio) {
+			pr_err("Missing mdio platform device\n");
+			return -EINVAL;
+		}
		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
			 PHY_ID_FMT, mdio->name, phyid);
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
		return WRONG_BUS_FREQUENCY;
	}

-	bsp = ctrl->pci_dev->bus->cur_bus_speed;
-	msp = ctrl->pci_dev->bus->max_bus_speed;
+	bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+	msp = ctrl->pci_dev->subordinate->max_bus_speed;

	/* Check if there are other slots or devices on the same bus */
	if (!list_empty(&ctrl->pci_dev->subordinate->devices))
@@ -2219,6 +2219,7 @@ static void __exit speakup_exit(void)
	unregister_keyboard_notifier(&keyboard_notifier_block);
	unregister_vt_notifier(&vt_notifier_block);
	speakup_unregister_devsynth();
+	speakup_cancel_paste();
	del_timer(&cursor_timer);
	kthread_stop(speakup_task);
	speakup_task = NULL;
@@ -3,6 +3,8 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/selection.h>
+#include <linux/workqueue.h>
+#include <asm/cmpxchg.h>

 #include "speakup.h"

@@ -121,20 +123,24 @@ int speakup_set_selection(struct tty_struct *tty)
	return 0;
 }

-/* TODO: move to some helper thread, probably. That'd fix having to check for
- * in_atomic(). */
-int speakup_paste_selection(struct tty_struct *tty)
+struct speakup_paste_work {
+	struct work_struct work;
+	struct tty_struct *tty;
+};
+
+static void __speakup_paste_selection(struct work_struct *work)
 {
+	struct speakup_paste_work *spw =
+		container_of(work, struct speakup_paste_work, work);
+	struct tty_struct *tty = xchg(&spw->tty, NULL);
	struct vc_data *vc = (struct vc_data *) tty->driver_data;
	int pasted = 0, count;
	DECLARE_WAITQUEUE(wait, current);
+
	add_wait_queue(&vc->paste_wait, &wait);
	while (sel_buffer && sel_buffer_lth > pasted) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (test_bit(TTY_THROTTLED, &tty->flags)) {
-			if (in_atomic())
-				/* if we are in an interrupt handler, abort */
-				break;
			schedule();
			continue;
		}
@@ -146,6 +152,26 @@ int speakup_paste_selection(struct tty_struct *tty)
	}
	remove_wait_queue(&vc->paste_wait, &wait);
	current->state = TASK_RUNNING;
+	tty_kref_put(tty);
+}
+
+static struct speakup_paste_work speakup_paste_work = {
+	.work = __WORK_INITIALIZER(speakup_paste_work.work,
+				   __speakup_paste_selection)
+};
+
+int speakup_paste_selection(struct tty_struct *tty)
+{
+	if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
+		return -EBUSY;
+
+	tty_kref_get(tty);
+	schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
+	return 0;
+}
+
+void speakup_cancel_paste(void)
+{
+	cancel_work_sync(&speakup_paste_work.work);
+	tty_kref_put(speakup_paste_work.tty);
 }
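The paste rework above moves the copy loop into a workqueue and uses cmpxchg on the tty pointer as a single-flight guard: only one paste can be queued, and the worker takes ownership exactly once with xchg. A minimal userspace sketch of that guard using C11 atomics in place of the kernel's cmpxchg/xchg, with illustrative names:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static _Atomic(void *) pending; /* plays the role of speakup_paste_work.tty */

/* Claim the single slot; later submitters lose until the worker drains it. */
static int submit(void *item)
{
	void *expected = NULL;

	if (!atomic_compare_exchange_strong(&pending, &expected, item))
		return -1; /* like -EBUSY: a paste is already queued */
	/* here the kernel code would schedule_work_on(...) */
	return 0;
}

/* Worker side: take ownership exactly once, like xchg(&spw->tty, NULL). */
static void *take(void)
{
	return atomic_exchange(&pending, NULL);
}

int main(void)
{
	int x = 42;

	printf("first submit: %d\n", submit(&x));  /* 0 */
	printf("second submit: %d\n", submit(&x)); /* -1 while still pending */
	printf("worker took: %p\n", take());
	printf("resubmit: %d\n", submit(&x));      /* 0 again */
	return 0;
}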
@@ -84,6 +84,7 @@ extern void synth_buffer_clear(void);
 extern void speakup_clear_selection(void);
 extern int speakup_set_selection(struct tty_struct *tty);
 extern int speakup_paste_selection(struct tty_struct *tty);
+extern void speakup_cancel_paste(void);
 extern void speakup_register_devsynth(void);
 extern void speakup_unregister_devsynth(void);
 extern void synth_write(const char *buf, size_t count);
@@ -2023,6 +2023,11 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
+	if (!(dev->dev_flags & DF_CONFIGURED)) {
+		pr_err("Unable to set alua_access_state while device is"
+		       " not configured\n");
+		return -ENODEV;
+	}

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
@@ -1747,10 +1747,13 @@ int usb_runtime_suspend(struct device *dev)
	if (status == -EAGAIN || status == -EBUSY)
		usb_mark_last_busy(udev);

-	/* The PM core reacts badly unless the return code is 0,
-	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
+	/*
+	 * The PM core reacts badly unless the return code is 0,
+	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
+	 * (except for root hubs, because they don't suspend through
+	 * an upstream port like other USB devices).
	 */
-	if (status != 0)
+	if (status != 0 && udev->parent)
		return -EBUSY;
	return status;
 }
@@ -1673,8 +1673,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
	 */
	pm_runtime_set_autosuspend_delay(&hdev->dev, 0);

-	/* Hubs have proper suspend/resume support. */
-	usb_enable_autosuspend(hdev);
+	/*
+	 * Hubs have proper suspend/resume support, except for root hubs
+	 * where the controller driver doesn't have bus_suspend and
+	 * bus_resume methods.
+	 */
+	if (hdev->parent) {		/* normal device */
+		usb_enable_autosuspend(hdev);
+	} else {			/* root hub */
+		const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
+
+		if (drv->bus_suspend && drv->bus_resume)
+			usb_enable_autosuspend(hdev);
+	}

	if (hdev->level == MAX_TOPO_LEVEL) {
		dev_err(&intf->dev,
@@ -1814,6 +1814,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
		kfree(cur_cd);
	}

+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+			while (!list_empty(ep))
+				list_del_init(ep->next);
+		}
+	}
+
	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

@@ -1854,16 +1864,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
	if (!xhci->rh_bw)
		goto no_bw;

-	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-	for (i = 0; i < num_ports; i++) {
-		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
-		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
-			struct list_head *ep = &bwt->interval_bw[j].endpoints;
-			while (!list_empty(ep))
-				list_del_init(ep->next);
-		}
-	}
-
	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
@@ -585,6 +585,8 @@ static struct usb_device_id id_table_combined [] = {
	{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
	{ USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
	/*
	 * ELV devices:
	 */
@@ -538,6 +538,11 @@
 */
 #define FTDI_TIAO_UMPA_PID	0x8a98	/* TIAO/DIYGADGET USB Multi-Protocol Adapter */

+/*
+ * NovaTech product ids (FTDI_VID)
+ */
+#define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+

 /********************************/
 /** third-party VID/PID combos **/
@@ -894,7 +894,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
	firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;

	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
-	i2c_header->Size	= (__u16)buffer_size;
+	i2c_header->Size	= cpu_to_le16(buffer_size);
	i2c_header->CheckSum	= cs;
	firmware_rec->Ver_Major	= OperationalMajorVersion;
	firmware_rec->Ver_Minor	= OperationalMinorVersion;
@@ -594,7 +594,7 @@ struct edge_boot_descriptor {

 struct ti_i2c_desc {
	__u8	Type;		// Type of descriptor
-	__u16	Size;		// Size of data only not including header
+	__le16	Size;		// Size of data only not including header
	__u8	CheckSum;	// Checksum (8 bit sum of data only)
	__u8	Data[0];	// Data starts here
 } __attribute__((packed));
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
+#define NOVATELWIRELESS_PRODUCT_E371		0x9011
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001

@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },

	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -373,8 +373,10 @@ sort_pacl(struct posix_acl *pacl)
	 * by uid/gid. */
	int i, j;

-	if (pacl->a_count <= 4)
-		return; /* no users or groups */
+	/* no users or groups */
+	if (!pacl || pacl->a_count <= 4)
+		return;

	i = 1;
	while (pacl->a_entries[i].e_tag == ACL_USER)
		i++;
@@ -498,13 +500,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)

	/*
	 * ACLs with no ACEs are treated differently in the inheritable
-	 * and effective cases: when there are no inheritable ACEs, we
-	 * set a zero-length default posix acl:
+	 * and effective cases: when there are no inheritable ACEs,
+	 * calls ->set_acl with a NULL ACL structure.
	 */
-	if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
-		pacl = posix_acl_alloc(0, GFP_KERNEL);
-		return pacl ? pacl : ERR_PTR(-ENOMEM);
-	}
+	if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+		return NULL;

	/*
	 * When there are no effective ACEs, the following will end
	 * up setting a 3-element effective posix ACL with all
@@ -1052,6 +1052,7 @@ free_client(struct nfs4_client *clp)
		list_del(&ses->se_perclnt);
		nfsd4_put_session_locked(ses);
	}
+	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_name.data);
	idr_remove_all(&clp->cl_stateids);
@@ -3510,9 +3511,16 @@ out:
 static __be32
 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
 {
-	if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+	if (check_for_locks(stp->st_file, lo))
		return nfserr_locks_held;
	release_lock_stateid(stp);
+	/*
+	 * Currently there's a 1-1 lock stateid<->lockowner
+	 * correspondance, and we have to delete the lockowner when we
+	 * delete the lock stateid:
+	 */
+	unhash_lockowner(lo);
	return nfs_ok;
 }
@@ -3963,6 +3971,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c

	if (!same_owner_str(&lo->lo_owner, owner, clid))
		return false;
+	if (list_empty(&lo->lo_owner.so_stateids)) {
+		WARN_ON_ONCE(1);
+		return false;
+	}
	lst = list_first_entry(&lo->lo_owner.so_stateids,
			       struct nfs4_ol_stateid, st_perstateowner);
	return lst->st_file->fi_inode == inode;
@@ -308,6 +308,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
	return -EINVAL;
 }

+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+	return 0;
+}
+
 static inline int irq_can_set_affinity(unsigned int irq)
 {
	return 0;
@@ -4965,6 +4965,9 @@ struct swevent_htable {

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
+
+	/* Keeps track of cpu being initialized/exited */
+	bool				online;
 };

 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5212,8 +5215,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
-	if (WARN_ON_ONCE(!head))
+	if (!head) {
+		/*
+		 * We can race with cpu hotplug code. Do not
+		 * WARN if the cpu just got unplugged.
+		 */
+		WARN_ON_ONCE(swhash->online);
		return -EINVAL;
+	}

	hlist_add_head_rcu(&event->hlist_entry, head);

@@ -6490,6 +6499,9 @@ SYSCALL_DEFINE5(perf_event_open,
	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
+	} else {
+		if (attr.sample_period & (1ULL << 63))
+			return -EINVAL;
	}

	/*
@@ -7304,6 +7316,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

@@ -7361,6 +7374,7 @@ static void perf_event_exit_cpu(int cpu)
	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
 }
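The perf_event_open check added above rejects sample_period values with bit 63 set, i.e. periods that would go negative once the value is treated as signed further down the accounting path. A one-line illustration of what the check catches (illustrative userspace code, not the kernel's):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period = 1ULL << 63; /* attr.sample_period with the top bit set */

	printf("unsigned: %llu\n", (unsigned long long)period);
	printf("as signed: %lld\n", (long long)period); /* -9223372036854775808 */
	/* the new check: reject anything with bit 63 set */
	printf("rejected: %s\n", (period & (1ULL << 63)) ? "yes" : "no");
	return 0;
}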
kernel/futex.c
@@ -588,6 +588,55 @@ void exit_pi_state_list(struct task_struct *curr)
	raw_spin_unlock_irq(&curr->pi_lock);
 }

+/*
+ * We need to check the following states:
+ *
+ *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
+ *
+ * [1]  NULL   | ---      | ---       |  0        | 0/1    | Valid
+ * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
+ *
+ * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
+ *
+ * [4]  Found  | Found    | NULL      |  0        | 1      | Valid
+ * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
+ *
+ * [6]  Found  | Found    | task      |  0        | 1      | Valid
+ *
+ * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
+ *
+ * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
+ * [9]  Found  | Found    | task      |  0        | 0      | Invalid
+ * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
+ *
+ * [1]	Indicates that the kernel can acquire the futex atomically. We
+ *	came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
+ *
+ * [2]	Valid, if TID does not belong to a kernel thread. If no matching
+ *	thread is found then it indicates that the owner TID has died.
+ *
+ * [3]	Invalid. The waiter is queued on a non PI futex
+ *
+ * [4]	Valid state after exit_robust_list(), which sets the user space
+ *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
+ *
+ * [5]	The user space value got manipulated between exit_robust_list()
+ *	and exit_pi_state_list()
+ *
+ * [6]	Valid state after exit_pi_state_list() which sets the new owner in
+ *	the pi_state but cannot access the user space value.
+ *
+ * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
+ *
+ * [8]	Owner and user space value match
+ *
+ * [9]	There is no transient state which sets the user space TID to 0
+ *	except exit_robust_list(), but this is indicated by the
+ *	FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+ *	TID out of sync.
+ */
 static int
 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
@@ -603,12 +652,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
-			 * Another waiter already exists - bump up
-			 * the refcount and return its pi_state:
+			 * Sanity check the waiter before increasing
+			 * the refcount and attaching to it.
			 */
			pi_state = this->pi_state;
			/*
-			 * Userspace might have messed up non-PI and PI futexes
+			 * Userspace might have messed up non-PI and
+			 * PI futexes [3]
			 */
			if (unlikely(!pi_state))
				return -EINVAL;
@@ -616,34 +666,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
-			 * When pi_state->owner is NULL then the owner died
-			 * and another waiter is on the fly. pi_state->owner
-			 * is fixed up by the task which acquires
-			 * pi_state->rt_mutex.
-			 *
-			 * We do not check for pid == 0 which can happen when
-			 * the owner died and robust_list_exit() cleared the
-			 * TID.
+			 * Handle the owner died case:
			 */
-			if (pid && pi_state->owner) {
+			if (uval & FUTEX_OWNER_DIED) {
				/*
-				 * Bail out if user space manipulated the
-				 * futex value.
+				 * exit_pi_state_list sets owner to NULL and
+				 * wakes the topmost waiter. The task which
+				 * acquires the pi_state->rt_mutex will fixup
+				 * owner.
				 */
-				if (pid != task_pid_vnr(pi_state->owner))
+				if (!pi_state->owner) {
+					/*
+					 * No pi state owner, but the user
+					 * space TID is not 0. Inconsistent
+					 * state. [5]
+					 */
+					if (pid)
+						return -EINVAL;
+					/*
+					 * Take a ref on the state and
+					 * return. [4]
+					 */
+					goto out_state;
+				}
+
+				/*
+				 * If TID is 0, then either the dying owner
+				 * has not yet executed exit_pi_state_list()
+				 * or some waiter acquired the rtmutex in the
+				 * pi state, but did not yet fixup the TID in
+				 * user space.
+				 *
+				 * Take a ref on the state and return. [6]
+				 */
+				if (!pid)
+					goto out_state;
+			} else {
+				/*
+				 * If the owner died bit is not set,
+				 * then the pi_state must have an
+				 * owner. [7]
+				 */
+				if (!pi_state->owner)
					return -EINVAL;
			}

+			/*
+			 * Bail out if user space manipulated the
+			 * futex value. If pi state exists then the
+			 * owner TID must be the same as the user
+			 * space TID. [9/10]
+			 */
+			if (pid != task_pid_vnr(pi_state->owner))
+				return -EINVAL;
+
+		out_state:
			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
-	 * the new pi_state to it, but bail out when TID = 0
+	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
@@ -651,6 +737,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
	if (!p)
		return -ESRCH;

+	if (!p->mm) {
+		put_task_struct(p);
+		return -EPERM;
+	}
+
	/*
	 * We need to look at the task state flags to figure out,
	 * whether the task is exiting. To protect against the do_exit
@@ -671,6 +762,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		return ret;
	}

+	/*
+	 * No existing pi state. First waiter. [2]
+	 */
	pi_state = alloc_pi_state();

	/*
@@ -742,10 +836,18 @@ retry:
		return -EDEADLK;

	/*
-	 * Surprise - we got the lock. Just return to userspace:
+	 * Surprise - we got the lock, but we do not trust user space at all.
	 */
-	if (unlikely(!curval))
-		return 1;
+	if (unlikely(!curval)) {
+		/*
+		 * We verify whether there is kernel state for this
+		 * futex. If not, we can safely assume, that the 0 ->
+		 * TID transition is correct. If state exists, we do
+		 * not bother to fixup the user space state as it was
+		 * corrupted already.
+		 */
+		return futex_top_waiter(hb, key) ? -EINVAL : 1;
+	}

	uval = curval;
@@ -875,6 +977,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
+	int ret = 0;

	if (!pi_state)
		return -EINVAL;
@@ -898,23 +1001,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
	new_owner = this->task;

	/*
-	 * We pass it to the next owner. (The WAITERS bit is always
-	 * kept enabled while there is PI state around. We must also
-	 * preserve the owner died bit.)
+	 * We pass it to the next owner. The WAITERS bit is always
+	 * kept enabled while there is PI state around. We cleanup the
+	 * owner died bit, because we are the owner.
	 */
-	if (!(uval & FUTEX_OWNER_DIED)) {
-		int ret = 0;
-
-		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
-
-		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-			ret = -EFAULT;
-		else if (curval != uval)
-			ret = -EINVAL;
-		if (ret) {
-			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-			return ret;
-		}
-	}
+	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+		ret = -EFAULT;
+	else if (curval != uval)
+		ret = -EINVAL;
+	if (ret) {
+		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+		return ret;
+	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
@@ -1193,7 +1292,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 *
 * Returns:
 *  0 - failed to acquire the lock atomicly
- *  1 - acquired the lock
+ * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1204,7 +1303,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 {
	struct futex_q *top_waiter = NULL;
	u32 curval;
-	int ret;
+	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;
@@ -1232,11 +1331,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
	 * the contended case or if set_waiters is 1. The pi_state is returned
	 * in ps in contended cases.
	 */
+	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
-	if (ret == 1)
+	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);

+		return vpid;
+	}
	return ret;
 }
@@ -1268,9 +1369,15 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
-	u32 curval2;

	if (requeue_pi) {
+		/*
+		 * Requeue PI only works on two distinct uaddrs. This
+		 * check is only valid for private futexes. See below.
+		 */
+		if (uaddr1 == uaddr2)
+			return -EINVAL;
+
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
@@ -1309,6 +1416,15 @@ retry:
	if (unlikely(ret != 0))
		goto out_put_key1;

+	/*
+	 * The check above which compares uaddrs is not sufficient for
+	 * shared futexes. We need to compare the keys:
+	 */
+	if (requeue_pi && match_futex(&key1, &key2)) {
+		ret = -EINVAL;
+		goto out_put_keys;
+	}
+
	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

@@ -1354,16 +1470,25 @@ retry_private:
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it. If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
-		 * reference to it.
+		 * reference to it. If the lock was taken, ret contains the
+		 * vpid of the top waiter task.
		 */
-		if (ret == 1) {
+		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
-			ret = get_futex_value_locked(&curval2, uaddr2);
-			if (!ret)
-				ret = lookup_pi_state(curval2, hb2, &key2,
-						      &pi_state);
+			/*
+			 * If we acquired the lock, then the user
+			 * space value of uaddr2 should be vpid. It
+			 * cannot be changed by the top waiter as it
+			 * is blocked on hb2 lock if it tries to do
+			 * so. If something fiddled with it behind our
+			 * back the pi state lookup might unearth
+			 * it. So we rather use the known value than
+			 * rereading and handing potential crap to
+			 * lookup_pi_state.
+			 */
+			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
@@ -2133,9 +2258,10 @@ retry:
	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
-	 * anyone else up:
+	 * anyone else up. We only try this if neither the waiters nor
+	 * the owner died bit are set.
	 */
-	if (!(uval & FUTEX_OWNER_DIED) &&
+	if (!(uval & ~FUTEX_TID_MASK) &&
	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
		goto pi_faulted;
	/*
@@ -2167,11 +2293,9 @@ retry:
	/*
	 * No waiters - kernel unlocks the futex:
	 */
-	if (!(uval & FUTEX_OWNER_DIED)) {
-		ret = unlock_futex_pi(uaddr, uval);
-		if (ret == -EFAULT)
-			goto pi_faulted;
-	}
+	ret = unlock_futex_pi(uaddr, uval);
+	if (ret == -EFAULT)
+		goto pi_faulted;

 out_unlock:
	spin_unlock(&hb->lock);
@@ -2331,6 +2455,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
	if (ret)
		goto out_key2;

+	/*
+	 * The check above which compares uaddrs is not sufficient for
+	 * shared futexes. We need to compare the keys:
+	 */
+	if (match_futex(&q.key, &key2)) {
+		ret = -EINVAL;
+		goto out_put_keys;
+	}
+
	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);
@@ -980,11 +980,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

-	/* Switch the timer base, if necessary: */
-	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
	if (mode & HRTIMER_MODE_REL) {
-		tim = ktime_add_safe(tim, new_base->get_time());
+		tim = ktime_add_safe(tim, base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
@@ -999,6 +996,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

+	/* Switch the timer base, if necessary: */
+	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);
@@ -4039,7 +4039,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-	struct sched_param lp;
+	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

@@ -4056,7 +4056,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
	if (retval)
		goto out_unlock;

-	lp.sched_priority = p->rt_priority;
+	if (task_has_rt_policy(p))
+		lp.sched_priority = p->rt_priority;
	rcu_read_unlock();

	/*
@@ -68,8 +68,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
	int idx = 0;
	int task_pri = convert_prio(p->prio);

-	if (task_pri >= MAX_RT_PRIO)
-		return 0;
+	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
@@ -341,50 +341,50 @@ out:
 * softirq as those do not count in task exec_runtime any more.
 */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-					 struct rq *rq)
+					 struct rq *rq, int ticks)
 {
-	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+	u64 cputime = (__force u64) cputime_one_jiffy;
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

+	cputime *= ticks;
+	scaled *= ticks;
+
	if (irqtime_account_hi_update()) {
-		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+		cpustat[CPUTIME_IRQ] += cputime;
	} else if (irqtime_account_si_update()) {
-		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+		cpustat[CPUTIME_SOFTIRQ] += cputime;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time do not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-				      CPUTIME_SOFTIRQ);
+		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
-		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
-		account_idle_time(cputime_one_jiffy);
+		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
-		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_guest_time(p, cputime, scaled);
	} else {
-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-				      CPUTIME_SYSTEM);
+		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
 }

 static void irqtime_account_idle_ticks(int ticks)
 {
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
-		irqtime_account_process_tick(current, 0, rq);
+		irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static void irqtime_account_idle_ticks(int ticks) {}
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-					 struct rq *rq) {}
+					 struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */

 /*
@@ -398,7 +398,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
|
||||
struct rq *rq = this_rq();
|
||||
|
||||
if (sched_clock_irqtime) {
|
||||
irqtime_account_process_tick(p, user_tick, rq);
|
||||
irqtime_account_process_tick(p, user_tick, rq, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
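Taken together, these cputime hunks batch tick accounting: the helper now receives a tick count, the one-jiffy quantum is scaled once by that count, and irqtime_account_idle_ticks() drops its per-tick loop. That bounds the work done when many ticks must be accounted at once. A standalone sketch of the shape of the change, with illustrative names and values:

#include <stdint.h>
#include <stdio.h>

static uint64_t idle_time;

static void account_ticks(int user_tick, int is_idle, int ticks)
{
    uint64_t quantum = 1000;                    /* stands in for cputime_one_jiffy */
    uint64_t cputime = quantum * (uint64_t)ticks;

    if (is_idle)
        idle_time += cputime;                   /* one add instead of 'ticks' adds */
    /* ... the other accounting branches are elided in this sketch ... */
    (void)user_tick;
}

int main(void)
{
    /* The old loop was equivalent to:
     *     for (int i = 0; i < 64; i++) account_ticks(0, 1, 1); */
    account_ticks(0, 1, 64);
    printf("idle_time=%llu\n", (unsigned long long)idle_time);
    return 0;
}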
@@ -1063,15 +1063,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
		return 0;
	} else if (PageHuge(hpage)) {
		/*
		 * Check "just unpoisoned", "filter hit", and
		 * "race with other subpage."
		 * Check "filter hit" and "race with other subpage."
		 */
		lock_page(hpage);
		if (!PageHWPoison(hpage)
		    || (hwpoison_filter(p) && TestClearPageHWPoison(p))
		    || (p != hpage && TestSetPageHWPoison(hpage))) {
			atomic_long_sub(nr_pages, &mce_bad_pages);
			return 0;
		if (PageHWPoison(hpage)) {
			if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
			    || (p != hpage && TestSetPageHWPoison(hpage))) {
				atomic_long_sub(nr_pages, &mce_bad_pages);
				unlock_page(hpage);
				return 0;
			}
		}
		set_page_hwpoison_huge_page(hpage);
		res = dequeue_hwpoisoned_huge_page(hpage);

@@ -1123,6 +1124,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		atomic_long_sub(nr_pages, &mce_bad_pages);
		put_page(hpage);
		res = 0;
		goto out;
	}

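Besides moving the "just unpoisoned" check to a later, separate site, the rewritten huge-page branch fixes a lock leak: the old code could return 0 after lock_page(hpage) without ever unlocking. The invariant it restores, that every early exit taken with the lock held must drop the lock first, is sketched below with a pthread mutex standing in for the page lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static int page_poisoned = 1;

static int handle_failure(void)
{
    pthread_mutex_lock(&page_lock);

    if (!page_poisoned) {
        /* The class of bug fixed above: returning here without
         * unlocking would leave the lock held forever. */
        pthread_mutex_unlock(&page_lock);
        return 0;
    }

    /* ... real handling would go here ... */
    pthread_mutex_unlock(&page_lock);
    return 1;
}

int main(void)
{
    printf("handled=%d\n", handle_failure());
    return 0;
}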
@@ -174,10 +174,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE)
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON(vma->vm_file || !vma->anon_vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
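The move_page_tables() hunk wraps move_huge_pmd() in the anon_vma rmap lock whenever the caller flagged that concurrent rmap walks are possible, mirroring what move_ptes() already does. The conditional lock/unlock pairing looks like this in a minimal sketch, with a pthread mutex as a stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rmap_lock = PTHREAD_MUTEX_INITIALIZER;

static int move_region(int need_rmap_locks)
{
    int err;

    if (need_rmap_locks)
        pthread_mutex_lock(&rmap_lock);      /* anon_vma_lock_write() */

    err = 1;    /* stands in for move_huge_pmd() succeeding */

    if (need_rmap_locks)
        pthread_mutex_unlock(&rmap_lock);    /* anon_vma_unlock_write() */

    return err;
}

int main(void)
{
    printf("err=%d\n", move_region(1));
    return 0;
}

Both sites test the same flag, so the unlock always matches the lock regardless of which path the caller takes.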
@@ -612,7 +612,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
		return NULL;
	}

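The percpu fix pairs the chunk's release with its allocator: the chunk itself came from pcpu_mem_zalloc(), which may use either kmalloc or vmalloc depending on size, so a bare kfree() is wrong whenever the vmalloc path was taken. A userspace sketch of the allocator-pairing rule, with calloc/free as stand-ins:

#include <stdio.h>
#include <stdlib.h>

static void *mem_zalloc(size_t size)
{
    /* The kernel wrapper picks kzalloc() or vzalloc() based on size. */
    return calloc(1, size);
}

static void mem_free(void *ptr, size_t size)
{
    (void)size;    /* the kernel variant needs the size to pick the right path */
    free(ptr);
}

int main(void)
{
    size_t chunk_size = 128;
    void *chunk = mem_zalloc(chunk_size);

    if (!chunk)
        return 1;
    /* As in the fix: release through the matching wrapper,
     * never through a bare free()/kfree(). */
    mem_free(chunk, chunk_size);
    return 0;
}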
@@ -1696,10 +1696,9 @@ void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);

	anon_vma_free(anon_vma);
}

#ifdef CONFIG_MIGRATION
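The __put_anon_vma() reordering closes a use-after-free: anon_vma_free() dereferences anon_vma->root internally, so the old sequence could drop the root's last reference, free the root, and then free the child through a now-dangling root pointer. The fixed ordering frees the child while the root is still pinned and releases the root last. A compact sketch of that ordering rule, with illustrative structures:

#include <stdio.h>
#include <stdlib.h>

struct anon {
    struct anon *root;    /* parent whose state anon_free() reads */
    int refcount;
};

static void anon_free(struct anon *a)
{
    /* Touches a->root, so the root must still be alive here,
     * just as anon_vma_free() reads through anon_vma->root. */
    printf("freeing child (root refcount %d)\n", a->root->refcount);
    free(a);
}

static void put_anon(struct anon *a)
{
    struct anon *root = a->root;

    if (root != a) {
        anon_free(a);                   /* child first: root still pinned */
        if (--root->refcount == 0)
            free(root);                 /* root's reference dropped last */
    } else {
        free(a);                        /* a is its own root in this sketch */
    }
}

int main(void)
{
    struct anon *root = calloc(1, sizeof(*root));
    struct anon *child = calloc(1, sizeof(*child));

    root->root = root;
    root->refcount = 1;    /* reference held by child */
    child->root = root;

    put_anon(child);
    return 0;
}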
@@ -440,7 +440,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
	return r;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);

@@ -453,6 +453,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	int ret;
	struct kvec iov;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fallback to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	iov.iov_base = kmap(page) + offset;
	iov.iov_len = size;
	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
	kunmap(page);

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
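The ceph change is a classic capability fallback: the zero-copy sendpage path misbehaves for pages whose page_count is 0, so those pages are routed through the slower but always-correct sendmsg path instead. The probe-then-fallback shape in a minimal sketch, with stub senders in place of the socket calls:

#include <stdio.h>

static int fast_send(const char *buf, size_t len)
{
    (void)buf;
    printf("fast path sent %zu bytes\n", len);
    return (int)len;
}

static int slow_send(const char *buf, size_t len)
{
    (void)buf;
    printf("slow path sent %zu bytes\n", len);
    return (int)len;
}

static int send_page(const char *buf, size_t len, int page_count)
{
    /* Mirrors the patch: the fast path is only safe when the
     * precondition holds; everything else takes the safe path. */
    if (page_count >= 1)
        return fast_send(buf, len);

    return slow_send(buf, len);
}

int main(void)
{
    char data[64] = "payload";

    send_page(data, sizeof(data), 1);    /* zero-copy-style fast path */
    send_page(data, sizeof(data), 0);    /* safe fallback */
    return 0;
}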
@@ -3620,6 +3620,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
	/* Lynx Point */
	{ PCI_DEVICE(0x8086, 0x8c20),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
	/* 9 Series */
	{ PCI_DEVICE(0x8086, 0x8ca0),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
	/* Lynx Point-LP */
	{ PCI_DEVICE(0x8086, 0x9c20),
	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
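The new "9 Series" line is just one more entry in the driver's PCI ID table; the PCI core matches probed devices against such tables and hands driver_data to the probe path. A compact sketch of how an ID table like this is consumed, using simplified types rather than the kernel's struct pci_device_id:

#include <stdio.h>

struct pci_id {
    unsigned short vendor, device;
    unsigned long driver_data;
};

#define DRIVER_PCH 0x1

static const struct pci_id azx_like_ids[] = {
    { 0x8086, 0x8c20, DRIVER_PCH },    /* Lynx Point */
    { 0x8086, 0x8ca0, DRIVER_PCH },    /* 9 Series (the added entry) */
    { 0x8086, 0x9c20, DRIVER_PCH },    /* Lynx Point-LP */
    { 0, 0, 0 }                        /* table terminator */
};

static const struct pci_id *match(unsigned short vendor, unsigned short device)
{
    for (const struct pci_id *id = azx_like_ids; id->vendor; id++)
        if (id->vendor == vendor && id->device == device)
            return id;
    return NULL;
}

int main(void)
{
    const struct pci_id *id = match(0x8086, 0x8ca0);

    printf("matched=%s\n", id ? "yes" : "no");
    return 0;
}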
@@ -4927,12 +4927,10 @@ static const struct alc_fixup alc260_fixups[] = {
	[ALC260_FIXUP_COEF] = {
		.type = ALC_FIXUP_VERBS,
		.v.verbs = (const struct hda_verb[]) {
			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
			{ 0x20, AC_VERB_SET_PROC_COEF, 0x3040 },
			{ 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
			{ 0x1a, AC_VERB_SET_PROC_COEF, 0x3040 },
			{ }
		},
		.chained = true,
		.chain_id = ALC260_FIXUP_HP_PIN_0F,
	},
	[ALC260_FIXUP_GPIO1] = {
		.type = ALC_FIXUP_VERBS,

@@ -4947,8 +4945,8 @@ static const struct alc_fixup alc260_fixups[] = {
	[ALC260_FIXUP_REPLACER] = {
		.type = ALC_FIXUP_VERBS,
		.v.verbs = (const struct hda_verb[]) {
			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
			{ 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
			{ 0x1a, AC_VERB_SET_COEF_INDEX, 0x07 },
			{ 0x1a, AC_VERB_SET_PROC_COEF, 0x3050 },
			{ }
		},
		.chained = true,
@@ -153,6 +153,7 @@ static struct reg_default wm8962_reg[] = {
	{ 40, 0x0000 },   /* R40 - SPKOUTL volume */
	{ 41, 0x0000 },   /* R41 - SPKOUTR volume */

	{ 49, 0x0010 },   /* R49 - Class D Control 1 */
	{ 51, 0x0003 },   /* R51 - Class D Control 2 */

	{ 56, 0x0506 },   /* R56 - Clocking 4 */

@@ -794,7 +795,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
	case WM8962_ALC2:
	case WM8962_THERMAL_SHUTDOWN_STATUS:
	case WM8962_ADDITIONAL_CONTROL_4:
	case WM8962_CLASS_D_CONTROL_1:
	case WM8962_DC_SERVO_6:
	case WM8962_INTERRUPT_STATUS_1:
	case WM8962_INTERRUPT_STATUS_2:

@@ -2903,13 +2903,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
static int wm8962_mute(struct snd_soc_dai *dai, int mute)
{
	struct snd_soc_codec *codec = dai->codec;
	int val;
	int val, ret;

	if (mute)
		val = WM8962_DAC_MUTE;
		val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
	else
		val = 0;

	/**
	 * The DAC mute bit is mirrored in two registers, update both to keep
	 * the register cache consistent.
	 */
	ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
				  WM8962_DAC_MUTE_ALT, val);
	if (ret < 0)
		return ret;

	return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
				   WM8962_DAC_MUTE, val);
}
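The wm8962_mute() rewrite handles a control that is mirrored across two registers (which is also why CLASS_D_CONTROL_1 gains a cache default and stops being volatile above): both copies must be written, and a failure on the first write must be propagated before touching the second, or the register cache and the hardware drift apart. A standalone sketch of the mirrored-update pattern; the register numbers and the update helper are illustrative:

#include <stdio.h>

#define REG_CLASS_D   0x31
#define REG_ADC_DAC   0x05
#define MUTE_ALT_BIT  0x10
#define MUTE_BIT      0x08

static unsigned int regs[0x40];

static int update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
    regs[reg] = (regs[reg] & ~mask) | (val & mask);
    return 0;    /* a real bus write could fail */
}

static int set_mute(int mute)
{
    unsigned int val = mute ? (MUTE_BIT | MUTE_ALT_BIT) : 0;
    int ret;

    /* Write the mirror first and bail out on error, as in the patch,
     * so the two copies never disagree silently. */
    ret = update_bits(REG_CLASS_D, MUTE_ALT_BIT, val);
    if (ret < 0)
        return ret;

    return update_bits(REG_ADC_DAC, MUTE_BIT, val);
}

int main(void)
{
    set_mute(1);
    printf("class_d=0x%02x adc_dac=0x%02x\n",
           regs[REG_CLASS_D], regs[REG_ADC_DAC]);
    return 0;
}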
@@ -1954,6 +1954,10 @@
#define WM8962_SPKOUTL_ENA_MASK                 0x0040  /* SPKOUTL_ENA */
#define WM8962_SPKOUTL_ENA_SHIFT                     6  /* SPKOUTL_ENA */
#define WM8962_SPKOUTL_ENA_WIDTH                     1  /* SPKOUTL_ENA */
#define WM8962_DAC_MUTE_ALT                     0x0010  /* DAC_MUTE */
#define WM8962_DAC_MUTE_ALT_MASK                0x0010  /* DAC_MUTE */
#define WM8962_DAC_MUTE_ALT_SHIFT                    4  /* DAC_MUTE */
#define WM8962_DAC_MUTE_ALT_WIDTH                    1  /* DAC_MUTE */
#define WM8962_SPKOUTL_PGA_MUTE                 0x0002  /* SPKOUTL_PGA_MUTE */
#define WM8962_SPKOUTL_PGA_MUTE_MASK            0x0002  /* SPKOUTL_PGA_MUTE */
#define WM8962_SPKOUTL_PGA_MUTE_SHIFT                1  /* SPKOUTL_PGA_MUTE */
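The new WM8962_DAC_MUTE_ALT_* defines follow the header's MASK/SHIFT/WIDTH triple convention: a field is extracted or inserted by masking and shifting with the matching pair. A tiny sketch of the usual usage, with values copied from the defines above:

#include <stdio.h>

#define DAC_MUTE_ALT_MASK  0x0010
#define DAC_MUTE_ALT_SHIFT 4

static unsigned int field_get(unsigned int reg)
{
    return (reg & DAC_MUTE_ALT_MASK) >> DAC_MUTE_ALT_SHIFT;
}

static unsigned int field_set(unsigned int reg, unsigned int val)
{
    return (reg & ~DAC_MUTE_ALT_MASK) |
           ((val << DAC_MUTE_ALT_SHIFT) & DAC_MUTE_ALT_MASK);
}

int main(void)
{
    unsigned int reg = field_set(0, 1);

    printf("reg=0x%04x mute_alt=%u\n", reg, field_get(reg));
    return 0;
}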