Merge tag 'v3.10.57' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into odroidxu3-3.10.y

This is the 3.10.57 stable release
Mauro Ribeiro
2014-10-20 05:26:47 -02:00
18 changed files with 122 additions and 105 deletions

Makefile

@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 56
+SUBLEVEL = 57
EXTRAVERSION =
NAME = TOSSUG Baby Fish

drivers/block/drbd/drbd_nl.c

@@ -514,6 +514,12 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
struct task_struct *opa;
kref_get(&tconn->kref);
+/* We may just have force_sig()'ed this thread
+* to get it out of some blocking network function.
+* Clear signals; otherwise kthread_run(), which internally uses
+* wait_on_completion_killable(), will mistake our pending signal
+* for a new fatal signal and fail. */
+flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
if (IS_ERR(opa)) {
conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");

drivers/cpufreq/cpufreq_governor.c

@@ -98,7 +98,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
policy = cdbs->cur_policy;
-/* Get Absolute Load (in terms of freq for ondemand gov) */
+/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs;
u64 cur_wall_time, cur_idle_time;
@@ -149,14 +149,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
load = 100 * (wall_time - idle_time) / wall_time;
-if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-int freq_avg = __cpufreq_driver_getavg(policy, j);
-if (freq_avg <= 0)
-freq_avg = policy->cur;
-load *= freq_avg;
-}
if (load > max_load)
max_load = load;

drivers/cpufreq/cpufreq_governor.h

@@ -169,7 +169,6 @@ struct od_dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
-unsigned int adj_up_threshold;
unsigned int powersave_bias;
unsigned int io_is_busy;
};

drivers/cpufreq/cpufreq_ondemand.c

@@ -29,11 +29,9 @@
#include "cpufreq_governor.h"
/* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (11)
@@ -161,14 +159,10 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
/*
* Every sampling_rate, we check, if current idle time is less than 20%
-* (default), then we try to increase frequency. Every sampling_rate, we look
-* for the lowest frequency which can sustain the load while keeping idle time
-* over 30%. If such a frequency exist, we try to decrease to this frequency.
-*
-* Any frequency increase takes it to the maximum frequency. Frequency reduction
-* happens at minimum steps of 5% (default) of current frequency
+* (default), then we try to increase frequency. Else, we adjust the frequency
+* proportional to load.
*/
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +172,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
dbs_info->freq_lo = 0;
/* Check for frequency increase */
-if (load_freq > od_tuners->up_threshold * policy->cur) {
+if (load > od_tuners->up_threshold) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
dbs_info->rate_mult =
od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
-return;
-}
-/* Check for frequency decrease */
-/* if we cannot reduce the frequency anymore, break out early */
-if (policy->cur == policy->min)
-return;
-/*
-* The optimal frequency is the frequency that is the lowest that can
-* support the current CPU usage without triggering the up policy. To be
-* safe, we focus 10 points under the threshold.
-*/
-if (load_freq < od_tuners->adj_up_threshold
-* policy->cur) {
+} else {
+/* Calculate the next frequency proportional to load */
unsigned int freq_next;
-freq_next = load_freq / od_tuners->adj_up_threshold;
+freq_next = load * policy->cpuinfo.max_freq / 100;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
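
The rewritten branch above replaces the old adj_up_threshold heuristic with a direct proportion: below up_threshold, the next frequency is simply load * cpuinfo.max_freq / 100, and cpufreq then snaps that to a supported step. A minimal standalone sketch of the arithmetic, with a hypothetical 2 GHz maximum and the 95% micro-accounting threshold:

#include <stdio.h>

/* Sketch of the new od_check_cpu() policy above. The max frequency and
 * threshold are illustrative values, not read from a real cpufreq driver. */
int main(void)
{
	unsigned int max_freq = 2000000;	/* kHz, assumed hardware max */
	unsigned int up_threshold = 95;		/* MICRO_FREQUENCY_UP_THRESHOLD */
	unsigned int load;

	for (load = 10; load <= 100; load += 30) {
		if (load > up_threshold) {
			/* jump straight to the maximum, as dbs_freq_increase() does */
			printf("load %3u%% -> %u kHz (max)\n", load, max_freq);
		} else {
			unsigned int freq_next = load * max_freq / 100;
			printf("load %3u%% -> %u kHz\n", load, freq_next);
		}
	}
	return 0;
}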
@@ -374,9 +356,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
-/* Calculate the new adj_up_threshold */
-od_tuners->adj_up_threshold += input;
-od_tuners->adj_up_threshold -= od_tuners->up_threshold;
od_tuners->up_threshold = input;
return count;
@@ -525,8 +504,6 @@ static int od_init(struct dbs_data *dbs_data)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +512,6 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-DEF_FREQUENCY_DOWN_DIFFERENTIAL;
/* For correct statistics, we need 10 ticks for each measure */
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *

drivers/cpufreq/cpufreq_stats.c

@@ -82,7 +82,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
for (i = 0; i < stat->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
(unsigned long long)
-cputime64_to_clock_t(stat->time_in_state[i]));
+jiffies_64_to_clock_t(stat->time_in_state[i]));
}
return len;
}

drivers/md/raid5.c

@@ -60,6 +60,10 @@
#include "raid0.h"
#include "bitmap.h"
+static bool devices_handle_discard_safely = false;
+module_param(devices_handle_discard_safely, bool, 0644);
+MODULE_PARM_DESC(devices_handle_discard_safely,
+"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
/*
* Stripe cache
*/
@@ -5611,7 +5615,7 @@ static int run(struct mddev *mddev)
mddev->queue->limits.discard_granularity = stripe;
/*
* unaligned part of discard request will be ignored, so can't
-* guarantee discard_zerors_data
+* guarantee discard_zeroes_data
*/
mddev->queue->limits.discard_zeroes_data = 0;
@@ -5636,6 +5640,18 @@ static int run(struct mddev *mddev)
!bdev_get_queue(rdev->bdev)->
limits.discard_zeroes_data)
discard_supported = false;
+/* Unfortunately, discard_zeroes_data is not currently
+* a guarantee - just a hint. So we only allow DISCARD
+* if the sysadmin has confirmed that only safe devices
+* are in use by setting a module parameter.
+*/
+if (!devices_handle_discard_safely) {
+if (discard_supported) {
+pr_info("md/raid456: discard support disabled due to uncertainty.\n");
+pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
+}
+discard_supported = false;
+}
}
if (discard_supported &&

drivers/media/v4l2-core/videobuf2-core.c

@@ -679,6 +679,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* to the userspace.
*/
req->count = allocated_buffers;
+q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
return 0;
}
@@ -727,6 +728,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
q->memory = create->memory;
+q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
}
num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
@@ -1385,6 +1387,7 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
* dequeued in dqbuf.
*/
list_add_tail(&vb->queued_entry, &q->queued_list);
+q->waiting_for_buffers = false;
vb->state = VB2_BUF_STATE_QUEUED;
/*
@@ -1781,6 +1784,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
* and videobuf, effectively returning control over them to userspace.
*/
__vb2_queue_cancel(q);
+q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
dprintk(3, "Streamoff successful\n");
return 0;
@@ -2066,9 +2070,16 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
}
/*
-* There is nothing to wait for if no buffers have already been queued.
+* There is nothing to wait for if the queue isn't streaming.
*/
-if (list_empty(&q->queued_list))
+if (!vb2_is_streaming(q))
return res | POLLERR;
+/*
+* For compatibility with vb1: if QBUF hasn't been called yet, then
+* return POLLERR as well. This only affects capture queues, output
+* queues will always initialize waiting_for_buffers to false.
+*/
+if (q->waiting_for_buffers)
+return res | POLLERR;
if (list_empty(&q->done_list))
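
The user-visible effect of waiting_for_buffers: polling a capture queue before any buffer has been requested and queued now fails fast with POLLERR instead of sleeping on done_wq forever. A hedged userspace sketch of that behaviour (the device node is an assumption, and a vb2-backed capture driver is presumed):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct pollfd pfd;

	/* Open a capture node without ever calling VIDIOC_REQBUFS/QBUF. */
	pfd.fd = open("/dev/video0", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;
	/* With the change above, vb2 reports POLLERR here rather than blocking. */
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLERR))
		fprintf(stderr, "POLLERR: no buffers queued yet (expected)\n");
	return 0;
}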

fs/udf/inode.c

@@ -1270,13 +1270,22 @@ update_time:
return 0;
}
+/*
+* Maximum length of linked list formed by ICB hierarchy. The chosen number is
+* arbitrary - just that we hopefully don't limit any real use of rewritten
+* inode on write-once media but avoid looping for too long on corrupted media.
+*/
+#define UDF_MAX_ICB_NESTING 1024
static void __udf_read_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
uint16_t ident;
struct udf_inode_info *iinfo = UDF_I(inode);
+unsigned int indirections = 0;
+reread:
/*
* Set defaults, but the inode is still incomplete!
* Note: get_new_inode() sets the following on a new inode:
@@ -1313,28 +1322,26 @@ static void __udf_read_inode(struct inode *inode)
ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
&ident);
if (ident == TAG_IDENT_IE && ibh) {
-struct buffer_head *nbh = NULL;
struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
-if (ie->indirectICB.extLength &&
-(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-&ident))) {
-if (ident == TAG_IDENT_FE ||
-ident == TAG_IDENT_EFE) {
-memcpy(&iinfo->i_location,
-&loc,
-sizeof(struct kernel_lb_addr));
-brelse(bh);
-brelse(ibh);
-brelse(nbh);
-__udf_read_inode(inode);
+if (ie->indirectICB.extLength) {
+brelse(bh);
+brelse(ibh);
+memcpy(&iinfo->i_location, &loc,
+sizeof(struct kernel_lb_addr));
+if (++indirections > UDF_MAX_ICB_NESTING) {
+udf_err(inode->i_sb,
+"too many ICBs in ICB hierarchy"
+" (max %d supported)\n",
+UDF_MAX_ICB_NESTING);
+make_bad_inode(inode);
return;
}
-brelse(nbh);
+goto reread;
}
}
brelse(ibh);
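
The old code recursed into __udf_read_inode() once per indirect entry, so a cyclic ICB chain on corrupted media looped forever; the rewrite walks the chain iteratively and gives up after UDF_MAX_ICB_NESTING steps. A standalone sketch of that bounded goto-retry pattern (types and names are illustrative, not UDF's on-disk structures):

#include <stdio.h>

#define MAX_NESTING 1024	/* mirrors UDF_MAX_ICB_NESTING */

struct node {
	struct node *indirect;	/* stands in for an indirect ICB reference */
	int payload;
};

static int read_node(struct node *n, int *out)
{
	unsigned int indirections = 0;
reread:
	if (n->indirect) {
		n = n->indirect;
		if (++indirections > MAX_NESTING)
			return -1;	/* corrupted/cyclic chain: bail out */
		goto reread;
	}
	*out = n->payload;
	return 0;
}

int main(void)
{
	struct node leaf = { 0, 42 }, head = { &leaf, 0 };
	int v;

	if (read_node(&head, &v) == 0)
		printf("payload %d after following indirections\n", v);
	return 0;
}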

include/linux/jiffies.h

@@ -254,23 +254,11 @@ extern unsigned long preset_lpj;
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
-((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
-TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
-* USEC_ROUND is used in the timeval to jiffie conversion. See there
-* for more details. It is the scaled resolution rounding value. Note
-* that it is a 64-bit value. Since, when it is applied, we are already
-* in jiffies (albit scaled), it is nothing but the bits we will shift
-* off.
-*/
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,

include/media/videobuf2-core.h

@@ -326,6 +326,9 @@ struct v4l2_fh;
* @done_wq: waitqueue for processes waiting for buffers ready to be dequeued
* @alloc_ctx: memory type/allocator-specific contexts for each plane
* @streaming: current streaming state
+* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
+* buffers. Only set for capture queues if qbuf has not yet been
+* called since poll() needs to return POLLERR in that situation.
* @fileio: file io emulator internal data, used only if emulator is active
*/
struct vb2_queue {
@@ -359,6 +362,7 @@ struct vb2_queue {
unsigned int plane_sizes[VIDEO_MAX_PLANES];
unsigned int streaming:1;
+unsigned int waiting_for_buffers:1;
struct vb2_fileio_data *fileio;

init/Kconfig

@@ -1373,6 +1373,7 @@ config FUTEX
config HAVE_FUTEX_CMPXCHG
bool
+depends on FUTEX
help
Architectures should select this if futex_atomic_cmpxchg_inatomic()
is implemented and always working. This removes a couple of runtime

kernel/events/core.c

@@ -7482,8 +7482,10 @@ int perf_event_init_task(struct task_struct *child)
for_each_task_context_nr(ctxn) {
ret = perf_event_init_context(child, ctxn);
-if (ret)
+if (ret) {
+perf_event_free_task(child);
return ret;
+}
}
return 0;

kernel/fork.c

@@ -1341,7 +1341,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_policy;
retval = audit_alloc(p);
if (retval)
-goto bad_fork_cleanup_policy;
+goto bad_fork_cleanup_perf;
/* copy all the process information */
retval = copy_semundo(clone_flags, p);
if (retval)
@@ -1539,8 +1539,9 @@ bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
perf_event_free_task(p);
+bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
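
The label reorder matters because copy_process() unwinds its error labels top to bottom: a failure in audit_alloc() must now land on bad_fork_cleanup_perf so that perf_event_free_task() runs, where before it jumped past it. A toy sketch of the idiom, under illustrative names rather than the kernel's:

#include <stdlib.h>

/* Each label undoes one successful step; labels run top to bottom, so a
 * failure at step N jumps to the label that frees step N-1's resource. */
static int setup(void)
{
	void *policy, *perf, *audit;

	policy = malloc(16);		/* step 1: mempolicy */
	if (!policy)
		goto fail;
	perf = malloc(16);		/* step 2: perf context */
	if (!perf)
		goto cleanup_policy;
	audit = malloc(16);		/* step 3: audit context */
	if (!audit)
		goto cleanup_perf;	/* the bug jumped to cleanup_policy here */

	free(audit);
	free(perf);
	free(policy);
	return 0;

cleanup_perf:
	free(perf);
cleanup_policy:
	free(policy);
fail:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}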

kernel/time.c

@@ -496,17 +496,20 @@ EXPORT_SYMBOL(usecs_to_jiffies);
* that a remainder subtract here would not do the right thing as the
* resolution values don't fall on second boundries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+* Note that due to the small error in the multiplier here, this
+* rounding is incorrect for sufficiently large values of tv_nsec, but
+* well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+* OK.
*
* Rather, we just shift the bits off the right.
*
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
* value to a scaled second value.
*/
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
{
-unsigned long sec = value->tv_sec;
-long nsec = value->tv_nsec + TICK_NSEC - 1;
+nsec = nsec + TICK_NSEC - 1;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
@@ -517,6 +520,13 @@ timespec_to_jiffies(const struct timespec *value)
(NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
EXPORT_SYMBOL(timespec_to_jiffies);
void
@@ -533,31 +543,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
}
EXPORT_SYMBOL(jiffies_to_timespec);
/* Same for "timeval"
/*
* We could use a similar algorithm to timespec_to_jiffies (with a
* different multiplier for usec instead of nsec). But this has a
* problem with rounding: we can't exactly add TICK_NSEC - 1 to the
* usec value, since it's not necessarily integral.
*
* Well, almost. The problem here is that the real system resolution is
* in nanoseconds and the value being converted is in micro seconds.
* Also for some machines (those that use HZ = 1024, in-particular),
* there is a LARGE error in the tick size in microseconds.
* The solution we use is to do the rounding AFTER we convert the
* microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
* Instruction wise, this should cost only an additional add with carry
* instruction above the way it was done above.
* We could instead round in the intermediate scaled representation
* (i.e. in units of 1/2^(large scale) jiffies) but that's also
* perilous: the scaling introduces a small positive error, which
* combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
* units to the intermediate before shifting) leads to accidental
* overflow and overestimates.
*
* At the cost of one additional multiplication by a constant, just
* use the timespec implementation.
*/
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
unsigned long sec = value->tv_sec;
long usec = value->tv_usec;
if (sec >= MAX_SEC_IN_JIFFIES){
sec = MAX_SEC_IN_JIFFIES;
usec = 0;
}
return (((u64)sec * SEC_CONVERSION) +
(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
(USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
return __timespec_to_jiffies(value->tv_sec,
value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);
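
With the helper in place, both conversions share a single rounding step: microseconds are scaled to nanoseconds first, then rounded up to whole ticks exactly once, avoiding the scaled-representation overflow the comment describes. A standalone sketch of the arithmetic, assuming HZ=100 (the kernel derives these constants from its configuration):

#include <stdio.h>

#define HZ		100UL
#define NSEC_PER_SEC	1000000000UL
#define NSEC_PER_USEC	1000UL
#define TICK_NSEC	(NSEC_PER_SEC / HZ)	/* 10 ms per jiffy at HZ=100 */

/* Simplified model of the rewritten __timespec_to_jiffies(): round the
 * nanosecond part up to a whole tick once, after any usec->nsec scaling. */
static unsigned long to_jiffies(unsigned long sec, unsigned long nsec)
{
	return sec * HZ + (nsec + TICK_NSEC - 1) / TICK_NSEC;
}

int main(void)
{
	unsigned long usec = 10500;	/* 10.5 ms, as a timeval would carry */

	printf("%lu usec -> %lu jiffies\n", usec,
	       to_jiffies(0, usec * NSEC_PER_USEC));
	/* prints 2: 10.5 ms rounds up to two 10 ms ticks */
	return 0;
}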

kernel/trace/ring_buffer.c

@@ -3371,7 +3371,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
iter->head = cpu_buffer->reader_page->read;
iter->cache_reader_page = iter->head_page;
-iter->cache_read = iter->head;
+iter->cache_read = cpu_buffer->read;
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;

mm/huge_memory.c

@@ -1733,21 +1733,24 @@ static int __split_huge_page_map(struct page *page,
if (pmd) {
pgtable = pgtable_trans_huge_withdraw(mm);
pmd_populate(mm, &_pmd, pgtable);
+if (pmd_write(*pmd))
+BUG_ON(page_mapcount(page) != 1);
haddr = address;
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
pte_t *pte, entry;
BUG_ON(PageCompound(page+i));
+/*
+* Note that pmd_numa is not transferred deliberately
+* to avoid any possibility that pte_numa leaks to
+* a PROT_NONE VMA by accident.
+*/
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (!pmd_write(*pmd))
entry = pte_wrprotect(entry);
-else
-BUG_ON(page_mapcount(page) != 1);
if (!pmd_young(*pmd))
entry = pte_mkold(entry);
-if (pmd_numa(*pmd))
-entry = pte_mknuma(entry);
pte = pte_offset_map(&_pmd, haddr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);

net/wireless/nl80211.c

@@ -6568,6 +6568,9 @@ int cfg80211_testmode_reply(struct sk_buff *skb)
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
+/* clear CB data for netlink core to own from now on */
+memset(skb->cb, 0, sizeof(skb->cb));
if (WARN_ON(!rdev->testmode_info)) {
kfree_skb(skb);
return -EINVAL;
@@ -6594,6 +6597,9 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
+/* clear CB data for netlink core to own from now on */
+memset(skb->cb, 0, sizeof(skb->cb));
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,