ANDROID: block: Send requeued requests to the I/O scheduler

Send requeued requests back through the I/O scheduler instead of inserting
them directly into the hctx dispatch list, so the scheduler retains control
over which requests are dispatched and in what order. To keep the previous
no-merge guarantee for requests with driver-private data, add RQF_DONTPREP
to RQF_NOMERGE_FLAGS rather than bypassing the scheduler.

Bug: 275581839
Change-Id: If8c70df11584b023c452fbba28c67b092ddac850
Signed-off-by: Bart Van Assche <bvanassche@google.com>
This commit is contained in:
Bart Van Assche
2023-04-03 09:59:35 -07:00
committed by Treehugger Robot
parent cd4d66e62b
commit 9102217567
4 changed files with 23 additions and 12 deletions

View File

@@ -412,7 +412,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
static bool blk_mq_sched_bypass_insert(struct request *rq)
bool blk_mq_sched_bypass_insert(struct request *rq)
{
/*
* dispatch flush and passthrough rq directly

View File

@@ -18,6 +18,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
bool blk_mq_sched_bypass_insert(struct request *rq);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,

View File

@@ -788,15 +788,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
/*
* If RQF_DONTPREP, rq has contained some driver specific
* data, so insert it to hctx dispatch list to avoid any
* merge.
*/
if (rq->rq_flags & RQF_DONTPREP)
blk_mq_request_bypass_insert(rq, false, false);
else
blk_mq_sched_insert_request(rq, true, false, false);
blk_mq_sched_insert_request(rq, /*at_head=*/true, false, false);
}
while (!list_empty(&rq_list)) {
@@ -1411,14 +1403,31 @@ out:
/* For non-shared tags, the RESTART check will suffice */
bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
LIST_HEAD(for_sched);
struct request *next;
if (nr_budgets)
blk_mq_release_budgets(q, list);
if (q->elevator)
list_for_each_entry_safe(rq, next, list, queuelist)
if (!blk_mq_sched_bypass_insert(rq))
list_move_tail(&rq->queuelist,
&for_sched);
spin_lock(&hctx->lock);
list_splice_tail_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
if (q->elevator && !list_empty(&for_sched)) {
if (q->elevator->type->ops.requeue_request)
list_for_each_entry(rq, &for_sched, queuelist)
q->elevator->type->ops.
requeue_request(rq);
q->elevator->type->ops.insert_requests(hctx, &for_sched,
/*at_head=*/true);
}
/*
* Order adding requests to hctx->dispatch and checking
* SCHED_RESTART flag. The pair of this smp_mb() is the one

View File

@@ -99,8 +99,9 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_DONTPREP | \
RQF_SPECIAL_PAYLOAD)
/*
* Request state for blk-mq.