ANDROID: block: Revert "Preserve the order of requeued requests"

Revert commit 5d90f7cbaf ("ANDROID: block: Preserve the order of
requeued requests") because it is no longer needed: a mechanism for
sorting zoned writes by LBA has been added to the SCSI core. Compared
to a plain revert, this patch also compensates for the requeue_work
member having moved from struct request_queue into
struct internal_request_queue.

Bug: 298142254
Change-Id: I35161cfff058d518c3ac5835c6bfcedb8fb567c7
Signed-off-by: Bart Van Assche <bvanassche@google.com>
Author: Bart Van Assche <bvanassche@google.com>
Date:   2023-09-18 10:34:53 -07:00
Commit: c7b7058fbf (parent 1988ebab8f)


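Note on the types involved: the diff relies on struct internal_request_queue and the to_internal_q() helper, an ACK-private wrapper around the public struct request_queue that carries the relocated requeue_work member. A minimal sketch of that wrapper pattern follows; the struct layout and the helper body are assumptions inferred from the container_of() call in the diff, not the verbatim ACK definitions.

	#include <linux/blkdev.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	/* Assumed layout: the public queue embedded first, private members after. */
	struct internal_request_queue {
		struct request_queue q;
		struct delayed_work requeue_work;
	};

	/* Assumed helper: recover the wrapper from the embedded public struct. */
	static inline struct internal_request_queue *to_internal_q(struct request_queue *q)
	{
		return container_of(q, struct internal_request_queue, q);
	}

	/* A work handler can then walk back from the work_struct to the queue. */
	static void sketch_requeue_work(struct work_struct *work)
	{
		struct request_queue *q = &container_of(work,
				struct internal_request_queue, requeue_work.work)->q;

		/* ... drain q->requeue_list here ... */
		(void)q;
	}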
@@ -71,7 +71,6 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
 	return !list_empty_careful(&hctx->dispatch) ||
-		!list_empty_careful(&hctx->queue->requeue_list) ||
 		sbitmap_any_bit_set(&hctx->ctx_map) ||
 			blk_mq_sched_has_work(hctx);
 }
@@ -772,36 +771,33 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
-static bool blk_mq_has_sqsched(struct request_queue *q);
-
-static void blk_mq_process_requeue_list(struct blk_mq_hw_ctx *hctx)
+static void blk_mq_requeue_work(struct work_struct *work)
 {
-	struct request_queue *q = hctx->queue;
+	struct request_queue *q = &container_of(work,
+		struct internal_request_queue, requeue_work.work)->q;
+	LIST_HEAD(rq_list);
 	struct request *rq, *next;
-	LIST_HEAD(at_head);
-	LIST_HEAD(at_tail);
-
-	if (list_empty_careful(&q->requeue_list))
-		return;
 
 	spin_lock_irq(&q->requeue_lock);
-	list_for_each_entry_safe(rq, next, &q->requeue_list, queuelist) {
-		if (!blk_mq_has_sqsched(q) && rq->mq_hctx != hctx)
-			continue;
-		if (rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)) {
-			rq->rq_flags &= ~RQF_SOFTBARRIER;
-			list_move(&rq->queuelist, &at_head);
-		} else {
-			list_move(&rq->queuelist, &at_tail);
-		}
-	}
+	list_splice_init(&q->requeue_list, &rq_list);
 	spin_unlock_irq(&q->requeue_lock);
 
-	list_for_each_entry_safe(rq, next, &at_head, queuelist)
-		blk_mq_sched_insert_request(rq, /*at_head=*/true, false, false);
-	list_for_each_entry_safe(rq, next, &at_tail, queuelist)
-		blk_mq_sched_insert_request(rq, /*at_head=*/false, false, false);
+	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
+		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
+			continue;
+
+		rq->rq_flags &= ~RQF_SOFTBARRIER;
+		list_del_init(&rq->queuelist);
+		blk_mq_sched_insert_request(rq, /*at_head=*/true, false, false);
+	}
+
+	while (!list_empty(&rq_list)) {
+		rq = list_entry(rq_list.next, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		blk_mq_sched_insert_request(rq, false, false, false);
+	}
+
+	blk_mq_run_hw_queues(q, false);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
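The restored blk_mq_requeue_work() follows the common "splice under the lock, re-insert outside it" idiom: requeue_list is detached in O(1) while requeue_lock is held, and the requests are classified and re-inserted with the lock dropped. A generic, self-contained sketch of that idiom (the names here are illustrative, not taken from the patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head node;
	};

	static LIST_HEAD(pending);		/* producer-visible list */
	static DEFINE_SPINLOCK(pending_lock);

	static void drain_pending(void)
	{
		struct item *it, *next;
		LIST_HEAD(local);

		/* Detach the whole list in O(1); producers never observe
		 * a half-drained list. */
		spin_lock_irq(&pending_lock);
		list_splice_init(&pending, &local);
		spin_unlock_irq(&pending_lock);

		/* Iterate and re-insert without holding the lock. */
		list_for_each_entry_safe(it, next, &local, node) {
			list_del_init(&it->node);
			/* dispatch *it here */
		}
	}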
@@ -831,14 +827,19 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
-	blk_mq_run_hw_queues(q, /*async=*/in_atomic());
+	struct internal_request_queue *iq = to_internal_q(q);
+
+	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &iq->requeue_work, 0);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 				    unsigned long msecs)
 {
-	blk_mq_delay_run_hw_queues(q, msecs);
+	struct internal_request_queue *iq = to_internal_q(q);
+
+	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &iq->requeue_work,
+				    msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
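Both kick helpers now arm the queue's delayed work via kblockd_mod_delayed_work_on(). Since that helper is built on mod_delayed_work_on(), kicking an already-pending work item merely updates its timer rather than queueing it a second time. A rough standalone equivalent using the generic workqueue API, with system_wq standing in for the kblockd workqueue:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void requeue_fn(struct work_struct *work)
	{
		/* drain and re-dispatch requeued requests */
	}

	static DECLARE_DELAYED_WORK(requeue_work, requeue_fn);

	/* Immediate kick: a zero delay runs the handler as soon as possible. */
	static void kick(void)
	{
		mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &requeue_work, 0);
	}

	/* Delayed kick: an already-pending timer is pushed out, not duplicated. */
	static void delay_kick(unsigned long msecs)
	{
		mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &requeue_work,
				    msecs_to_jiffies(msecs));
	}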
@@ -1484,8 +1485,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-	blk_mq_process_requeue_list(hctx);
-
 	hctx_lock(hctx, &srcu_idx);
 	blk_mq_sched_dispatch_requests(hctx);
 	hctx_unlock(hctx, srcu_idx);
@@ -1675,7 +1674,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 		 * scheduler.
 		 */
 		if (!sq_hctx || sq_hctx == hctx ||
-		    blk_mq_hctx_has_pending(hctx))
+		    !list_empty_careful(&hctx->dispatch))
 			blk_mq_run_hw_queue(hctx, async);
 	}
 }
@@ -1703,7 +1702,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 		 * scheduler.
 		 */
 		if (!sq_hctx || sq_hctx == hctx ||
-		    blk_mq_hctx_has_pending(hctx))
+		    !list_empty_careful(&hctx->dispatch))
 			blk_mq_delay_run_hw_queue(hctx, msecs);
 	}
 }
@@ -3285,6 +3284,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	    set->map[HCTX_TYPE_POLL].nr_queues)
 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 
+	INIT_DELAYED_WORK(&to_internal_q(q)->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
@@ -4005,6 +4005,8 @@ void blk_mq_cancel_work_sync(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	cancel_delayed_work_sync(&to_internal_q(q)->requeue_work);
+
 	queue_for_each_hw_ctx(q, hctx, i)
 		cancel_delayed_work_sync(&hctx->run_work);
 }
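The last two hunks restore the work item's lifecycle: INIT_DELAYED_WORK() in blk_mq_init_allocated_queue() wires up the handler when the queue is created, and cancel_delayed_work_sync() in blk_mq_cancel_work_sync() ensures the handler has finished and its timer is dead before teardown continues. A minimal sketch of that pairing, with illustrative names:

	#include <linux/workqueue.h>

	struct my_queue {
		struct delayed_work requeue_work;
	};

	static void my_requeue_fn(struct work_struct *work)
	{
		/* process requeued work */
	}

	static void my_queue_init(struct my_queue *mq)
	{
		/* Pairs with the INIT_DELAYED_WORK() hunk above. */
		INIT_DELAYED_WORK(&mq->requeue_work, my_requeue_fn);
	}

	static void my_queue_exit(struct my_queue *mq)
	{
		/* Waits for a running handler and cancels a pending timer. */
		cancel_delayed_work_sync(&mq->requeue_work);
	}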