diff --git a/block/blk-exec.c b/block/blk-exec.c
index d6cd501c0d34..f58ffcad8280 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -59,7 +59,10 @@ void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
 	 * don't check dying flag for MQ because the request won't
 	 * be reused after dying flag is set
 	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
+	blk_mq_sched_insert_request(rq, at_head, true,
+			rq->mq_hctx->flags & BLK_MQ_F_BLOCKING &&
+			rq->cmd_flags & REQ_NOWAIT);
+
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 474ec2fdceb8..9da923d49cb2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1561,11 +1561,9 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 	if (unlikely(blk_mq_hctx_stopped(hctx)))
 		return;
 
-	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
-			__blk_mq_run_hw_queue(hctx);
-			return;
-		}
+	if (!async && cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+		__blk_mq_run_hw_queue(hctx);
+		return;
 	}
 
 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
@@ -1768,7 +1766,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	blk_mq_run_hw_queue(hctx, false);
+	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -1798,7 +1796,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_start_stopped_hw_queue(hctx, async);
+		blk_mq_start_stopped_hw_queue(hctx, async ||
+				(hctx->flags & BLK_MQ_F_BLOCKING));
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 77f379649f64..8d7bb439855f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -328,7 +328,8 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 	 * but in most cases, we will be first. Ideally, each LU on the
 	 * target would get some limited time or requests on the target.
 	 */
-	blk_mq_run_hw_queues(current_sdev->request_queue, false);
+	blk_mq_run_hw_queues(current_sdev->request_queue,
+			     shost->queuecommand_may_block);
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (starget->starget_sdev_user)
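
Taken together, the hunks move the "may this hctx be run inline?" decision out of __blk_mq_delay_run_hw_queue() and into its callers: the core helper now checks only async and the CPU mask, while callers pass async=true whenever running a BLK_MQ_F_BLOCKING hctx inline could sleep in a context that must not block (e.g. a REQ_NOWAIT request in blk_execute_rq_nowait()). Below is a minimal user-space sketch of that policy; the struct, should_run_inline(), want_async(), and the flag values are illustrative assumptions for this sketch, not kernel API:

/* User-space model of the run-queue policy after this patch.
 * Names and flag values here are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define BLK_MQ_F_BLOCKING	(1 << 5)	/* hctx's ->queue_rq may sleep */
#define REQ_NOWAIT		(1 << 0)	/* caller must not be blocked */

struct hctx_model {
	unsigned long flags;		/* may include BLK_MQ_F_BLOCKING */
	bool cpu_in_cpumask;		/* current CPU in hctx->cpumask? */
};

/*
 * Core-helper side, mirroring the new __blk_mq_delay_run_hw_queue():
 * the helper no longer refuses to run a BLK_MQ_F_BLOCKING hctx inline;
 * it runs inline whenever the caller allowed it and the CPU matches.
 */
static bool should_run_inline(const struct hctx_model *hctx, bool async)
{
	return !async && hctx->cpu_in_cpumask;
}

/*
 * Caller side, mirroring the blk-exec.c hunk: only a REQ_NOWAIT request
 * on a blocking hctx is forced onto the async (kblockd) path, since
 * running it inline could sleep where the submitter forbids blocking.
 */
static bool want_async(const struct hctx_model *hctx, unsigned int cmd_flags)
{
	return (hctx->flags & BLK_MQ_F_BLOCKING) && (cmd_flags & REQ_NOWAIT);
}

int main(void)
{
	struct hctx_model blocking = {
		.flags = BLK_MQ_F_BLOCKING,
		.cpu_in_cpumask = true,
	};

	/* REQ_NOWAIT on a blocking hctx: deferred, prints inline=0 */
	printf("nowait: inline=%d\n",
	       should_run_inline(&blocking, want_async(&blocking, REQ_NOWAIT)));

	/* A request that may block still runs inline, prints inline=1 */
	printf("waitok: inline=%d\n",
	       should_run_inline(&blocking, want_async(&blocking, 0)));
	return 0;
}

The design point the sketch tries to capture: only the caller knows whether its context tolerates sleeping, so unconditionally punting every BLK_MQ_F_BLOCKING hctx to kblockd (as the old helper did) was overly conservative, and the patch narrows the async requirement to the cases that actually need it.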