ANDROID: block: Add support for filesystem requests and small segments

Add support in the bio splitting code and also in the bio submission code
for bios with segments smaller than the page size.

Bug: 308663717
Bug: 319125789
Change-Id: Iea511675ad96b0c8255c2b87811ad33c3a02c8fa
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[jyescas@google.com: the function bio_may_exceed_limits() does not
                     exist in the 5.15 kernel, so the sub page limit
                     code in block/blk.h was moved to the function
                     __blk_queue_split() in block/blk-merge.c]
Signed-off-by: Juan Yescas <jyescas@google.com>
This commit is contained in:
Bart Van Assche
2023-06-12 13:33:12 -07:00
committed by Juan Yescas
parent e99e7de8a6
commit bed88e7c4f

View File

@@ -265,7 +265,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
if (nsegs < max_segs &&
sectors + (bv.bv_len >> 9) <= max_sectors &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
nsegs++;
/* single-page bvec optimization */
nsegs += blk_segments(&q->limits, bv.bv_len);
sectors += bv.bv_len >> 9;
} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
max_sectors)) {
@@ -333,18 +334,17 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
break;
default:
/*
* All drivers must accept single-segments bios that are <=
* PAGE_SIZE. This is a quick and dirty check that relies on
* the fact that bi_io_vec[0] is always valid if a bio has data.
* The check might lead to occasional false negatives when bios
* are cloned, but compared to the performance impact of cloned
* bios themselves the loop below doesn't matter anyway.
* Check whether bio splitting should be performed. This check may
* trigger the bio splitting code even if splitting is not necessary.
*/
if (!q->limits.chunk_sectors &&
(*bio)->bi_vcnt == 1 &&
(!blk_queue_sub_page_limits(&q->limits) ||
(*bio)->bi_io_vec->bv_len <= q->limits.max_segment_size) &&
((*bio)->bi_io_vec[0].bv_len +
(*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
*nr_segs = 1;
*nr_segs = blk_segments(&q->limits,
(*bio)->bi_io_vec[0].bv_len);
break;
}
split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
@@ -519,7 +519,10 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
__blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
goto next_bvec;
if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE &&
(!blk_queue_sub_page_limits(&q->limits) ||
bvec.bv_len <= q->limits.max_segment_size))
/* single-segment bvec optimization */
nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
else
nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);