
Commit f71f642

Merge tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - swiotlb area sizing fixes (Petr Tesarik)

* tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: reduce the number of areas to match actual memory pool size
  swiotlb: always set the number of areas before allocating the pool
2 parents a9943ad + 8ac0406

1 file changed

kernel/dma/swiotlb.c

Lines changed: 35 additions & 11 deletions
@@ -115,9 +115,16 @@ static bool round_up_default_nslabs(void)
 	return true;
 }
 
+/**
+ * swiotlb_adjust_nareas() - adjust the number of areas and slots
+ * @nareas: Desired number of areas. Zero is treated as 1.
+ *
+ * Adjust the default number of areas in a memory pool.
+ * The default size of the memory pool may also change to meet minimum area
+ * size requirements.
+ */
 static void swiotlb_adjust_nareas(unsigned int nareas)
 {
-	/* use a single area when non is specified */
 	if (!nareas)
 		nareas = 1;
 	else if (!is_power_of_2(nareas))
@@ -131,6 +138,23 @@ static void swiotlb_adjust_nareas(unsigned int nareas)
 		(default_nslabs << IO_TLB_SHIFT) >> 20);
 }
 
+/**
+ * limit_nareas() - get the maximum number of areas for a given memory pool size
+ * @nareas: Desired number of areas.
+ * @nslots: Total number of slots in the memory pool.
+ *
+ * Limit the number of areas to the maximum possible number of areas in
+ * a memory pool of the given size.
+ *
+ * Return: Maximum possible number of areas.
+ */
+static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
+{
+	if (nslots < nareas * IO_TLB_SEGSIZE)
+		return nslots / IO_TLB_SEGSIZE;
+	return nareas;
+}
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -290,6 +314,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs;
+	unsigned int nareas;
 	size_t alloc_size;
 	void *tlb;
 
@@ -298,18 +323,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	if (swiotlb_force_disable)
 		return;
 
-	/*
-	 * default_nslabs maybe changed when adjust area number.
-	 * So allocate bounce buffer after adjusting area number.
-	 */
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
+	nareas = limit_nareas(default_nareas, nslabs);
 	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
 		if (nslabs <= IO_TLB_MIN_SLABS)
 			return;
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		nareas = limit_nareas(nareas, nslabs);
 	}
 
 	if (default_nslabs != nslabs) {
@@ -355,6 +378,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
+	unsigned int nareas;
 	unsigned char *vstart = NULL;
 	unsigned int order, area_order;
 	bool retried = false;
@@ -363,6 +387,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	if (swiotlb_force_disable)
 		return 0;
 
+	if (!default_nareas)
+		swiotlb_adjust_nareas(num_possible_cpus());
+
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
@@ -397,11 +424,8 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 			(PAGE_SIZE << order) >> 20);
 	}
 
-	if (!default_nareas)
-		swiotlb_adjust_nareas(num_possible_cpus());
-
-	area_order = get_order(array_size(sizeof(*mem->areas),
-				default_nareas));
+	nareas = limit_nareas(default_nareas, nslabs);
+	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
 	mem->areas = (struct io_tlb_area *)
 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
 	if (!mem->areas)
@@ -415,7 +439,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	set_memory_decrypted((unsigned long)vstart,
 		(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
-			default_nareas);
+			nareas);
 
 	swiotlb_print_info();
 	return 0;
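For reference, the arithmetic behind the new limit_nareas() helper is small enough to try in isolation. The sketch below is a minimal userspace program, not kernel code: it copies the helper from the diff and walks it through a shrinking pool, the way the retry loop in swiotlb_init_remap() does when swiotlb_memblock_alloc() fails. The IO_TLB_SEGSIZE value of 128 slots per area matches the kernel's definition; the starting nslabs and nareas values are made-up inputs for illustration.

#include <stdio.h>

#define IO_TLB_SEGSIZE 128	/* slots per area; matches include/linux/swiotlb.h */

/* Copied from the diff: cap the area count so that every area
 * still holds at least IO_TLB_SEGSIZE slots. */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

int main(void)
{
	unsigned long nslabs = 2048;	/* hypothetical pool size in slots */
	unsigned int nareas = 32;	/* hypothetical requested area count */

	/* Mirror the shrink-and-retry loop in swiotlb_init_remap(): the
	 * kernel halves nslabs (aligned to IO_TLB_SEGSIZE) after each
	 * failed allocation and re-limits the area count to match. */
	while (nslabs >= IO_TLB_SEGSIZE) {
		nareas = limit_nareas(nareas, nslabs);
		printf("nslabs=%4lu -> nareas=%u\n", nslabs, nareas);
		nslabs >>= 1;
	}
	return 0;
}

With 2048 slots and a requested 32 areas, the helper caps the count at 16, and each halving of the pool halves the cap again until a single area remains. That is the invariant the two patches enforce: the area count is fixed before the pool is allocated, and no area ever covers fewer than IO_TLB_SEGSIZE slots.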
