mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
slab fix for 7.1
-----BEGIN PGP SIGNATURE----- iQFPBAABCAA5FiEEe7vIQRWZI0iWSE3xu+CwddJFiJoFAmnrPncbFIAAAAAABAAO bWFudTIsMi41KzEuMTIsMiwyAAoJELvgsHXSRYiaj6gH/jr11AbZbxCd7z1DzwHx blRszXNBGjoMYPbxENG+xostFgccnJVZw5tAeGo8AZoTN2PGrtk0pVOGq+H8ShSd qoQAxx1+wggv2qfQd2qwFmOFueoQBpIlR1kpVF7YFjFz0Z8q2NNqhzNhjIMWiyRI 4qPHcB5GojgXb+khQG25qQ5Hed8D+D+fEelNrNF3lLd8dK5mbqD4VFpf4lnLyIAQ TWf46PhuqQSeYAHMr1j5J+vW2lNeEY5ps/CmAS6DzUt4pl0JnubKT1WPEEUdOjT1 HBTN761tDMM9W5NyqSRLJYqStVbBOFEaJ5ZSulhy8cHreyY8nLN4G1smU/8dGRXc htE= =NYjb -----END PGP SIGNATURE----- Merge tag 'slab-for-7.1-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab Pull slab fix from Vlastimil Babka: - A stable fix for k(v)realloc() where reallocating on a different node or shrinking the object can result in either losing the original data or a buffer overflow (Marco Elver) * tag 'slab-for-7.1-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: slub: fix data loss and overflow in krealloc()
This commit is contained in:
commit
82138f0183
24
mm/slub.c
24
mm/slub.c
|
|
@ -6645,16 +6645,6 @@ __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags,
|
|||
if (!kasan_check_byte(p))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* If reallocation is not necessary (e. g. the new size is less
|
||||
* than the current allocated size), the current allocation will be
|
||||
* preserved unless __GFP_THISNODE is set. In the latter case a new
|
||||
* allocation on the requested node will be attempted.
|
||||
*/
|
||||
if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
|
||||
nid != page_to_nid(virt_to_page(p)))
|
||||
goto alloc_new;
|
||||
|
||||
if (is_kfence_address(p)) {
|
||||
ks = orig_size = kfence_ksize(p);
|
||||
} else {
|
||||
|
|
@ -6673,6 +6663,16 @@ __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If reallocation is not necessary (e. g. the new size is less
|
||||
* than the current allocated size), the current allocation will be
|
||||
* preserved unless __GFP_THISNODE is set. In the latter case a new
|
||||
* allocation on the requested node will be attempted.
|
||||
*/
|
||||
if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
|
||||
nid != page_to_nid(virt_to_page(p)))
|
||||
goto alloc_new;
|
||||
|
||||
/* If the old object doesn't fit, allocate a bigger one */
|
||||
if (new_size > ks)
|
||||
goto alloc_new;
|
||||
|
|
@ -6707,7 +6707,7 @@ __do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags,
|
|||
if (ret && p) {
|
||||
/* Disable KASAN checks as the object's redzone is accessed. */
|
||||
kasan_disable_current();
|
||||
memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
|
||||
memcpy(ret, kasan_reset_tag(p), min(new_size, (size_t)(orig_size ?: ks)));
|
||||
kasan_enable_current();
|
||||
}
|
||||
|
||||
|
|
@ -6941,7 +6941,7 @@ void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long alig
|
|||
if (p) {
|
||||
/* We already know that `p` is not a vmalloc address. */
|
||||
kasan_disable_current();
|
||||
memcpy(n, kasan_reset_tag(p), ksize(p));
|
||||
memcpy(n, kasan_reset_tag(p), min(size, ksize(p)));
|
||||
kasan_enable_current();
|
||||
|
||||
kfree(p);
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user