commit f9bff0e318

Patch series "New page table range API", v6.

This patchset changes the API used by the MM to set up page table entries.
The four APIs are:

    set_ptes(mm, addr, ptep, pte, nr)
    update_mmu_cache_range(vma, addr, ptep, nr)
    flush_dcache_folio(folio)
    flush_icache_pages(vma, page, nr)

flush_dcache_folio() isn't technically new, but no architecture implemented
it, so I've done that for them.  The old APIs remain around but are mostly
implemented by calling the new interfaces.

The new APIs are based around setting up N page table entries at once.  The
N entries belong to the same PMD, the same folio and the same VMA, so ptep++
is a legitimate operation, and locking is taken care of for you.  Some
architectures can do a better job of it than just a loop, but I have
hesitated to make too deep a change to architectures I don't understand well.

One thing I have changed in every architecture is that PG_arch_1 is now a
per-folio bit instead of a per-page bit when used for dcache clean/dirty
tracking.  This was something that would have to happen eventually, and it
makes sense to do it now rather than iterate over every page involved in a
cache flush and figure out if it needs to happen.

The point of all this is better performance, and Fengwei Yin has measured
improvement on x86.  I suspect you'll see improvement on your architecture
too.  Try the new will-it-scale test mentioned here:
https://lore.kernel.org/linux-mm/20230206140639.538867-5-fengwei.yin@intel.com/
You'll need to run it on an XFS filesystem and have
CONFIG_TRANSPARENT_HUGEPAGE set.

This patchset is the basis for much of the anonymous large folio work being
done by Ryan, so it's received quite a lot of testing over the last few
months.

This patch (of 38):

Determine if a value lies within a range more efficiently (subtraction +
comparison vs two comparisons and an AND).  It also has useful (under some
circumstances) behaviour if the range exceeds the maximum value of the type.

Convert all the conflicting definitions of in_range() within the kernel;
some can use the generic definition while others need their own definition.

Link: https://lkml.kernel.org/r/20230802151406.3735276-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
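
As a rough illustration of the subtraction-plus-comparison trick described in
"This patch (of 38)" above, a generic range check can be done with a single
compare.  This is a minimal sketch only; the helper name in_range_u64 is made
up here and is not necessarily the exact definition the patch adds:

    /* True iff val is within [start, start + len), using one comparison. */
    static inline bool in_range_u64(u64 val, u64 start, u64 len)
    {
            /* If val < start, val - start wraps around and is >= len. */
            return (val - start) < len;
    }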

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/math64.h>
#include <linux/rbtree.h>

/*
 * Enumerate bits using enum autoincrement.  Define @name as the n-th bit.
 */
#define ENUM_BIT(name)					\
	__ ## name ## _BIT,				\
	name = (1U << __ ## name ## _BIT),		\
	__ ## name ## _SEQ = __ ## name ## _BIT
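
/*
 * Illustrative example (not part of the original header): used inside an
 * enum, consecutive ENUM_BIT() entries expand into distinct single-bit
 * flags, e.g.
 *
 *	enum { ENUM_BIT(FLAG_A), ENUM_BIT(FLAG_B) };
 *
 * defines FLAG_A == 0x1 and FLAG_B == 0x2, with the hidden __*_BIT and
 * __*_SEQ enumerators advancing the bit counter by one each time.
 */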

static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier; see the comments for
	 * waitqueue_active() for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active() is implied by some of the preceding code, e.g.
	 * a value-returning atomic operation (atomic_dec_and_return, ...) or
	 * an unlock/lock sequence.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
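
/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * caller whose immediately preceding value-returning atomic op already
 * provides the full barrier can skip the extra smp_mb():
 *
 *	if (atomic_dec_return(&counter) == 0)
 *		cond_wake_up_nomb(&wait_queue);
 */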

static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
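
/*
 * For example (illustrative): mult_perc(4096, 75) evaluates to
 * div_u64(4096 * 75, 100) == 3072, i.e. 75 percent of 4096.
 */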

/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

/*
 * Simple bytenr-based rb_tree related structures.
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};
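
/*
 * Illustrative example (an assumption, not from the original header): a user
 * embeds these members at the start of its own structure, e.g.
 *
 *	struct my_extent_entry {
 *		struct rb_node rb_node;
 *		u64 bytenr;
 *		... caller-specific fields follow ...
 *	};
 *
 * so the rb_simple_*() helpers below can treat any node in the tree as a
 * struct rb_simple_node.
 */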

/* Return the rb_node whose bytenr equals @bytenr, or NULL if none exists. */
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr.  If there is no entry
 * at or after @bytenr, return NULL.
 */
static inline struct rb_node *rb_simple_search_first(struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}

			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}

/*
 * Link @node, keyed by @bytenr, into @root.  Return NULL on success, or the
 * existing rb_node if an entry with the same bytenr is already present.
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/* Return true if every bit in [start, start + nbits) is set in @addr. */
static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

/* Return true if every bit in [start, start + nbits) is clear in @addr. */
static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}

#endif