kernel: Update to version 5.15.176

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
This commit is contained in:
Hauke Mehrtens
2025-02-16 22:23:50 +00:00
committed by Lucas Asvio
parent b18c7e2646
commit 46d8cb1028
40 changed files with 203 additions and 203 deletions

View File

@@ -1,2 +1,2 @@
-LINUX_VERSION-5.15 = .175
-LINUX_KERNEL_HASH-5.15.175 = 8fd8bbc80e7aae30aaca3b40576b283010b5e84e70f6fea1573589155ce8a9d0
+LINUX_VERSION-5.15 = .176
+LINUX_KERNEL_HASH-5.15.176 = a84ab5328c8a5a0e5c4a39a06b07479a7769c73af49a4c9ce59eeb644829d3b1

View File

@@ -15,7 +15,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.org>
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
-@@ -1642,6 +1642,109 @@ command_cleanup:
+@@ -1643,6 +1643,109 @@ command_cleanup:
}
/*
@@ -125,7 +125,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.org>
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*/
-@@ -5488,6 +5591,7 @@ static const struct hc_driver xhci_hc_dr
+@@ -5501,6 +5604,7 @@ static const struct hc_driver xhci_hc_dr
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,

View File

@@ -54,7 +54,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1423,8 +1423,9 @@ struct urb_priv {
+@@ -1424,8 +1424,9 @@ struct urb_priv {
* Each segment table entry is 4*32bits long. 1K seems like an ok size:
* (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
* meaning 64 ring segments.

View File

@@ -26,7 +26,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -674,9 +674,9 @@ deq_found:
+@@ -675,9 +675,9 @@ deq_found:
}
if ((ep->ep_state & SET_DEQ_PENDING)) {

View File

@@ -36,7 +36,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -664,6 +664,15 @@ static int xhci_move_dequeue_past_td(str
+@@ -665,6 +665,15 @@ static int xhci_move_dequeue_past_td(str
} while (!cycle_found || !td_last_trb_found);
deq_found:
@@ -54,7 +54,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
addr = xhci_trb_virt_to_dma(new_seg, new_deq);
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1664,6 +1664,7 @@ struct xhci_hcd {
+@@ -1665,6 +1665,7 @@ struct xhci_hcd {
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)

View File

@@ -14,7 +14,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -309,6 +309,12 @@ static inline int room_on_ring(struct xh
+@@ -310,6 +310,12 @@ static inline int room_on_ring(struct xh
return 0;
if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {

View File

@@ -156,7 +156,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
}
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -90,15 +90,16 @@ static bool trb_is_link(union xhci_trb *
@@ -91,15 +91,16 @@ static bool trb_is_link(union xhci_trb *
return TRB_TYPE_LINK_LE32(trb->link.control);
}
@@ -176,7 +176,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
}
static bool link_trb_toggles_cycle(union xhci_trb *trb)
@@ -161,7 +162,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
@@ -162,7 +163,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
/* event ring doesn't have link trbs, check for last trb */
if (ring->type == TYPE_EVENT) {
@@ -186,7 +186,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
ring->dequeue++;
goto out;
}
@@ -174,7 +176,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
@@ -175,7 +177,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
/* All other rings have link trbs */
if (!trb_is_link(ring->dequeue)) {
@@ -196,7 +196,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
xhci_warn(xhci, "Missing link TRB at end of segment\n");
} else {
ring->dequeue++;
@@ -225,7 +228,7 @@ static void inc_enq(struct xhci_hcd *xhc
@@ -226,7 +229,7 @@ static void inc_enq(struct xhci_hcd *xhc
if (!trb_is_link(ring->enqueue))
ring->num_trbs_free--;
@@ -205,7 +205,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
xhci_err(xhci, "Tried to move enqueue past ring segment\n");
return;
}
@@ -3274,7 +3277,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd
@@ -3314,7 +3317,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd
* that clears the EHB.
*/
while (xhci_handle_event(xhci, ir) > 0) {
@@ -214,7 +214,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
continue;
xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
event_ring_deq = ir->event_ring->dequeue;
@@ -3416,7 +3419,8 @@ static int prepare_ring(struct xhci_hcd
@@ -3456,7 +3459,8 @@ static int prepare_ring(struct xhci_hcd
}
}
@@ -226,7 +226,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
}
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -898,8 +898,8 @@ static void xhci_clear_command_ring(stru
@@ -899,8 +899,8 @@ static void xhci_clear_command_ring(stru
seg = ring->deq_seg;
do {
memset(seg->trbs, 0,
@@ -237,7 +237,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
@@ -910,7 +910,7 @@ static void xhci_clear_command_ring(stru
@@ -911,7 +911,7 @@ static void xhci_clear_command_ring(stru
ring->enq_seg = ring->deq_seg;
ring->enqueue = ring->dequeue;
@@ -248,7 +248,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
* when the cycle bit is set to 1.
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1385,6 +1385,7 @@ struct xhci_ring {
@@ -1386,6 +1386,7 @@ struct xhci_ring {
unsigned int num_trbs_free;
unsigned int num_trbs_free_temp;
unsigned int bounce_buf_len;

View File

@@ -63,7 +63,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1666,6 +1666,7 @@ struct xhci_hcd {
@@ -1667,6 +1667,7 @@ struct xhci_hcd {
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
#define XHCI_AVOID_DQ_ON_LINK BIT_ULL(49)

View File

@@ -36,7 +36,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3729,14 +3729,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
@@ -3769,14 +3769,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
unsigned int enqd_len, block_len, trb_buff_len, full_len;
@@ -54,7 +54,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
full_len = urb->transfer_buffer_length;
/* If we have scatter/gather list, we use it. */
if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
@@ -3773,6 +3774,17 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
@@ -3813,6 +3814,17 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
start_cycle = ring->cycle_state;
send_addr = addr;
@@ -72,7 +72,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
/* Queue the TRBs, even if they are zero-length */
for (enqd_len = 0; first_trb || enqd_len < full_len;
enqd_len += trb_buff_len) {
@@ -3785,6 +3797,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
@@ -3825,6 +3837,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;
@@ -86,7 +86,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
first_trb = false;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1667,6 +1667,7 @@ struct xhci_hcd {
@@ -1668,6 +1668,7 @@ struct xhci_hcd {
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
#define XHCI_AVOID_DQ_ON_LINK BIT_ULL(49)
#define XHCI_VLI_TRB_CACHE_BUG BIT_ULL(50)

View File

@@ -13,7 +13,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3729,7 +3729,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
@@ -3769,7 +3769,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
unsigned int enqd_len, block_len, trb_buff_len, full_len;
@@ -22,7 +22,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
u32 field, length_field, remainder, maxpacket;
u64 addr, send_addr;
@@ -3775,14 +3775,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
@@ -3815,14 +3815,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
send_addr = addr;
if (xhci->quirks & XHCI_VLI_SS_BULK_OUT_BUG &&
@@ -40,7 +40,7 @@ Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
}
/* Queue the TRBs, even if they are zero-length */
@@ -3797,7 +3792,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
@@ -3837,7 +3832,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;

View File

@@ -40,7 +40,7 @@ it on BCM4708 family.
/* called during probe() after chip reset completes */
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -158,6 +158,49 @@ int xhci_start(struct xhci_hcd *xhci)
@@ -159,6 +159,49 @@ int xhci_start(struct xhci_hcd *xhci)
return ret;
}
@@ -90,7 +90,7 @@ it on BCM4708 family.
/*
* Reset a halted HC.
*
@@ -635,6 +678,16 @@ static int xhci_run_finished(struct xhci
@@ -636,6 +679,16 @@ static int xhci_run_finished(struct xhci
spin_unlock_irqrestore(&xhci->lock, flags);
return -ENODEV;
}
@@ -109,7 +109,7 @@ it on BCM4708 family.
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1663,6 +1663,7 @@ struct xhci_hcd {
@@ -1664,6 +1664,7 @@ struct xhci_hcd {
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)

View File

@@ -52,7 +52,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2497,6 +2497,109 @@ enum scan_balance {
@@ -2504,6 +2504,109 @@ enum scan_balance {
SCAN_FILE,
};
@@ -162,7 +162,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned. The relative value of each set of LRU lists is determined
@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat
@@ -2972,109 +3075,16 @@ static void shrink_node(pg_data_t *pgdat
unsigned long nr_reclaimed, nr_scanned;
struct lruvec *target_lruvec;
bool reclaimable = false;

View File

@@ -723,7 +723,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
local_lock(&lru_pvecs.lock);
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2821,6 +2821,81 @@ static bool can_age_anon_pages(struct pg
@@ -2828,6 +2828,81 @@ static bool can_age_anon_pages(struct pg
return can_demote(pgdat->node_id, sc);
}

View File

@@ -435,7 +435,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
} else if (PageUnevictable(page)) {
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1142,9 +1142,11 @@ static int __remove_mapping(struct addre
@@ -1149,9 +1149,11 @@ static int __remove_mapping(struct addre
if (PageSwapCache(page)) {
swp_entry_t swap = { .val = page_private(page) };
@@ -448,7 +448,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
__delete_from_swap_cache(page, swap, shadow);
xa_unlock_irq(&mapping->i_pages);
put_swap_page(page, swap);
@@ -2502,6 +2504,9 @@ static void prepare_scan_count(pg_data_t
@@ -2509,6 +2511,9 @@ static void prepare_scan_count(pg_data_t
unsigned long file;
struct lruvec *target_lruvec;
@@ -458,7 +458,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
/*
@@ -2827,6 +2832,17 @@ static bool can_age_anon_pages(struct pg
@@ -2834,6 +2839,17 @@ static bool can_age_anon_pages(struct pg
* shorthand helpers
******************************************************************************/
@@ -476,7 +476,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#define for_each_gen_type_zone(gen, type, zone) \
for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
@@ -2852,6 +2868,745 @@ static struct lruvec __maybe_unused *get
@@ -2859,6 +2875,745 @@ static struct lruvec __maybe_unused *get
return pgdat ? &pgdat->__lruvec : NULL;
}
@@ -1222,7 +1222,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/******************************************************************************
* initialization
******************************************************************************/
@@ -2894,6 +3649,16 @@ static int __init init_lru_gen(void)
@@ -2901,6 +3656,16 @@ static int __init init_lru_gen(void)
};
late_initcall(init_lru_gen);
@@ -1239,7 +1239,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif /* CONFIG_LRU_GEN */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
@@ -2907,6 +3672,11 @@ static void shrink_lruvec(struct lruvec
@@ -2914,6 +3679,11 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;
@@ -1251,7 +1251,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
get_scan_count(lruvec, sc, nr);
/* Record the original scan target for proportional adjustments later */
@@ -3375,6 +4145,9 @@ static void snapshot_refaults(struct mem
@@ -3382,6 +4152,9 @@ static void snapshot_refaults(struct mem
struct lruvec *target_lruvec;
unsigned long refaults;
@@ -1261,7 +1261,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
target_lruvec->refaults[0] = refaults;
@@ -3739,12 +4512,16 @@ unsigned long try_to_free_mem_cgroup_pag
@@ -3746,12 +4519,16 @@ unsigned long try_to_free_mem_cgroup_pag
}
#endif
@@ -1280,7 +1280,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!can_age_anon_pages(pgdat, sc))
return;
@@ -4061,12 +4838,11 @@ restart:
@@ -4068,12 +4845,11 @@ restart:
sc.may_swap = !nr_boost_reclaim;
/*

View File

@@ -263,7 +263,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1409,6 +1409,11 @@ retry:
@@ -1416,6 +1416,11 @@ retry:
if (!sc->may_unmap && page_mapped(page))
goto keep_locked;
@@ -275,7 +275,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctr
@@ -2997,6 +3002,29 @@ static bool positive_ctrl_err(struct ctr
* the aging
******************************************************************************/
@@ -305,7 +305,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* protect pages accessed multiple times through file descriptors */
static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *l
@@ -3008,6 +3036,11 @@ static int page_inc_gen(struct lruvec *l
VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
do {
@@ -317,7 +317,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
new_gen = (old_gen + 1) % MAX_NR_GENS;
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *l
@@ -3022,6 +3055,43 @@ static int page_inc_gen(struct lruvec *l
return new_gen;
}
@@ -361,7 +361,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static void inc_min_seq(struct lruvec *lruvec, int type)
{
struct lru_gen_struct *lrugen = &lruvec->lrugen;
@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pgli
@@ -3221,6 +3291,114 @@ static void lru_gen_age_node(struct pgli
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
}
@@ -476,7 +476,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/******************************************************************************
* the eviction
******************************************************************************/
@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lru
@@ -3257,6 +3435,12 @@ static bool sort_page(struct lruvec *lru
return true;
}

View File

@@ -474,7 +474,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -2853,7 +2855,7 @@ static bool can_age_anon_pages(struct pg
@@ -2860,7 +2862,7 @@ static bool can_age_anon_pages(struct pg
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
@@ -483,7 +483,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
struct pglist_data *pgdat = NODE_DATA(nid);
@@ -2899,6 +2901,371 @@ static bool __maybe_unused seq_is_valid(
@@ -2906,6 +2908,371 @@ static bool __maybe_unused seq_is_valid(
}
/******************************************************************************
@@ -855,7 +855,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* refault feedback loop
******************************************************************************/
@@ -3048,6 +3415,118 @@ static int page_inc_gen(struct lruvec *l
@@ -3055,6 +3422,118 @@ static int page_inc_gen(struct lruvec *l
return new_gen;
}
@@ -974,7 +974,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
{
unsigned long pfn = pte_pfn(pte);
@@ -3066,8 +3545,28 @@ static unsigned long get_pte_pfn(pte_t p
@@ -3073,8 +3552,28 @@ static unsigned long get_pte_pfn(pte_t p
return pfn;
}
@@ -1004,7 +1004,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
struct page *page;
@@ -3082,9 +3581,375 @@ static struct page *get_pfn_page(unsigne
@@ -3089,9 +3588,375 @@ static struct page *get_pfn_page(unsigne
if (page_memcg_rcu(page) != memcg)
return NULL;
@@ -1380,7 +1380,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static void inc_min_seq(struct lruvec *lruvec, int type)
{
struct lru_gen_struct *lrugen = &lruvec->lrugen;
@@ -3136,7 +4001,7 @@ next:
@@ -3143,7 +4008,7 @@ next:
return success;
}
@@ -1389,7 +1389,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
int prev, next;
int type, zone;
@@ -3146,9 +4011,6 @@ static void inc_max_seq(struct lruvec *l
@@ -3153,9 +4018,6 @@ static void inc_max_seq(struct lruvec *l
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -1399,7 +1399,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
for (type = ANON_AND_FILE - 1; type >= 0; type--) {
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;
@@ -3186,10 +4048,76 @@ static void inc_max_seq(struct lruvec *l
@@ -3193,10 +4055,76 @@ static void inc_max_seq(struct lruvec *l
/* make sure preceding modifications appear */
smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
@@ -1477,7 +1477,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
{
@@ -3265,7 +4193,7 @@ static void age_lruvec(struct lruvec *lr
@@ -3272,7 +4200,7 @@ static void age_lruvec(struct lruvec *lr
need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
if (need_aging)
@@ -1486,7 +1486,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
@@ -3274,6 +4202,8 @@ static void lru_gen_age_node(struct pgli
@@ -3281,6 +4209,8 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
@@ -1495,7 +1495,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
@@ -3282,11 +4212,16 @@ static void lru_gen_age_node(struct pgli
@@ -3289,11 +4219,16 @@ static void lru_gen_age_node(struct pgli
cond_resched();
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
@@ -1513,7 +1513,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
@@ -3295,6 +4230,8 @@ void lru_gen_look_around(struct page_vma
@@ -3302,6 +4237,8 @@ void lru_gen_look_around(struct page_vma
unsigned long start;
unsigned long end;
unsigned long addr;
@@ -1522,7 +1522,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
struct page *page = pvmw->page;
struct mem_cgroup *memcg = page_memcg(page);
@@ -3309,6 +4246,9 @@ void lru_gen_look_around(struct page_vma
@@ -3316,6 +4253,9 @@ void lru_gen_look_around(struct page_vma
if (spin_is_contended(pvmw->ptl))
return;
@@ -1532,7 +1532,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
@@ -3338,13 +4278,15 @@ void lru_gen_look_around(struct page_vma
@@ -3345,13 +4285,15 @@ void lru_gen_look_around(struct page_vma
if (!pte_young(pte[i]))
continue;
@@ -1549,7 +1549,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (pte_dirty(pte[i]) && !PageDirty(page) &&
!(PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page)))
@@ -3360,7 +4302,11 @@ void lru_gen_look_around(struct page_vma
@@ -3367,7 +4309,11 @@ void lru_gen_look_around(struct page_vma
arch_leave_lazy_mmu_mode();
rcu_read_unlock();
@@ -1562,7 +1562,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
page = pte_page(pte[i]);
activate_page(page);
@@ -3372,8 +4318,10 @@ void lru_gen_look_around(struct page_vma
@@ -3379,8 +4325,10 @@ void lru_gen_look_around(struct page_vma
if (!mem_cgroup_trylock_pages(memcg))
return;
@@ -1575,7 +1575,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
page = compound_head(pte_page(pte[i]));
@@ -3384,10 +4332,14 @@ void lru_gen_look_around(struct page_vma
@@ -3391,10 +4339,14 @@ void lru_gen_look_around(struct page_vma
if (old_gen < 0 || old_gen == new_gen)
continue;
@@ -1592,7 +1592,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mem_cgroup_unlock_pages();
}
@@ -3670,6 +4622,7 @@ static int evict_pages(struct lruvec *lr
@@ -3677,6 +4629,7 @@ static int evict_pages(struct lruvec *lr
struct page *page;
enum vm_event_item item;
struct reclaim_stat stat;
@@ -1600,7 +1600,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -3706,6 +4659,10 @@ static int evict_pages(struct lruvec *lr
@@ -3713,6 +4666,10 @@ static int evict_pages(struct lruvec *lr
move_pages_to_lru(lruvec, &list);
@@ -1611,7 +1611,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
if (!cgroup_reclaim(sc))
__count_vm_events(item, reclaimed);
@@ -3722,6 +4679,11 @@ static int evict_pages(struct lruvec *lr
@@ -3729,6 +4686,11 @@ static int evict_pages(struct lruvec *lr
return scanned;
}
@@ -1623,7 +1623,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
bool can_swap)
{
@@ -3747,7 +4709,8 @@ static unsigned long get_nr_to_scan(stru
@@ -3754,7 +4716,8 @@ static unsigned long get_nr_to_scan(stru
if (current_is_kswapd())
return 0;
@@ -1633,7 +1633,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}
@@ -3761,6 +4724,8 @@ static void lru_gen_shrink_lruvec(struct
@@ -3768,6 +4731,8 @@ static void lru_gen_shrink_lruvec(struct
blk_start_plug(&plug);
@@ -1642,7 +1642,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
while (true) {
int delta;
int swappiness;
@@ -3788,6 +4753,8 @@ static void lru_gen_shrink_lruvec(struct
@@ -3795,6 +4760,8 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
@@ -1651,7 +1651,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
blk_finish_plug(&plug);
}
@@ -3804,15 +4771,21 @@ void lru_gen_init_lruvec(struct lruvec *
@@ -3811,15 +4778,21 @@ void lru_gen_init_lruvec(struct lruvec *
for_each_gen_type_zone(gen, type, zone)
INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
@@ -1673,7 +1673,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
int nid;
for_each_node(nid) {
@@ -3820,6 +4793,11 @@ void lru_gen_exit_memcg(struct mem_cgrou
@@ -3827,6 +4800,11 @@ void lru_gen_exit_memcg(struct mem_cgrou
VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
sizeof(lruvec->lrugen.nr_pages)));

View File

@@ -149,7 +149,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* Allocation order */
s8 order;
@@ -4202,6 +4208,19 @@ static void lru_gen_age_node(struct pgli
@@ -4209,6 +4215,19 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
@@ -169,7 +169,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
set_mm_walk(pgdat);
memcg = mem_cgroup_iter(NULL, NULL, NULL);
@@ -4613,7 +4632,8 @@ static int isolate_pages(struct lruvec *
@@ -4620,7 +4639,8 @@ static int isolate_pages(struct lruvec *
return scanned;
}
@@ -179,7 +179,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
int type;
int scanned;
@@ -4676,6 +4696,9 @@ static int evict_pages(struct lruvec *lr
@@ -4683,6 +4703,9 @@ static int evict_pages(struct lruvec *lr
sc->nr_reclaimed += reclaimed;
@@ -189,7 +189,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return scanned;
}
@@ -4685,9 +4708,8 @@ static int evict_pages(struct lruvec *lr
@@ -4692,9 +4715,8 @@ static int evict_pages(struct lruvec *lr
* reclaim.
*/
static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
@@ -200,7 +200,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
@@ -4697,8 +4719,8 @@ static unsigned long get_nr_to_scan(stru
@@ -4704,8 +4726,8 @@ static unsigned long get_nr_to_scan(stru
(mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
return 0;
@@ -211,7 +211,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return nr_to_scan;
/* skip the aging path at the default priority */
@@ -4715,10 +4737,68 @@ done:
@@ -4722,10 +4744,68 @@ done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}
@@ -280,7 +280,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lru_add_drain();
@@ -4738,21 +4818,28 @@ static void lru_gen_shrink_lruvec(struct
@@ -4745,21 +4825,28 @@ static void lru_gen_shrink_lruvec(struct
else
swappiness = 0;

View File

@@ -219,7 +219,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -2841,6 +2842,14 @@ static bool can_age_anon_pages(struct pg
@@ -2848,6 +2849,14 @@ static bool can_age_anon_pages(struct pg
#ifdef CONFIG_LRU_GEN
@@ -234,7 +234,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/******************************************************************************
* shorthand helpers
******************************************************************************/
@@ -3717,7 +3726,8 @@ static void walk_pmd_range_locked(pud_t
@@ -3724,7 +3733,8 @@ static void walk_pmd_range_locked(pud_t
goto next;
if (!pmd_trans_huge(pmd[i])) {
@@ -244,7 +244,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
}
@@ -3815,10 +3825,12 @@ restart:
@@ -3822,10 +3832,12 @@ restart:
walk->mm_stats[MM_NONLEAF_TOTAL]++;
#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
@@ -260,7 +260,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif
if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
continue;
@@ -4080,7 +4092,7 @@ static bool try_to_inc_max_seq(struct lr
@@ -4087,7 +4099,7 @@ static bool try_to_inc_max_seq(struct lr
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
@@ -269,7 +269,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
}
@@ -4846,6 +4858,208 @@ done:
@@ -4853,6 +4865,208 @@ done:
}
/******************************************************************************
@@ -478,7 +478,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* initialization
******************************************************************************/
@@ -4855,6 +5069,7 @@ void lru_gen_init_lruvec(struct lruvec *
@@ -4862,6 +5076,7 @@ void lru_gen_init_lruvec(struct lruvec *
struct lru_gen_struct *lrugen = &lruvec->lrugen;
lrugen->max_seq = MIN_NR_GENS + 1;
@@ -486,7 +486,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
for_each_gen_type_zone(gen, type, zone)
INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
@@ -4894,6 +5109,9 @@ static int __init init_lru_gen(void)
@@ -4901,6 +5116,9 @@ static int __init init_lru_gen(void)
BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);

View File

@@ -81,7 +81,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* the multi-gen LRU sizes, eventually consistent */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4064,6 +4064,7 @@ static void inc_max_seq(struct lruvec *l
@@ -4071,6 +4071,7 @@ static void inc_max_seq(struct lruvec *l
for (type = 0; type < ANON_AND_FILE; type++)
reset_ctrl_pos(lruvec, type, false);
@@ -89,7 +89,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* make sure preceding modifications appear */
smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
@@ -4193,7 +4194,7 @@ static bool should_run_aging(struct lruv
@@ -4200,7 +4201,7 @@ static bool should_run_aging(struct lruv
return false;
}
@@ -98,7 +98,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
bool need_aging;
unsigned long nr_to_scan;
@@ -4207,16 +4208,36 @@ static void age_lruvec(struct lruvec *lr
@@ -4214,16 +4215,36 @@ static void age_lruvec(struct lruvec *lr
mem_cgroup_calculate_protection(NULL, memcg);
if (mem_cgroup_below_min(memcg))
@@ -136,7 +136,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
VM_WARN_ON_ONCE(!current_is_kswapd());
@@ -4239,12 +4260,32 @@ static void lru_gen_age_node(struct pgli
@@ -4246,12 +4267,32 @@ static void lru_gen_age_node(struct pgli
do {
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
@@ -170,7 +170,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
/*
@@ -5002,6 +5043,28 @@ unlock:
@@ -5009,6 +5050,28 @@ unlock:
* sysfs interface
******************************************************************************/
@@ -199,7 +199,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
unsigned int caps = 0;
@@ -5050,6 +5113,7 @@ static struct kobj_attribute lru_gen_ena
@@ -5057,6 +5120,7 @@ static struct kobj_attribute lru_gen_ena
);
static struct attribute *lru_gen_attrs[] = {
@@ -207,7 +207,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
&lru_gen_enabled_attr.attr,
NULL
};
@@ -5065,12 +5129,16 @@ static struct attribute_group lru_gen_at
@@ -5072,12 +5136,16 @@ static struct attribute_group lru_gen_at
void lru_gen_init_lruvec(struct lruvec *lruvec)
{

View File

@@ -84,7 +84,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -3968,12 +3969,40 @@ static void clear_mm_walk(void)
@@ -3975,12 +3976,40 @@ static void clear_mm_walk(void)
kfree(walk);
}
@@ -126,7 +126,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
@@ -4019,7 +4048,7 @@ next:
@@ -4026,7 +4055,7 @@ next:
return success;
}
@@ -135,7 +135,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
int prev, next;
int type, zone;
@@ -4033,9 +4062,13 @@ static void inc_max_seq(struct lruvec *l
@@ -4040,9 +4069,13 @@ static void inc_max_seq(struct lruvec *l
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;
@@ -151,7 +151,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
/*
@@ -4072,7 +4105,7 @@ static void inc_max_seq(struct lruvec *l
@@ -4079,7 +4112,7 @@ static void inc_max_seq(struct lruvec *l
}
static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
@@ -160,7 +160,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
bool success;
struct lru_gen_mm_walk *walk;
@@ -4093,7 +4126,7 @@ static bool try_to_inc_max_seq(struct lr
@@ -4100,7 +4133,7 @@ static bool try_to_inc_max_seq(struct lr
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
@@ -169,7 +169,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
}
@@ -4107,7 +4140,7 @@ static bool try_to_inc_max_seq(struct lr
@@ -4114,7 +4147,7 @@ static bool try_to_inc_max_seq(struct lr
walk->lruvec = lruvec;
walk->max_seq = max_seq;
walk->can_swap = can_swap;
@@ -178,7 +178,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
do {
success = iterate_mm_list(lruvec, walk, &mm);
@@ -4127,7 +4160,7 @@ done:
@@ -4134,7 +4167,7 @@ done:
VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
@@ -187,7 +187,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* either this sees any waiters or they will see updated max_seq */
if (wq_has_sleeper(&lruvec->mm_state.wait))
wake_up_all(&lruvec->mm_state.wait);
@@ -4225,7 +4258,7 @@ static bool age_lruvec(struct lruvec *lr
@@ -4232,7 +4265,7 @@ static bool age_lruvec(struct lruvec *lr
}
if (need_aging)
@@ -196,7 +196,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return true;
}
@@ -4784,7 +4817,7 @@ static unsigned long get_nr_to_scan(stru
@@ -4791,7 +4824,7 @@ static unsigned long get_nr_to_scan(stru
if (current_is_kswapd())
return 0;
@@ -205,7 +205,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return nr_to_scan;
done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
@@ -5124,6 +5157,361 @@ static struct attribute_group lru_gen_at
@@ -5131,6 +5164,361 @@ static struct attribute_group lru_gen_at
};
/******************************************************************************
@@ -567,7 +567,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* initialization
******************************************************************************/
@@ -5180,6 +5568,9 @@ static int __init init_lru_gen(void)
@@ -5187,6 +5575,9 @@ static int __init init_lru_gen(void)
if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
pr_err("lru_gen: failed to create sysfs group\n");

View File

@@ -21,7 +21,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4165,8 +4165,6 @@ done:
@@ -4172,8 +4172,6 @@ done:
if (wq_has_sleeper(&lruvec->mm_state.wait))
wake_up_all(&lruvec->mm_state.wait);

View File

@@ -45,7 +45,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4723,10 +4723,13 @@ static int evict_pages(struct lruvec *lr
@@ -4730,10 +4730,13 @@ static int evict_pages(struct lruvec *lr
int scanned;
int reclaimed;
LIST_HEAD(list);
@@ -59,7 +59,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -4743,20 +4746,37 @@ static int evict_pages(struct lruvec *lr
@@ -4750,20 +4753,37 @@ static int evict_pages(struct lruvec *lr
if (list_empty(&list))
return scanned;
@@ -107,7 +107,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
spin_lock_irq(&lruvec->lru_lock);
@@ -4778,7 +4798,13 @@ static int evict_pages(struct lruvec *lr
@@ -4785,7 +4805,13 @@ static int evict_pages(struct lruvec *lr
mem_cgroup_uncharge_list(&list);
free_unref_page_list(&list);

View File

@@ -75,7 +75,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* Return whether the accessed bit is supported on the local CPU.
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3727,7 +3727,7 @@ static void walk_pmd_range_locked(pud_t
@@ -3734,7 +3734,7 @@ static void walk_pmd_range_locked(pud_t
goto next;
if (!pmd_trans_huge(pmd[i])) {
@@ -84,7 +84,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
get_cap(LRU_GEN_NONLEAF_YOUNG))
pmdp_test_and_clear_young(vma, addr, pmd + i);
goto next;
@@ -3825,14 +3825,14 @@ restart:
@@ -3832,14 +3832,14 @@ restart:
#endif
walk->mm_stats[MM_NONLEAF_TOTAL]++;
@@ -102,7 +102,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
continue;
@@ -5132,7 +5132,7 @@ static ssize_t show_enabled(struct kobje
@@ -5139,7 +5139,7 @@ static ssize_t show_enabled(struct kobje
if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
caps |= BIT(LRU_GEN_MM_WALK);

View File

@@ -36,7 +36,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3024,13 +3024,16 @@ void lru_gen_migrate_mm(struct mm_struct
@@ -3031,13 +3031,16 @@ void lru_gen_migrate_mm(struct mm_struct
if (mem_cgroup_disabled())
return;

View File

@@ -182,7 +182,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3486,7 +3486,10 @@ static int should_skip_vma(unsigned long
@@ -3493,7 +3493,10 @@ static int should_skip_vma(unsigned long
if (is_vm_hugetlb_page(vma))
return true;

View File

@@ -173,7 +173,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2910,7 +2910,7 @@ static int get_nr_gens(struct lruvec *lr
@@ -2917,7 +2917,7 @@ static int get_nr_gens(struct lruvec *lr
static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
{
@@ -182,7 +182,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
@@ -3316,7 +3316,7 @@ struct ctrl_pos {
@@ -3323,7 +3323,7 @@ struct ctrl_pos {
static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
struct ctrl_pos *pos)
{
@@ -191,7 +191,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
pos->refaulted = lrugen->avg_refaulted[type][tier] +
@@ -3331,7 +3331,7 @@ static void read_ctrl_pos(struct lruvec
@@ -3338,7 +3338,7 @@ static void read_ctrl_pos(struct lruvec
static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
{
int hist, tier;
@@ -200,7 +200,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
@@ -3408,7 +3408,7 @@ static int page_update_gen(struct page *
@@ -3415,7 +3415,7 @@ static int page_update_gen(struct page *
static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
int type = page_is_file_lru(page);
@@ -209,7 +209,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
unsigned long new_flags, old_flags = READ_ONCE(page->flags);
@@ -3453,7 +3453,7 @@ static void update_batch_size(struct lru
@@ -3460,7 +3460,7 @@ static void update_batch_size(struct lru
static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
{
int gen, type, zone;
@@ -218,7 +218,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
walk->batched = 0;
@@ -3979,7 +3979,7 @@ static bool inc_min_seq(struct lruvec *l
@@ -3986,7 +3986,7 @@ static bool inc_min_seq(struct lruvec *l
{
int zone;
int remaining = MAX_LRU_BATCH;
@@ -227,7 +227,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
if (type == LRU_GEN_ANON && !can_swap)
@@ -4015,7 +4015,7 @@ static bool try_to_inc_min_seq(struct lr
@@ -4022,7 +4022,7 @@ static bool try_to_inc_min_seq(struct lr
{
int gen, type, zone;
bool success = false;
@@ -236,7 +236,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
DEFINE_MIN_SEQ(lruvec);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -4036,7 +4036,7 @@ next:
@@ -4043,7 +4043,7 @@ next:
;
}
@@ -245,7 +245,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (can_swap) {
min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
@@ -4058,7 +4058,7 @@ static void inc_max_seq(struct lruvec *l
@@ -4065,7 +4065,7 @@ static void inc_max_seq(struct lruvec *l
{
int prev, next;
int type, zone;
@@ -254,7 +254,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
spin_lock_irq(&lruvec->lru_lock);
@@ -4116,7 +4116,7 @@ static bool try_to_inc_max_seq(struct lr
@@ -4123,7 +4123,7 @@ static bool try_to_inc_max_seq(struct lr
bool success;
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
@@ -263,7 +263,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
@@ -4181,7 +4181,7 @@ static bool should_run_aging(struct lruv
@@ -4188,7 +4188,7 @@ static bool should_run_aging(struct lruv
unsigned long old = 0;
unsigned long young = 0;
unsigned long total = 0;
@@ -272,7 +272,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
for (type = !can_swap; type < ANON_AND_FILE; type++) {
@@ -4466,7 +4466,7 @@ static bool sort_page(struct lruvec *lru
@@ -4473,7 +4473,7 @@ static bool sort_page(struct lruvec *lru
int delta = thp_nr_pages(page);
int refs = page_lru_refs(page);
int tier = lru_tier_from_refs(refs);
@@ -281,7 +281,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
@@ -4566,7 +4566,7 @@ static int scan_pages(struct lruvec *lru
@@ -4573,7 +4573,7 @@ static int scan_pages(struct lruvec *lru
int scanned = 0;
int isolated = 0;
int remaining = MAX_LRU_BATCH;
@@ -290,7 +290,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
VM_WARN_ON_ONCE(!list_empty(list));
@@ -4967,7 +4967,7 @@ done:
@@ -4974,7 +4974,7 @@ done:
static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
{
@@ -299,7 +299,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (lrugen->enabled) {
enum lru_list lru;
@@ -5247,7 +5247,7 @@ static void lru_gen_seq_show_full(struct
@@ -5254,7 +5254,7 @@ static void lru_gen_seq_show_full(struct
int i;
int type, tier;
int hist = lru_hist_from_seq(seq);
@@ -308,7 +308,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
@@ -5296,7 +5296,7 @@ static int lru_gen_seq_show(struct seq_f
@@ -5303,7 +5303,7 @@ static int lru_gen_seq_show(struct seq_f
unsigned long seq;
bool full = !debugfs_real_fops(m->file)->write;
struct lruvec *lruvec = v;
@@ -317,7 +317,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
int nid = lruvec_pgdat(lruvec)->node_id;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
@@ -5549,7 +5549,7 @@ void lru_gen_init_lruvec(struct lruvec *
@@ -5556,7 +5556,7 @@ void lru_gen_init_lruvec(struct lruvec *
{
int i;
int gen, type, zone;

View File

@@ -70,7 +70,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* the exponential moving average of refaulted */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3987,7 +3987,7 @@ static bool inc_min_seq(struct lruvec *l
@@ -3994,7 +3994,7 @@ static bool inc_min_seq(struct lruvec *l
/* prevent cold/hot inversion if force_scan is true */
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
@@ -79,7 +79,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
while (!list_empty(head)) {
struct page *page = lru_to_page(head);
@@ -3998,7 +3998,7 @@ static bool inc_min_seq(struct lruvec *l
@@ -4005,7 +4005,7 @@ static bool inc_min_seq(struct lruvec *l
VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
new_gen = page_inc_gen(lruvec, page, false);
@@ -88,7 +88,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!--remaining)
return false;
@@ -4026,7 +4026,7 @@ static bool try_to_inc_min_seq(struct lr
@@ -4033,7 +4033,7 @@ static bool try_to_inc_min_seq(struct lr
gen = lru_gen_from_seq(min_seq[type]);
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
@@ -97,7 +97,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
goto next;
}
@@ -4491,7 +4491,7 @@ static bool sort_page(struct lruvec *lru
@@ -4498,7 +4498,7 @@ static bool sort_page(struct lruvec *lru
/* promoted */
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
@@ -106,7 +106,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return true;
}
@@ -4500,7 +4500,7 @@ static bool sort_page(struct lruvec *lru
@@ -4507,7 +4507,7 @@ static bool sort_page(struct lruvec *lru
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
gen = page_inc_gen(lruvec, page, false);
@@ -115,7 +115,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
lrugen->protected[hist][type][tier - 1] + delta);
@@ -4512,7 +4512,7 @@ static bool sort_page(struct lruvec *lru
@@ -4519,7 +4519,7 @@ static bool sort_page(struct lruvec *lru
if (PageLocked(page) || PageWriteback(page) ||
(type == LRU_GEN_FILE && PageDirty(page))) {
gen = page_inc_gen(lruvec, page, true);
@@ -124,7 +124,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return true;
}
@@ -4579,7 +4579,7 @@ static int scan_pages(struct lruvec *lru
@@ -4586,7 +4586,7 @@ static int scan_pages(struct lruvec *lru
for (zone = sc->reclaim_idx; zone >= 0; zone--) {
LIST_HEAD(moved);
int skipped = 0;
@@ -133,7 +133,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
while (!list_empty(head)) {
struct page *page = lru_to_page(head);
@@ -4980,7 +4980,7 @@ static bool __maybe_unused state_is_vali
@@ -4987,7 +4987,7 @@ static bool __maybe_unused state_is_vali
int gen, type, zone;
for_each_gen_type_zone(gen, type, zone) {
@@ -142,7 +142,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return false;
}
}
@@ -5025,7 +5025,7 @@ static bool drain_evictable(struct lruve
@@ -5032,7 +5032,7 @@ static bool drain_evictable(struct lruve
int remaining = MAX_LRU_BATCH;
for_each_gen_type_zone(gen, type, zone) {
@@ -151,7 +151,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
while (!list_empty(head)) {
bool success;
@@ -5558,7 +5558,7 @@ void lru_gen_init_lruvec(struct lruvec *
@@ -5565,7 +5565,7 @@ void lru_gen_init_lruvec(struct lruvec *
lrugen->timestamps[i] = jiffies;
for_each_gen_type_zone(gen, type, zone)

View File

@@ -61,7 +61,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
@@ -4722,8 +4732,7 @@ static int isolate_pages(struct lruvec *
@@ -4729,8 +4739,7 @@ static int isolate_pages(struct lruvec *
return scanned;
}
@@ -71,7 +71,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
int type;
int scanned;
@@ -4812,9 +4821,6 @@ retry:
@@ -4819,9 +4828,6 @@ retry:
goto retry;
}
@@ -81,7 +81,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return scanned;
}
@@ -4853,68 +4859,26 @@ done:
@@ -4860,68 +4866,26 @@ done:
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}
@@ -159,7 +159,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lru_add_drain();
@@ -4938,7 +4902,7 @@ static void lru_gen_shrink_lruvec(struct
@@ -4945,7 +4909,7 @@ static void lru_gen_shrink_lruvec(struct
if (!nr_to_scan)
goto done;
@@ -168,7 +168,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!delta)
goto done;
@@ -4946,7 +4910,7 @@ static void lru_gen_shrink_lruvec(struct
@@ -4953,7 +4917,7 @@ static void lru_gen_shrink_lruvec(struct
if (scanned >= nr_to_scan)
break;
@@ -177,7 +177,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
break;
cond_resched();
@@ -5393,7 +5357,7 @@ static int run_eviction(struct lruvec *l
@@ -5400,7 +5364,7 @@ static int run_eviction(struct lruvec *l
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;

View File

@@ -45,7 +45,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned long last_reclaimed;
#endif
@@ -4184,7 +4183,7 @@ done:
@@ -4191,7 +4190,7 @@ done:
return true;
}
@@ -54,7 +54,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
{
int gen, type, zone;
@@ -4193,6 +4192,13 @@ static bool should_run_aging(struct lruv
@@ -4200,6 +4199,13 @@ static bool should_run_aging(struct lruv
unsigned long total = 0;
struct lru_gen_page *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -68,7 +68,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
for (type = !can_swap; type < ANON_AND_FILE; type++) {
unsigned long seq;
@@ -4221,8 +4227,6 @@ static bool should_run_aging(struct lruv
@@ -4228,8 +4234,6 @@ static bool should_run_aging(struct lruv
* stalls when the number of generations reaches MIN_NR_GENS. Hence, the
* ideal number of generations is MIN_NR_GENS+1.
*/
@@ -77,7 +77,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
return false;
@@ -4241,40 +4245,54 @@ static bool should_run_aging(struct lruv
@@ -4248,40 +4252,54 @@ static bool should_run_aging(struct lruv
return false;
}
@@ -153,7 +153,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
/* to protect the working set of the last N jiffies */
@@ -4283,46 +4301,32 @@ static unsigned long lru_gen_min_ttl __r
@@ -4290,46 +4308,32 @@ static unsigned long lru_gen_min_ttl __r
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
@@ -207,7 +207,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
if (mutex_trylock(&oom_lock)) {
struct oom_control oc = {
@@ -4830,33 +4834,27 @@ retry:
@@ -4837,33 +4841,27 @@ retry:
* reclaim.
*/
static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
@@ -247,7 +247,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
@@ -4875,9 +4873,7 @@ static unsigned long get_nr_to_reclaim(s
@@ -4882,9 +4880,7 @@ static unsigned long get_nr_to_reclaim(s
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
@@ -257,7 +257,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
lru_add_drain();
@@ -4898,13 +4894,13 @@ static void lru_gen_shrink_lruvec(struct
@@ -4905,13 +4901,13 @@ static void lru_gen_shrink_lruvec(struct
else
swappiness = 0;
@@ -274,7 +274,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
scanned += delta;
if (scanned >= nr_to_scan)
@@ -4916,10 +4912,6 @@ static void lru_gen_shrink_lruvec(struct
@@ -4923,10 +4919,6 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}

View File

@@ -21,7 +21,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4183,68 +4183,6 @@ done:
@@ -4190,68 +4190,6 @@ done:
return true;
}
@@ -90,7 +90,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
{
int gen, type, zone;
@@ -4828,6 +4766,68 @@ retry:
@@ -4835,6 +4773,68 @@ retry:
return scanned;
}

View File

@@ -385,7 +385,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* Allocation order */
s8 order;
@@ -2880,6 +2877,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
@@ -2887,6 +2884,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
@@ -395,7 +395,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
@@ -4169,8 +4169,7 @@ done:
@@ -4176,8 +4176,7 @@ done:
if (sc->priority <= DEF_PRIORITY - 2)
wait_event_killable(lruvec->mm_state.wait,
max_seq < READ_ONCE(lrugen->max_seq));
@@ -405,7 +405,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
@@ -4243,8 +4242,6 @@ static void lru_gen_age_node(struct pgli
@@ -4250,8 +4249,6 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
@@ -414,7 +414,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* check the order to exclude compaction-induced reclaim */
if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
@@ -4833,8 +4830,7 @@ static bool should_run_aging(struct lruv
@@ -4840,8 +4837,7 @@ static bool should_run_aging(struct lruv
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
@@ -424,7 +424,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -4851,10 +4847,8 @@ static unsigned long get_nr_to_scan(stru
@@ -4858,10 +4854,8 @@ static unsigned long get_nr_to_scan(stru
if (sc->priority == DEF_PRIORITY)
return nr_to_scan;
@@ -436,7 +436,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
@@ -4863,29 +4857,18 @@ static unsigned long get_nr_to_reclaim(s
@@ -4870,29 +4864,18 @@ static unsigned long get_nr_to_reclaim(s
if (!global_reclaim(sc))
return -1;
@@ -468,7 +468,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (sc->may_swap)
swappiness = get_swappiness(lruvec, sc);
@@ -4895,7 +4878,7 @@ static void lru_gen_shrink_lruvec(struct
@@ -4902,7 +4885,7 @@ static void lru_gen_shrink_lruvec(struct
swappiness = 0;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
@@ -477,7 +477,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
break;
delta = evict_pages(lruvec, sc, swappiness);
@@ -4912,10 +4895,250 @@ static void lru_gen_shrink_lruvec(struct
@@ -4919,10 +4902,250 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
@@ -728,7 +728,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/******************************************************************************
* state change
@@ -5370,11 +5593,11 @@ static int run_cmd(char cmd, int memcg_i
@@ -5377,11 +5600,11 @@ static int run_cmd(char cmd, int memcg_i
if (!mem_cgroup_disabled()) {
rcu_read_lock();
@@ -743,7 +743,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
rcu_read_unlock();
if (!memcg)
@@ -5521,6 +5744,19 @@ void lru_gen_init_lruvec(struct lruvec *
@@ -5528,6 +5751,19 @@ void lru_gen_init_lruvec(struct lruvec *
}
#ifdef CONFIG_MEMCG
@@ -763,7 +763,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
INIT_LIST_HEAD(&memcg->mm_list.fifo);
@@ -5544,7 +5780,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
@@ -5551,7 +5787,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
}
@@ -834,7 +834,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
static int __init init_lru_gen(void)
{
@@ -5571,6 +5869,10 @@ static void lru_gen_shrink_lruvec(struct
@@ -5578,6 +5876,10 @@ static void lru_gen_shrink_lruvec(struct
{
}
@@ -845,7 +845,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif /* CONFIG_LRU_GEN */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
@@ -5584,7 +5886,7 @@ static void shrink_lruvec(struct lruvec
@@ -5591,7 +5893,7 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;
@@ -854,7 +854,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
@@ -5826,6 +6128,11 @@ static void shrink_node(pg_data_t *pgdat
@@ -5833,6 +6135,11 @@ static void shrink_node(pg_data_t *pgdat
struct lruvec *target_lruvec;
bool reclaimable = false;

View File

@@ -35,7 +35,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2905,6 +2905,9 @@ static int get_swappiness(struct lruvec
@@ -2912,6 +2912,9 @@ static int get_swappiness(struct lruvec
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -45,7 +45,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!can_demote(pgdat->node_id, sc) &&
mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
return 0;
@@ -3952,7 +3955,7 @@ static void walk_mm(struct lruvec *lruve
@@ -3959,7 +3962,7 @@ static void walk_mm(struct lruvec *lruve
} while (err == -EAGAIN);
}
@@ -54,7 +54,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
{
struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
@@ -3960,7 +3963,7 @@ static struct lru_gen_mm_walk *set_mm_wa
@@ -3967,7 +3970,7 @@ static struct lru_gen_mm_walk *set_mm_wa
VM_WARN_ON_ONCE(walk);
walk = &pgdat->mm_walk;
@@ -63,7 +63,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
VM_WARN_ON_ONCE(current_is_kswapd());
walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -4146,7 +4149,7 @@ static bool try_to_inc_max_seq(struct lr
@@ -4153,7 +4156,7 @@ static bool try_to_inc_max_seq(struct lr
goto done;
}
@@ -72,7 +72,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (!walk) {
success = iterate_mm_list_nowalk(lruvec, max_seq);
goto done;
@@ -4215,8 +4218,6 @@ static bool lruvec_is_reclaimable(struct
@@ -4222,8 +4225,6 @@ static bool lruvec_is_reclaimable(struct
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MIN_SEQ(lruvec);
@@ -81,7 +81,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* see the comment on lru_gen_page */
gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
@@ -4472,12 +4473,8 @@ static bool isolate_page(struct lruvec *
@@ -4479,12 +4480,8 @@ static bool isolate_page(struct lruvec *
{
bool success;
@@ -95,7 +95,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(PageDirty(page) ||
(PageAnon(page) && !PageSwapCache(page))))
return false;
@@ -4574,9 +4571,8 @@ static int scan_pages(struct lruvec *lru
@@ -4581,9 +4578,8 @@ static int scan_pages(struct lruvec *lru
__count_vm_events(PGSCAN_ANON + type, isolated);
/*
@@ -107,7 +107,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
*/
return isolated || !remaining ? scanned : 0;
}
@@ -4836,8 +4832,7 @@ static long get_nr_to_scan(struct lruvec
@@ -4843,8 +4839,7 @@ static long get_nr_to_scan(struct lruvec
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
@@ -117,7 +117,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return 0;
if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
@@ -4865,17 +4860,14 @@ static bool try_to_shrink_lruvec(struct
@@ -4872,17 +4867,14 @@ static bool try_to_shrink_lruvec(struct
long nr_to_scan;
unsigned long scanned = 0;
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
@@ -140,7 +140,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
if (nr_to_scan <= 0)
@@ -5005,12 +4997,13 @@ static void lru_gen_shrink_lruvec(struct
@@ -5012,12 +5004,13 @@ static void lru_gen_shrink_lruvec(struct
struct blk_plug plug;
VM_WARN_ON_ONCE(global_reclaim(sc));
@@ -155,7 +155,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
if (try_to_shrink_lruvec(lruvec, sc))
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
@@ -5066,11 +5059,19 @@ static void lru_gen_shrink_node(struct p
@@ -5073,11 +5066,19 @@ static void lru_gen_shrink_node(struct p
VM_WARN_ON_ONCE(!global_reclaim(sc));
@@ -176,7 +176,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
set_initial_priority(pgdat, sc);
@@ -5088,7 +5089,7 @@ static void lru_gen_shrink_node(struct p
@@ -5095,7 +5096,7 @@ static void lru_gen_shrink_node(struct p
clear_mm_walk();
blk_finish_plug(&plug);
@@ -185,7 +185,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* kswapd should never fail */
pgdat->kswapd_failures = 0;
}
@@ -5656,7 +5657,7 @@ static ssize_t lru_gen_seq_write(struct
@@ -5663,7 +5664,7 @@ static ssize_t lru_gen_seq_write(struct
set_task_reclaim_state(current, &sc.reclaim_state);
flags = memalloc_noreclaim_save();
blk_start_plug(&plug);

View File

@@ -23,7 +23,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4144,7 +4144,7 @@ static bool try_to_inc_max_seq(struct lr
@@ -4151,7 +4151,7 @@ static bool try_to_inc_max_seq(struct lr
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/

View File

@@ -25,7 +25,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4934,18 +4934,20 @@ static int shrink_one(struct lruvec *lru
@@ -4941,18 +4941,20 @@ static int shrink_one(struct lruvec *lru
static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
@@ -48,7 +48,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
rcu_read_lock();
@@ -4969,14 +4971,22 @@ restart:
@@ -4976,14 +4978,22 @@ restart:
op = shrink_one(lruvec, sc);
@@ -74,7 +74,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
/* restart if raced with lru_gen_rotate_memcg() */
if (gen != get_nulls_value(pos))
goto restart;
@@ -4985,11 +4995,6 @@ restart:
@@ -4992,11 +5002,6 @@ restart:
bin = get_memcg_bin(bin + 1);
if (bin != first_bin)
goto restart;

View File

@@ -128,7 +128,7 @@ Link: https://lore.kernel.org/r/20220118173504.2867523-2-michael@walle.cc
/**
* of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
@@ -1685,47 +1613,6 @@ free:
@@ -1690,47 +1618,6 @@ free:
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
/**

View File

@@ -44,7 +44,7 @@ Subject: [PATCH] net/usb/qmi_wwan: add MeigLink modem support
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
@@ -1160,6 +1165,11 @@ static const struct usb_device_id option
@@ -1162,6 +1167,11 @@ static const struct usb_device_id option
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
.driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
@@ -56,7 +56,7 @@ Subject: [PATCH] net/usb/qmi_wwan: add MeigLink modem support
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1201,6 +1211,11 @@ static const struct usb_device_id option
@@ -1203,6 +1213,11 @@ static const struct usb_device_id option
.driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },

View File

@@ -77,7 +77,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
@@ -2005,9 +2019,11 @@ static void __sk_free(struct sock *sk)
@@ -2008,9 +2022,11 @@ static void __sk_free(struct sock *sk)
if (likely(sk->sk_net_refcnt))
sock_inuse_add(sock_net(sk), -1);

View File

@@ -235,7 +235,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (!pe)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3986,6 +3986,8 @@ static const struct seq_operations vmall
@@ -3987,6 +3987,8 @@ static const struct seq_operations vmall
static int __init proc_vmalloc_init(void)
{
@@ -330,7 +330,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3894,6 +3894,8 @@ static __net_initdata struct pernet_oper
@@ -3897,6 +3897,8 @@ static __net_initdata struct pernet_oper
static int __init proto_init(void)
{

View File

@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#define PACKET_FANOUT_LB 1
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1891,6 +1891,7 @@ static int packet_rcv_spkt(struct sk_buf
@@ -1877,6 +1877,7 @@ static int packet_rcv_spkt(struct sk_buf
{
struct sock *sk;
struct sockaddr_pkt *spkt;
@@ -38,7 +38,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/*
* When we registered the protocol we saved the socket in the data
@@ -1898,6 +1899,7 @@ static int packet_rcv_spkt(struct sk_buf
@@ -1884,6 +1885,7 @@ static int packet_rcv_spkt(struct sk_buf
*/
sk = pt->af_packet_priv;
@@ -46,7 +46,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/*
* Yank back the headers [hope the device set this
@@ -1910,7 +1912,7 @@ static int packet_rcv_spkt(struct sk_buf
@@ -1896,7 +1898,7 @@ static int packet_rcv_spkt(struct sk_buf
* so that this procedure is noop.
*/
@@ -55,7 +55,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
goto out;
if (!net_eq(dev_net(dev), sock_net(sk)))
@@ -2156,12 +2158,12 @@ static int packet_rcv(struct sk_buff *sk
@@ -2142,12 +2144,12 @@ static int packet_rcv(struct sk_buff *sk
unsigned int snaplen, res;
bool is_drop_n_account = false;
@@ -71,7 +71,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
@@ -2287,12 +2289,12 @@ static int tpacket_rcv(struct sk_buff *s
@@ -2273,12 +2275,12 @@ static int tpacket_rcv(struct sk_buff *s
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
@@ -87,7 +87,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
@@ -3409,6 +3411,7 @@ static int packet_create(struct net *net
@@ -3395,6 +3397,7 @@ static int packet_create(struct net *net
mutex_init(&po->pg_vec_lock);
po->rollover = NULL;
po->prot_hook.func = packet_rcv;
@@ -95,7 +95,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
@@ -4062,6 +4065,16 @@ packet_setsockopt(struct socket *sock, i
@@ -4048,6 +4051,16 @@ packet_setsockopt(struct socket *sock, i
WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
return 0;
}
@@ -112,7 +112,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
default:
return -ENOPROTOOPT;
}
@@ -4118,6 +4131,13 @@ static int packet_getsockopt(struct sock
@@ -4104,6 +4117,13 @@ static int packet_getsockopt(struct sock
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
break;

View File

@@ -22,7 +22,7 @@ Signed-off-by: Tim Harvey <tharvey@gateworks.com>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
@@ -6011,3 +6012,34 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
@@ -6015,3 +6016,34 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif