Revert "FROMLIST: BACKPORT: mm: swap: mTHP allocate swap entries from nonfull list"
This reverts commit 216c128a8c.
Signed-off-by: Chris Li <chrisl@kernel.org>
Bug: 313807618
Bug: 351082780
Change-Id: Ib1b56087923eeac5309c20d409518cb0fe809e63
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
parent d24fe223ea
commit 887a20b67d
@@ -264,14 +264,11 @@ struct swap_cluster_info {
 				 */
 	unsigned int count:12;
 	unsigned int state:3;
-	unsigned int order:4;
 	struct list_head list; /* Protected by swap_info_struct->lock */
 };

 #define CLUSTER_STATE_FREE 1 /* This cluster is free */
 #define CLUSTER_STATE_PER_CPU 2 /* This cluster on per_cpu_cluster */
 #define CLUSTER_STATE_SCANNED 3 /* This cluster off per_cpu_cluster */
-#define CLUSTER_STATE_NONFULL 4 /* This cluster is on nonfull list */
-

 /*
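Note on the hunk above: the reverted patch packed the cluster's bookkeeping into a single 32-bit word (12-bit count, 3-bit state, 4-bit order) next to the list linkage. Below is a minimal standalone C sketch of that layout; the kernel's spinlock_t and struct list_head are stubbed out, so this is an illustration of the removed struct, not the kernel definition.

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

struct swap_cluster_info {
	int lock;               /* stand-in for the kernel spinlock_t */
	unsigned int count:12;  /* entries in use, up to SWAPFILE_CLUSTER */
	unsigned int state:3;   /* FREE / PER_CPU / SCANNED / NONFULL */
	unsigned int order:4;   /* allocation order this cluster serves */
	struct list_head list;  /* free_clusters / nonfull_clusters linkage */
};

int main(void)
{
	/* count + state + order (19 bits) share one unsigned int */
	printf("sizeof(struct swap_cluster_info) = %zu\n",
	       sizeof(struct swap_cluster_info));
	return 0;
}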
@@ -310,8 +307,6 @@ struct swap_info_struct {
 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
 	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
 	struct list_head free_clusters; /* free clusters list */
-	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
-	/* list of cluster that contains at least one free slot */
 	unsigned int lowest_bit;	/* index of first free in swap_map */
 	unsigned int highest_bit;	/* index of last free in swap_map */
 	unsigned int pages;		/* total of usable pages of swap */
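Note: nonfull_clusters is an array of list heads indexed by allocation order, so an order-2 request only looks at clusters already carved up for order-2 entries. A standalone sketch of that per-order indexing, under the assumption of ten orders (the kernel derives the real SWAP_NR_ORDERS from PMD_ORDER):

#include <stdio.h>

/* Assumption for the sketch: orders 0..9 (4K..2M with 4K pages). */
#define SWAP_NR_ORDERS 10

struct list_head { struct list_head *prev, *next; };

static struct list_head nonfull_clusters[SWAP_NR_ORDERS];

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

int main(void)
{
	/* same shape as the setup_swap_map_and_extents() hunk below */
	for (int i = 0; i < SWAP_NR_ORDERS; i++)
		INIT_LIST_HEAD(&nonfull_clusters[i]);

	/* a cluster serving order-2 (16K) folios lives on bucket 2 */
	unsigned int order = 2;
	printf("bucket %u empty? %d\n", order,
	       nonfull_clusters[order].next == &nonfull_clusters[order]);
	return 0;
}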
@@ -360,12 +360,8 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,

 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
 {
-	if (ci->state == CLUSTER_STATE_NONFULL)
-		list_move_tail(&ci->list, &si->free_clusters);
-	else
-		list_add_tail(&ci->list, &si->free_clusters);
 	ci->state = CLUSTER_STATE_FREE;
-	ci->order = 0;
+	list_add_tail(&ci->list, &si->free_clusters);
 }

 /*
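Note: the deleted branch needed list_move_tail() because a CLUSTER_STATE_NONFULL cluster is already linked on a nonfull list and must be unlinked before being re-queued, whereas a cluster in any other state sits on no list and plain list_add_tail() suffices; with the nonfull list gone, the restored single list_add_tail() covers every case. A userspace re-implementation sketch of the two primitives (same semantics as the kernel's <linux/list.h>, but not the kernel code):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

/* move = unlink from wherever it is, then re-add: safe only if the
 * node is currently on some list, which is exactly what the removed
 * CLUSTER_STATE_NONFULL check guaranteed */
static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add_tail(n, h);
}

int main(void)
{
	struct list_head nonfull, free_l, node;
	INIT_LIST_HEAD(&nonfull); INIT_LIST_HEAD(&free_l);

	list_add_tail(&node, &nonfull);   /* node parked on nonfull */
	list_move_tail(&node, &free_l);   /* relocated to the free list */
	printf("on free list? %d\n", free_l.next == &node);
	return 0;
}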
@@ -487,12 +483,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluste
 	ci->count--;

 	if (!ci->count)
-		return free_cluster(p, ci);
-
-	if (ci->state == CLUSTER_STATE_SCANNED) {
-		list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]);
-		ci->state = CLUSTER_STATE_NONFULL;
-	}
+		free_cluster(p, ci);
 }

 /*
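Note: the removed lines gave a full cluster a way back into circulation: once scanned to the end (CLUSTER_STATE_SCANNED), the first freed entry parked it on nonfull_clusters[ci->order]. A hypothetical restatement of that freeing rule as a pure function; the state names mirror the patch, the numeric values and the function itself are assumptions made for illustration:

#include <stdio.h>

enum cluster_state { FREE = 1, PER_CPU, SCANNED, NONFULL };

/* what the reverted dec_cluster_info_page() did to a cluster's state
 * when one entry was freed (count_after = ci->count after decrement) */
static enum cluster_state on_entry_freed(enum cluster_state s,
					 unsigned int count_after)
{
	if (count_after == 0)
		return FREE;      /* free_cluster() path */
	if (s == SCANNED)
		return NONFULL;   /* parked for reuse at this order */
	return s;                 /* PER_CPU / NONFULL unchanged */
}

int main(void)
{
	printf("%d\n", on_entry_freed(SCANNED, 7));  /* -> NONFULL (4) */
	printf("%d\n", on_entry_freed(NONFULL, 0));  /* -> FREE (1) */
	return 0;
}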
@@ -543,25 +534,17 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
 	unsigned int nr_pages = 1 << order;
 	struct percpu_cluster *cluster;
 	struct swap_cluster_info *ci;
-	unsigned int tmp, max, found = 0;
+	unsigned int tmp, max;

 new_cluster:
 	cluster = this_cpu_ptr(si->percpu_cluster);
 	tmp = cluster->next[order];
 	if (tmp == SWAP_NEXT_INVALID) {
-		if (!list_empty(&si->nonfull_clusters[order])) {
-			ci = list_first_entry(&si->nonfull_clusters[order], struct swap_cluster_info, list);
-			list_del(&ci->list);
-			spin_lock(&ci->lock);
-			ci->state = CLUSTER_STATE_PER_CPU;
-			spin_unlock(&ci->lock);
-			tmp = (ci - si->cluster_info) * SWAPFILE_CLUSTER;
-		} else if (!list_empty(&si->free_clusters)) {
+		if (!list_empty(&si->free_clusters)) {
 			ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
 			list_del(&ci->list);
 			spin_lock(&ci->lock);
 			ci->state = CLUSTER_STATE_PER_CPU;
-			ci->order = order;
 			spin_unlock(&ci->lock);
 			tmp = (ci - si->cluster_info) * SWAPFILE_CLUSTER;
 		} else if (!list_empty(&si->discard_clusters)) {
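Note: this hunk removes the first choice in a three-way cascade: partially used clusters of the right order first, then wholly free clusters, then clusters awaiting discard. A runnable toy restatement of that preference order (illustration only, not kernel code):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static struct list_head nonfull, free_l, discard;

/* mirrors the reverted if/else-if chain in scan_swap_map_try_ssd_cluster() */
static const char *pick_source(void)
{
	if (!list_empty(&nonfull))
		return "nonfull";   /* the branch this revert deletes */
	if (!list_empty(&free_l))
		return "free";
	if (!list_empty(&discard))
		return "discard";
	return "none";
}

int main(void)
{
	INIT_LIST_HEAD(&nonfull); INIT_LIST_HEAD(&free_l);
	INIT_LIST_HEAD(&discard);
	printf("%s\n", pick_source());   /* all empty -> "none" */
	return 0;
}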
@@ -586,24 +569,21 @@ new_cluster:
 	max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER));
 	if (tmp < max) {
 		ci = lock_cluster(si, tmp);
-		while (!found && tmp < max) {
+		while (tmp < max) {
 			if (swap_range_empty(si->swap_map, tmp, nr_pages))
-				found = tmp;
+				break;
 			tmp += nr_pages;
 		}
-		if (tmp >= max) {
-			ci->state = CLUSTER_STATE_SCANNED;
-			cluster->next[order] = SWAP_NEXT_INVALID;
-		} else
-			cluster->next[order] = tmp;
-		WARN_ONCE(ci->order != order, "expecting order %d got %d", order, ci->order);
 		unlock_cluster(ci);
 	}
-	if (!found)
+	if (tmp >= max) {
+		cluster->next[order] = SWAP_NEXT_INVALID;
 		goto new_cluster;
-
-	*offset = found;
-	*scan_base = found;
+	}
+	*offset = tmp;
+	*scan_base = tmp;
+	tmp += nr_pages;
+	cluster->next[order] = tmp < max ? tmp : SWAP_NEXT_INVALID;
 	return true;
 }

@@ -2921,9 +2901,6 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
 	INIT_LIST_HEAD(&p->free_clusters);
 	INIT_LIST_HEAD(&p->discard_clusters);

-	for (i = 0; i < SWAP_NR_ORDERS; i++)
-		INIT_LIST_HEAD(&p->nonfull_clusters[i]);
-
 	for (i = 0; i < swap_header->info.nr_badpages; i++) {
 		unsigned int page_nr = swap_header->info.badpages[i];
 		if (page_nr == 0 || page_nr > swap_header->info.last_page)