    /*
     * Memory merging support.
     *
     * This code enables dynamic sharing of identical pages found in different
     * memory areas, even if they are not shared by fork()
     *
     * Copyright (C) 2008-2009 Red Hat, Inc.
     * Authors:
     *	Izik Eidus
     *	Andrea Arcangeli
     *	Chris Wright
     *	Hugh Dickins
     *
     * This work is licensed under the terms of the GNU GPL, version 2.
     */
    
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/fs.h>
    #include <linux/mman.h>
    #include <linux/sched.h>
    #include <linux/rwsem.h>
    #include <linux/pagemap.h>
    #include <linux/rmap.h>
    #include <linux/spinlock.h>
    #include <linux/jhash.h>
    #include <linux/delay.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>
    #include <linux/slab.h>
    #include <linux/rbtree.h>
    #include <linux/memory.h>
    #include <linux/mmu_notifier.h>
    #include <linux/swap.h>
    #include <linux/ksm.h>
    #include <linux/hashtable.h>
    #include <linux/freezer.h>
    #include <linux/oom.h>
    #include <linux/numa.h>
    
    #include <asm/tlbflush.h>
    #include "internal.h"
    
    #ifdef CONFIG_NUMA
    #define NUMA(x)		(x)
    #define DO_NUMA(x)	do { (x); } while (0)
    #else
    #define NUMA(x)		(0)
    #define DO_NUMA(x)	do { } while (0)
    #endif
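
/*
 * With CONFIG_NUMA=n, the NUMA() and DO_NUMA() wrappers above reduce the
 * per-node bookkeeping in this file (nid fields, tree-root indexing) to the
 * single-tree case at compile time, without #ifdefs at each use site.
 */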
    
    /*
     * A few notes about the KSM scanning process,
     * to make it easier to understand the data structures below:
     *
     * In order to reduce excessive scanning, KSM sorts the memory pages by their
     * contents into a data structure that holds pointers to the pages' locations.
     *
     * Since the contents of the pages may change at any moment, KSM cannot just
     * insert the pages into a normal sorted tree and expect it to find anything.
     * Therefore KSM uses two data structures - the stable and the unstable tree.
     *
     * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching
 * this tree is fully assured to work (except when pages are unmapped),
     * and therefore this tree is called the stable tree.
     *
     * In addition to the stable tree, KSM uses a second data structure called the
     * unstable tree: this tree holds pointers to pages which have been found to
     * be "unchanged for a period of time".  The unstable tree sorts these pages
     * by their contents, but since they are not write-protected, KSM cannot rely
     * upon the unstable tree to work correctly - the unstable tree is liable to
     * be corrupted as its contents are modified, and so it is called unstable.
     *
     * KSM solves this problem by several techniques:
     *
     * 1) The unstable tree is flushed every time KSM completes scanning all
     *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
     *    colors of the nodes and not on their contents, assuring that even when
     *    the tree gets "corrupted" it won't get out of balance, so scanning time
     *    remains the same (also, searching and inserting nodes in an rbtree uses
     *    the same algorithm, so we have no overhead when we flush and rebuild).
     * 4) KSM never flushes the stable tree, which means that even if it were to
     *    take 10 attempts to find a page in the unstable tree, once it is found,
     *    it is secured in the stable tree.  (When we scan a new page, we first
     *    compare it against the stable tree, and then against the unstable tree.)
     *
     * If the merge_across_nodes tunable is unset, then KSM maintains multiple
     * stable trees and multiple unstable trees: one of each for each NUMA node.
     */
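
/*
 * A brief note on how areas end up in the structures below - a minimal
 * userspace sketch using the documented madvise(2) and sysfs interfaces
 * (buf and len are placeholders, not code used by this file):
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *
 * madvise(MADV_MERGEABLE) sets VM_MERGEABLE on the vma and registers the mm
 * with ksmd; ksmd only scans once "1" is written to /sys/kernel/mm/ksm/run.
 * The sysfs knobs pages_to_scan and sleep_millisecs correspond to
 * ksm_thread_pages_to_scan and ksm_thread_sleep_millisecs below.
 */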
    
    /**
     * struct mm_slot - ksm information per mm that is being scanned
     * @link: link to the mm_slots hash list
     * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
     * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
     * @mm: the mm that this information is valid for
     */
    struct mm_slot {
    	struct hlist_node link;
    	struct list_head mm_list;
    	struct rmap_item *rmap_list;
    	struct mm_struct *mm;
    };
    
    /**
     * struct ksm_scan - cursor for scanning
     * @mm_slot: the current mm_slot we are scanning
     * @address: the next address inside that to be scanned
     * @rmap_list: link to the next rmap to be scanned in the rmap_list
     * @seqnr: count of completed full scans (needed when removing unstable node)
     *
     * There is only the one ksm_scan instance of this cursor structure.
     */
    struct ksm_scan {
    	struct mm_slot *mm_slot;
    	unsigned long address;
    	struct rmap_item **rmap_list;
    	unsigned long seqnr;
    };
    
    /**
     * struct stable_node - node of the stable rbtree
     * @node: rb node of this ksm page in the stable tree
     * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
     * @list: linked into migrate_nodes, pending placement in the proper node tree
     * @hlist: hlist head of rmap_items using this ksm page
     * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
     * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
     */
    struct stable_node {
    	union {
    		struct rb_node node;	/* when node of stable tree */
    		struct {		/* when listed for migration */
    			struct list_head *head;
    			struct list_head list;
    		};
    	};
    	struct hlist_head hlist;
    	unsigned long kpfn;
    #ifdef CONFIG_NUMA
    	int nid;
    #endif
    };
    
    /**
     * struct rmap_item - reverse mapping item for virtual addresses
     * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
     * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
     * @nid: NUMA node id of unstable tree in which linked (may not match page)
     * @mm: the memory structure this rmap_item is pointing into
     * @address: the virtual address this rmap_item tracks (+ flags in low bits)
     * @oldchecksum: previous checksum of the page at that virtual address
     * @node: rb node of this rmap_item in the unstable tree
     * @head: pointer to stable_node heading this list in the stable tree
     * @hlist: link into hlist of rmap_items hanging off that stable_node
     */
    struct rmap_item {
    	struct rmap_item *rmap_list;
    	union {
    		struct anon_vma *anon_vma;	/* when stable */
    #ifdef CONFIG_NUMA
    		int nid;		/* when node of unstable tree */
    #endif
    	};
    	struct mm_struct *mm;
    	unsigned long address;		/* + low bits used for flags below */
    	unsigned int oldchecksum;	/* when unstable */
    	union {
    		struct rb_node node;	/* when node of unstable tree */
    		struct {		/* when listed from stable tree */
    			struct stable_node *head;
    			struct hlist_node hlist;
    		};
    	};
    };
    
    #define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
    #define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
    #define STABLE_FLAG	0x200	/* is listed from the stable tree */
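
/*
 * These flags live in the low bits of rmap_item->address: the address is
 * page-aligned, so its low PAGE_SHIFT bits are free, and
 * "rmap_item->address & PAGE_MASK" recovers the plain virtual address.
 * SEQNR_MASK holds the low bits of ksm_scan.seqnr from the scan in which the
 * rmap_item was placed in the unstable tree; remove_rmap_item_from_tree()
 * uses it to compute the node's "age".
 */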
    
    /* The stable and unstable tree heads */
    static struct rb_root one_stable_tree[1] = { RB_ROOT };
    static struct rb_root one_unstable_tree[1] = { RB_ROOT };
    static struct rb_root *root_stable_tree = one_stable_tree;
    static struct rb_root *root_unstable_tree = one_unstable_tree;
    
    /* Recently migrated nodes of stable tree, pending proper placement */
    static LIST_HEAD(migrate_nodes);
    
    #define MM_SLOTS_HASH_BITS 10
    static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
    
    static struct mm_slot ksm_mm_head = {
    	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
    };
    static struct ksm_scan ksm_scan = {
    	.mm_slot = &ksm_mm_head,
    };
    
    static struct kmem_cache *rmap_item_cache;
    static struct kmem_cache *stable_node_cache;
    static struct kmem_cache *mm_slot_cache;
    
    /* The number of nodes in the stable tree */
    static unsigned long ksm_pages_shared;
    
    /* The number of page slots additionally sharing those nodes */
    static unsigned long ksm_pages_sharing;
    
    /* The number of nodes in the unstable tree */
    static unsigned long ksm_pages_unshared;
    
    /* The number of rmap_items in use: to calculate pages_volatile */
    static unsigned long ksm_rmap_items;
    
    /* Number of pages ksmd should scan in one batch */
    static unsigned int ksm_thread_pages_to_scan = 100;
    
    /* Milliseconds ksmd should sleep between batches */
    static unsigned int ksm_thread_sleep_millisecs = 20;
    
    #ifdef CONFIG_NUMA
    /* Zeroed when merging across nodes is not allowed */
    static unsigned int ksm_merge_across_nodes = 1;
    static int ksm_nr_node_ids = 1;
    #else
    #define ksm_merge_across_nodes	1U
    #define ksm_nr_node_ids		1
    #endif
    
    #define KSM_RUN_STOP	0
    #define KSM_RUN_MERGE	1
    #define KSM_RUN_UNMERGE	2
    #define KSM_RUN_OFFLINE	4
    static unsigned long ksm_run = KSM_RUN_STOP;
    static void wait_while_offlining(void);
    
    static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
    static DEFINE_MUTEX(ksm_thread_mutex);
    static DEFINE_SPINLOCK(ksm_mmlist_lock);
    
    #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
    		sizeof(struct __struct), __alignof__(struct __struct),\
    		(__flags), NULL)
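
/*
 * For example, KSM_KMEM_CACHE(rmap_item, 0) in ksm_slab_init() below expands
 * to kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 * __alignof__(struct rmap_item), 0, NULL), so each cache shows up in
 * /proc/slabinfo with a "ksm_" prefixed name.
 */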
    
    static int __init ksm_slab_init(void)
    {
    	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
    	if (!rmap_item_cache)
    		goto out;
    
    	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
    	if (!stable_node_cache)
    		goto out_free1;
    
    	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
    	if (!mm_slot_cache)
    		goto out_free2;
    
    	return 0;
    
    out_free2:
    	kmem_cache_destroy(stable_node_cache);
    out_free1:
    	kmem_cache_destroy(rmap_item_cache);
    out:
    	return -ENOMEM;
    }
    
    static void __init ksm_slab_free(void)
    {
    	kmem_cache_destroy(mm_slot_cache);
    	kmem_cache_destroy(stable_node_cache);
    	kmem_cache_destroy(rmap_item_cache);
    	mm_slot_cache = NULL;
    }
    
    static inline struct rmap_item *alloc_rmap_item(void)
    {
    	struct rmap_item *rmap_item;
    
    	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
    						__GFP_NORETRY | __GFP_NOWARN);
    	if (rmap_item)
    		ksm_rmap_items++;
    	return rmap_item;
    }
    
    static inline void free_rmap_item(struct rmap_item *rmap_item)
    {
    	ksm_rmap_items--;
    	rmap_item->mm = NULL;	/* debug safety */
    	kmem_cache_free(rmap_item_cache, rmap_item);
    }
    
    static inline struct stable_node *alloc_stable_node(void)
    {
    	/*
    	 * The allocation can take too long with GFP_KERNEL when memory is under
    	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
    	 * grants access to memory reserves, helping to avoid this problem.
    	 */
    	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
    }
    
    static inline void free_stable_node(struct stable_node *stable_node)
    {
    	kmem_cache_free(stable_node_cache, stable_node);
    }
    
    static inline struct mm_slot *alloc_mm_slot(void)
    {
    	if (!mm_slot_cache)	/* initialization failed */
    		return NULL;
    	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
    }
    
    static inline void free_mm_slot(struct mm_slot *mm_slot)
    {
    	kmem_cache_free(mm_slot_cache, mm_slot);
    }
    
    static struct mm_slot *get_mm_slot(struct mm_struct *mm)
    {
    	struct mm_slot *slot;
    
    	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
    		if (slot->mm == mm)
    			return slot;
    
    	return NULL;
    }
    
    static void insert_to_mm_slots_hash(struct mm_struct *mm,
    				    struct mm_slot *mm_slot)
    {
    	mm_slot->mm = mm;
    	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
    }
    
    /*
     * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
     * page tables after it has passed through ksm_exit() - which, if necessary,
     * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
     * a special flag: they can just back out as soon as mm_users goes to zero.
     * ksm_test_exit() is used throughout to make this test for exit: in some
     * places for correctness, in some places just to avoid unnecessary work.
     */
    static inline bool ksm_test_exit(struct mm_struct *mm)
    {
    	return atomic_read(&mm->mm_users) == 0;
    }
    
    /*
     * We use break_ksm to break COW on a ksm page: it's a stripped down
     *
     *	if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
     *		put_page(page);
     *
     * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
     * in case the application has unmapped and remapped mm,addr meanwhile.
     * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
     * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
     *
     * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
     * of the process that owns 'vma'.  We also do not want to enforce
     * protection keys here anyway.
     */
    static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
    {
    	struct page *page;
    	int ret = 0;
    
    	do {
    		cond_resched();
    		page = follow_page(vma, addr,
    				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
    		if (IS_ERR_OR_NULL(page))
    			break;
    		if (PageKsm(page))
    			ret = handle_mm_fault(vma, addr,
    					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
    		else
    			ret = VM_FAULT_WRITE;
    		put_page(page);
    	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
    	/*
    	 * We must loop because handle_mm_fault() may back out if there's
    	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
    	 *
    	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
    	 * COW has been broken, even if the vma does not permit VM_WRITE;
    	 * but note that a concurrent fault might break PageKsm for us.
    	 *
    	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
    	 * backing file, which also invalidates anonymous pages: that's
    	 * okay, that truncation will have unmapped the PageKsm for us.
    	 *
    	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
    	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
    	 * current task has TIF_MEMDIE set, and will be OOM killed on return
    	 * to user; and ksmd, having no mm, would never be chosen for that.
    	 *
    	 * But if the mm is in a limited mem_cgroup, then the fault may fail
    	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
    	 * even ksmd can fail in this way - though it's usually breaking ksm
    	 * just to undo a merge it made a moment before, so unlikely to oom.
    	 *
    	 * That's a pity: we might therefore have more kernel pages allocated
    	 * than we're counting as nodes in the stable tree; but ksm_do_scan
    	 * will retry to break_cow on each pass, so should recover the page
    	 * in due course.  The important thing is to not let VM_MERGEABLE
    	 * be cleared while any such pages might remain in the area.
    	 */
    	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
    }
    
    static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
    		unsigned long addr)
    {
    	struct vm_area_struct *vma;
    	if (ksm_test_exit(mm))
    		return NULL;
    	vma = find_vma(mm, addr);
    	if (!vma || vma->vm_start > addr)
    		return NULL;
    	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
    		return NULL;
    	return vma;
    }
    
    static void break_cow(struct rmap_item *rmap_item)
    {
    	struct mm_struct *mm = rmap_item->mm;
    	unsigned long addr = rmap_item->address;
    	struct vm_area_struct *vma;
    
    	/*
    	 * It is not an accident that whenever we want to break COW
    	 * to undo, we also need to drop a reference to the anon_vma.
    	 */
    	put_anon_vma(rmap_item->anon_vma);
    
    	down_read(&mm->mmap_sem);
    	vma = find_mergeable_vma(mm, addr);
    	if (vma)
    		break_ksm(vma, addr);
    	up_read(&mm->mmap_sem);
    }
    
    static struct page *get_mergeable_page(struct rmap_item *rmap_item)
    {
    	struct mm_struct *mm = rmap_item->mm;
    	unsigned long addr = rmap_item->address;
    	struct vm_area_struct *vma;
    	struct page *page;
    
    	down_read(&mm->mmap_sem);
    	vma = find_mergeable_vma(mm, addr);
    	if (!vma)
    		goto out;
    
    	page = follow_page(vma, addr, FOLL_GET);
    	if (IS_ERR_OR_NULL(page))
    		goto out;
    	if (PageAnon(page)) {
    		flush_anon_page(vma, page, addr);
    		flush_dcache_page(page);
    	} else {
    		put_page(page);
    out:
    		page = NULL;
    	}
    	up_read(&mm->mmap_sem);
    	return page;
    }
    
    /*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees - one for stable and one for unstable pages from all nodes - with
 * their roots at index 0.  Otherwise,
     * every node has its own stable and unstable tree.
     */
    static inline int get_kpfn_nid(unsigned long kpfn)
    {
    	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
    }
    
    static void remove_node_from_stable_tree(struct stable_node *stable_node)
    {
    	struct rmap_item *rmap_item;
    
    	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
    		if (rmap_item->hlist.next)
    			ksm_pages_sharing--;
    		else
    			ksm_pages_shared--;
    		put_anon_vma(rmap_item->anon_vma);
    		rmap_item->address &= PAGE_MASK;
    		cond_resched();
    	}
    
    	if (stable_node->head == &migrate_nodes)
    		list_del(&stable_node->list);
    	else
    		rb_erase(&stable_node->node,
    			 root_stable_tree + NUMA(stable_node->nid));
    	free_stable_node(stable_node);
    }
    
    /*
     * get_ksm_page: checks if the page indicated by the stable node
     * is still its ksm page, despite having held no reference to it.
     * In which case we can trust the content of the page, and it
     * returns the gotten page; but if the page has now been zapped,
     * remove the stale node from the stable tree and return NULL.
     * But beware, the stable node's page might be being migrated.
     *
     * You would expect the stable_node to hold a reference to the ksm page.
     * But if it increments the page's count, swapping out has to wait for
     * ksmd to come around again before it can free the page, which may take
     * seconds or even minutes: much too unresponsive.  So instead we use a
     * "keyhole reference": access to the ksm page from the stable node peeps
     * out through its keyhole to see if that page still holds the right key,
     * pointing back to this stable node.  This relies on freeing a PageAnon
     * page to reset its page->mapping to NULL, and relies on no other use of
     * a page to put something that might look like our key in page->mapping.
 *
 * Note: it is possible that get_ksm_page() will return NULL one moment,
 * then page the next, if the page is in between page_freeze_refs() and
 * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
 * is on its way to being freed; but it is an anomaly to bear in mind.
     */
    static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
    {
    	struct page *page;
    	void *expected_mapping;
    	unsigned long kpfn;
    
    	expected_mapping = (void *)((unsigned long)stable_node |
    					PAGE_MAPPING_KSM);
    again:
    	kpfn = READ_ONCE(stable_node->kpfn);
    	page = pfn_to_page(kpfn);
    
    	/*
    	 * page is computed from kpfn, so on most architectures reading
    	 * page->mapping is naturally ordered after reading node->kpfn,
    	 * but on Alpha we need to be more careful.
    	 */
    	smp_read_barrier_depends();
    	if (READ_ONCE(page->mapping) != expected_mapping)
    		goto stale;
    
    	/*
    	 * We cannot do anything with the page while its refcount is 0.
    	 * Usually 0 means free, or tail of a higher-order page: in which
    	 * case this node is no longer referenced, and should be freed;
    	 * however, it might mean that the page is under page_freeze_refs().
    	 * The __remove_mapping() case is easy, again the node is now stale;
    	 * but if page is swapcache in migrate_page_move_mapping(), it might
    	 * still be our page, in which case it's essential to keep the node.
    	 */
    	while (!get_page_unless_zero(page)) {
    		/*
    		 * Another check for page->mapping != expected_mapping would
    		 * work here too.  We have chosen the !PageSwapCache test to
    		 * optimize the common case, when the page is or is about to
    		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
    		 * in the freeze_refs section of __remove_mapping(); but Anon
    		 * page->mapping reset to NULL later, in free_pages_prepare().
    		 */
    		if (!PageSwapCache(page))
    			goto stale;
    		cpu_relax();
    	}
    
    	if (READ_ONCE(page->mapping) != expected_mapping) {
    		put_page(page);
    		goto stale;
    	}
    
    	if (lock_it) {
    		lock_page(page);
    		if (READ_ONCE(page->mapping) != expected_mapping) {
    			unlock_page(page);
    			put_page(page);
    			goto stale;
    		}
    	}
    	return page;
    
    stale:
    	/*
    	 * We come here from above when page->mapping or !PageSwapCache
    	 * suggests that the node is stale; but it might be under migration.
    	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
    	 * before checking whether node->kpfn has been changed.
    	 */
    	smp_rmb();
    	if (READ_ONCE(stable_node->kpfn) != kpfn)
    		goto again;
    	remove_node_from_stable_tree(stable_node);
    	return NULL;
    }
    
    /*
     * Removing rmap_item from stable or unstable tree.
     * This function will clean the information from the stable/unstable tree.
     */
    static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
    {
    	if (rmap_item->address & STABLE_FLAG) {
    		struct stable_node *stable_node;
    		struct page *page;
    
    		stable_node = rmap_item->head;
    		page = get_ksm_page(stable_node, true);
    		if (!page)
    			goto out;
    
    		hlist_del(&rmap_item->hlist);
    		unlock_page(page);
    		put_page(page);
    
    		if (!hlist_empty(&stable_node->hlist))
    			ksm_pages_sharing--;
    		else
    			ksm_pages_shared--;
    
    		put_anon_vma(rmap_item->anon_vma);
    		rmap_item->address &= PAGE_MASK;
    
    	} else if (rmap_item->address & UNSTABLE_FLAG) {
    		unsigned char age;
    		/*
    		 * Usually ksmd can and must skip the rb_erase, because
    		 * root_unstable_tree was already reset to RB_ROOT.
    		 * But be careful when an mm is exiting: do the rb_erase
    		 * if this rmap_item was inserted by this scan, rather
    		 * than left over from before.
    		 */
    		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
    		BUG_ON(age > 1);
    		if (!age)
    			rb_erase(&rmap_item->node,
    				 root_unstable_tree + NUMA(rmap_item->nid));
    		ksm_pages_unshared--;
    		rmap_item->address &= PAGE_MASK;
    	}
    out:
    	cond_resched();		/* we're called from many long loops */
    }
    
    static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
    				       struct rmap_item **rmap_list)
    {
    	while (*rmap_list) {
    		struct rmap_item *rmap_item = *rmap_list;
    		*rmap_list = rmap_item->rmap_list;
    		remove_rmap_item_from_tree(rmap_item);
    		free_rmap_item(rmap_item);
    	}
    }
    
    /*
     * Though it's very tempting to unmerge rmap_items from stable tree rather
     * than check every pte of a given vma, the locking doesn't quite work for
     * that - an rmap_item is assigned to the stable tree after inserting ksm
     * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
     * rmap_items from parent to child at fork time (so as not to waste time
     * if exit comes before the next scan reaches it).
     *
     * Similarly, although we'd like to remove rmap_items (so updating counts
     * and freeing memory) when unmerging an area, it's easier to leave that
     * to the next pass of ksmd - consider, for example, how ksmd might be
     * in cmp_and_merge_page on one of the rmap_items we would be removing.
     */
    static int unmerge_ksm_pages(struct vm_area_struct *vma,
    			     unsigned long start, unsigned long end)
    {
    	unsigned long addr;
    	int err = 0;
    
    	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
    		if (ksm_test_exit(vma->vm_mm))
    			break;
    		if (signal_pending(current))
    			err = -ERESTARTSYS;
    		else
    			err = break_ksm(vma, addr);
    	}
    	return err;
    }
    
    #ifdef CONFIG_SYSFS
    /*
     * Only called through the sysfs control interface:
     */
    static int remove_stable_node(struct stable_node *stable_node)
    {
    	struct page *page;
    	int err;
    
    	page = get_ksm_page(stable_node, true);
    	if (!page) {
    		/*
    		 * get_ksm_page did remove_node_from_stable_tree itself.
    		 */
    		return 0;
    	}
    
    	if (WARN_ON_ONCE(page_mapped(page))) {
    		/*
    		 * This should not happen: but if it does, just refuse to let
    		 * merge_across_nodes be switched - there is no need to panic.
    		 */
    		err = -EBUSY;
    	} else {
    		/*
    		 * The stable node did not yet appear stale to get_ksm_page(),
    		 * since that allows for an unmapped ksm page to be recognized
    		 * right up until it is freed; but the node is safe to remove.
    		 * This page might be in a pagevec waiting to be freed,
    		 * or it might be PageSwapCache (perhaps under writeback),
    		 * or it might have been removed from swapcache a moment ago.
    		 */
    		set_page_stable_node(page, NULL);
    		remove_node_from_stable_tree(stable_node);
    		err = 0;
    	}
    
    	unlock_page(page);
    	put_page(page);
    	return err;
    }
    
    static int remove_all_stable_nodes(void)
    {
    	struct stable_node *stable_node, *next;
    	int nid;
    	int err = 0;
    
    	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
    		while (root_stable_tree[nid].rb_node) {
    			stable_node = rb_entry(root_stable_tree[nid].rb_node,
    						struct stable_node, node);
    			if (remove_stable_node(stable_node)) {
    				err = -EBUSY;
    				break;	/* proceed to next nid */
    			}
    			cond_resched();
    		}
    	}
    	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
    		if (remove_stable_node(stable_node))
    			err = -EBUSY;
    		cond_resched();
    	}
    	return err;
    }
    
    static int unmerge_and_remove_all_rmap_items(void)
    {
    	struct mm_slot *mm_slot;
    	struct mm_struct *mm;
    	struct vm_area_struct *vma;
    	int err = 0;
    
    	spin_lock(&ksm_mmlist_lock);
    	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
    						struct mm_slot, mm_list);
    	spin_unlock(&ksm_mmlist_lock);
    
    	for (mm_slot = ksm_scan.mm_slot;
    			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
    		mm = mm_slot->mm;
    		down_read(&mm->mmap_sem);
    		for (vma = mm->mmap; vma; vma = vma->vm_next) {
    			if (ksm_test_exit(mm))
    				break;
    			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
    				continue;
    			err = unmerge_ksm_pages(vma,
    						vma->vm_start, vma->vm_end);
    			if (err)
    				goto error;
    		}
    
    		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
    		up_read(&mm->mmap_sem);
    
    		spin_lock(&ksm_mmlist_lock);
    		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
    						struct mm_slot, mm_list);
    		if (ksm_test_exit(mm)) {
    			hash_del(&mm_slot->link);
    			list_del(&mm_slot->mm_list);
    			spin_unlock(&ksm_mmlist_lock);
    
    			free_mm_slot(mm_slot);
    			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
    			mmdrop(mm);
    		} else
    			spin_unlock(&ksm_mmlist_lock);
    	}
    
    	/* Clean up stable nodes, but don't worry if some are still busy */
    	remove_all_stable_nodes();
    	ksm_scan.seqnr = 0;
    	return 0;
    
    error:
    	up_read(&mm->mmap_sem);
    	spin_lock(&ksm_mmlist_lock);
    	ksm_scan.mm_slot = &ksm_mm_head;
    	spin_unlock(&ksm_mmlist_lock);
    	return err;
    }
    #endif /* CONFIG_SYSFS */
    
    static u32 calc_checksum(struct page *page)
    {
    	u32 checksum;
    	void *addr = kmap_atomic(page);
    	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
    	kunmap_atomic(addr);
    	return checksum;
    }
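
/*
 * calc_checksum() above hashes the page as PAGE_SIZE/4 32-bit words with a
 * fixed seed of 17.  The result is only compared against the value cached in
 * rmap_item->oldchecksum to decide whether the page has stayed unchanged
 * since the last scan (technique 2 in the comments at the top of this file);
 * it is never used as a tree lookup key - the trees are ordered by full
 * memcmp of page contents, as in memcmp_pages() below.
 */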
    
    static int memcmp_pages(struct page *page1, struct page *page2)
    {
    	char *addr1, *addr2;
    	int ret;
    
    	addr1 = kmap_atomic(page1);
    	addr2 = kmap_atomic(page2);
    	ret = memcmp(addr1, addr2, PAGE_SIZE);
    	kunmap_atomic(addr2);
    	kunmap_atomic(addr1);
    	return ret;
    }
    
    static inline int pages_identical(struct page *page1, struct page *page2)
    {
    	return !memcmp_pages(page1, page2);
    }
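
/*
 * write_protect_page() below downgrades the pte mapping @page in @vma to
 * read-only (and clean, after transferring any dirty bit to the page), so
 * that the contents cannot change while KSM merges it.  It backs out if an
 * extra page reference suggests get_user_pages_fast()/O_DIRECT is still
 * using the page.  On success, *orig_pte records the pte value that
 * replace_page() later requires to still be in place via pte_same().
 */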
    
    static int write_protect_page(struct vm_area_struct *vma, struct page *page,
    			      pte_t *orig_pte)
    {
    	struct mm_struct *mm = vma->vm_mm;
    	unsigned long addr;
    	pte_t *ptep;
    	spinlock_t *ptl;
    	int swapped;
    	int err = -EFAULT;
    	unsigned long mmun_start;	/* For mmu_notifiers */
    	unsigned long mmun_end;		/* For mmu_notifiers */
    
    	addr = page_address_in_vma(page, vma);
    	if (addr == -EFAULT)
    		goto out;
    
    	BUG_ON(PageTransCompound(page));
    
    	mmun_start = addr;
    	mmun_end   = addr + PAGE_SIZE;
    	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
    
    	ptep = page_check_address(page, mm, addr, &ptl, 0);
    	if (!ptep)
    		goto out_mn;
    
    	if (pte_write(*ptep) || pte_dirty(*ptep)) {
    		pte_t entry;
    
    		swapped = PageSwapCache(page);
    		flush_cache_page(vma, addr, page_to_pfn(page));
    		/*
		 * Ok this is tricky: when get_user_pages_fast() runs it doesn't
		 * take any lock, so the check that we are about to make of the
		 * page count against the map count is racy and O_DIRECT can
		 * happen right after the check.
		 * So we clear the pte and flush the TLB before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
    		 */
    		entry = ptep_clear_flush_notify(vma, addr, ptep);
    		/*
    		 * Check that no O_DIRECT or similar I/O is in progress on the
    		 * page
    		 */
    		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
    			set_pte_at(mm, addr, ptep, entry);
    			goto out_unlock;
    		}
    		if (pte_dirty(entry))
    			set_page_dirty(page);
    		entry = pte_mkclean(pte_wrprotect(entry));
    		set_pte_at_notify(mm, addr, ptep, entry);
    	}
    	*orig_pte = *ptep;
    	err = 0;
    
    out_unlock:
    	pte_unmap_unlock(ptep, ptl);
    out_mn:
    	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
    out:
    	return err;
    }
    
    /**
     * replace_page - replace page in vma by new ksm page
     * @vma:      vma that holds the pte pointing to page
     * @page:     the page we are replacing by kpage
     * @kpage:    the ksm page we replace page by
     * @orig_pte: the original value of the pte
     *
     * Returns 0 on success, -EFAULT on failure.
     */
    static int replace_page(struct vm_area_struct *vma, struct page *page,
    			struct page *kpage, pte_t orig_pte)
    {
    	struct mm_struct *mm = vma->vm_mm;
    	pmd_t *pmd;
    	pte_t *ptep;
    	spinlock_t *ptl;
    	unsigned long addr;
    	int err = -EFAULT;
    	unsigned long mmun_start;	/* For mmu_notifiers */
    	unsigned long mmun_end;		/* For mmu_notifiers */
    
    	addr = page_address_in_vma(page, vma);
    	if (addr == -EFAULT)
    		goto out;
    
    	pmd = mm_find_pmd(mm, addr);
    	if (!pmd)
    		goto out;
    
    	mmun_start = addr;
    	mmun_end   = addr + PAGE_SIZE;
    	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
    
    	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
    	if (!pte_same(*ptep, orig_pte)) {
    		pte_unmap_unlock(ptep, ptl);
    		goto out_mn;
    	}
    
    	get_page(kpage);
    	page_add_anon_rmap(kpage, vma, addr, false);
    
    	flush_cache_page(vma, addr, pte_pfn(*ptep));
    	ptep_clear_flush_notify(vma, addr, ptep);
    	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
    
    	page_remove_rmap(page, false);
    	if (!page_mapped(page))
    		try_to_free_swap(page);
    	put_page(page);
    
    	pte_unmap_unlock(ptep, ptl);
    	err = 0;
    out_mn:
    	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
    out:
    	return err;
    }
    
    /*
     * try_to_merge_one_page - take two pages and merge them into one
     * @vma: the vma that holds the pte pointing to page
     * @page: the PageAnon page that we want to replace with kpage
     * @kpage: the PageKsm page that we want to map instead of page,
     *         or NULL the first time when we want to use page as kpage.
     *
     * This function returns 0 if the pages were merged, -EFAULT otherwise.
     */
    static int try_to_merge_one_page(struct vm_area_struct *vma,
    				 struct page *page, struct page *kpage)
    {
    	pte_t orig_pte = __pte(0);
    	int err = -EFAULT;
    
    	if (page == kpage)			/* ksm page forked */
    		return 0;
    
    	if (!PageAnon(page))
    		goto out;
    
    	/*
    	 * We need the page lock to read a stable PageSwapCache in
    	 * write_protect_page().  We use trylock_page() instead of
    	 * lock_page() because we don't want to wait here - we
    	 * prefer to continue scanning and merging different pages,
    	 * then come back to this page when it is unlocked.
    	 */