/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/pgtable.h>

spinlock_t ssm_lock;
EXPORT_SYMBOL(ssm_lock);

static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_idx = 0;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}

static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
	printk(".");	/* debug: one dot per completed swap write */
}

void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		char *cmem = NULL;
		size_t dlen = 0;
		size_t rlen = 0;
		char *decomp_space = NULL;
		char *tmp = NULL;
		int decompret = -1;

		if (page == NULL) {
			bio_put(bio);
			return;
		}

		/* Scratch copy of the page and a two-page decompression buffer. */
		tmp = (char *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, 0);
		if (!tmp)
			goto out;
		decomp_space = (char *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, 1);
		if (!decomp_space)
			goto out;

		if (PageHighMem(page)) {
			cmem = kmap_atomic(page);
			copy_page(tmp, cmem);
			kunmap_atomic(cmem);
		} else {
			cmem = page_address(page);
			copy_page(tmp, cmem);
		}

		/* No magic bytes: the page was swapped out uncompressed. */
		if (!(tmp[2] == 0x11 && tmp[3] == 0x22 && tmp[4] == 0x33))
			goto out;

		/*
		 * The compressed length is stored big-endian in the first two
		 * bytes; cast through unsigned char so a signed plain char
		 * does not sign-extend into the upper bits.
		 */
		dlen = ((size_t)(unsigned char)tmp[0] << 8) |
		       (unsigned char)tmp[1];
		rlen = PAGE_SIZE;
		decompret = lzo1x_decompress_safe(tmp + 5, dlen,
						  decomp_space, &rlen);
		if (unlikely(decompret != LZO_E_OK)) {
			pr_err("Decompression failed. %d\n", decompret);
			goto out;
		}

		if (PageHighMem(page)) {
			cmem = kmap_atomic(page);
			copy_page(cmem, decomp_space);
			kunmap_atomic(cmem);
		} else {
			cmem = page_address(page);
			copy_page(cmem, decomp_space);
		}
out:
		SetPageUptodate(page);
		if (tmp)
			free_pages((unsigned long)tmp, 0);
		if (decomp_space)
			free_pages((unsigned long)decomp_space, 1);
	}
	unlock_page(page);
	bio_put(bio);
	printk("-");	/* debug: one dash per completed swap read */
}
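
/*
 * Illustrative sketch, not part of the original file: the on-page record
 * produced by swap_writepage() below and parsed by end_swap_bio_read()
 * above is a 5-byte header followed by the LZO payload:
 *
 *   byte 0    compressed length, bits 15..8
 *   byte 1    compressed length, bits 7..0
 *   bytes 2-4 magic 0x11 0x22 0x33 (page holds compressed data)
 *   bytes 5.. lzo1x_1_compress() output, at most PAGE_SIZE - 10 bytes
 *
 * The helper names ssm_pack_header()/ssm_unpack_header() are hypothetical
 * and only restate that layout in one place; the functions above and below
 * open-code it.
 */
static inline void ssm_pack_header(unsigned char *buf, size_t clen)
{
	buf[0] = (clen >> 8) & 0xff;	/* length, high byte */
	buf[1] = clen & 0xff;		/* length, low byte */
	buf[2] = 0x11;			/* magic bytes */
	buf[3] = 0x22;
	buf[4] = 0x33;
}

static inline bool ssm_unpack_header(const unsigned char *buf, size_t *clen)
{
	if (buf[2] != 0x11 || buf[3] != 0x22 || buf[4] != 0x33)
		return false;		/* page was stored uncompressed */
	*clen = ((size_t)buf[0] << 8) | buf[1];
	return true;
}
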
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;
	char *uncmem = NULL;
	size_t clen = 0;
	char *comp_space = NULL;
	char *wrkmem = NULL;
	char *tmp = NULL;
	int compret = -1;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}

	/* LZO work area, a two-page output buffer and a one-page scratch copy. */
	wrkmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_ATOMIC);
	if (!wrkmem) {
		unlock_page(page);
		goto out;
	}
	comp_space = (char *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, 1);
	if (!comp_space) {
		unlock_page(page);
		goto out;
	}
	tmp = (char *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, 0);
	if (!tmp) {
		unlock_page(page);
		goto out;
	}

	if (PageHighMem(page)) {
		uncmem = kmap_atomic(page);
		copy_page(tmp, uncmem);
		kunmap_atomic(uncmem);
	} else {
		uncmem = page_address(page);
		copy_page(tmp, uncmem);
	}

	compret = lzo1x_1_compress(tmp, PAGE_SIZE, comp_space, &clen, wrkmem);
	/*
	 * Fall back to writing the page uncompressed if compression failed
	 * or did not leave room for the 5-byte header.
	 */
	if (unlikely(compret != LZO_E_OK || clen == 0))
		goto origin;
	if (unlikely(clen > PAGE_SIZE - 10))
		goto origin;

	/* 5-byte header: 16-bit big-endian length, then magic 0x11 0x22 0x33. */
	memset(tmp, 0, PAGE_SIZE);
	tmp[0] = (clen & 0x0000ff00) >> 8;
	tmp[1] = (clen & 0x000000ff);
	tmp[2] = 0x11;
	tmp[3] = 0x22;
	tmp[4] = 0x33;
	memcpy((void *)(tmp + 5), (void *)comp_space, PAGE_SIZE - 5);

	if (PageHighMem(page)) {
		uncmem = kmap_atomic(page);
		copy_page(uncmem, tmp);
		kunmap_atomic(uncmem);
	} else {
		uncmem = page_address(page);
		copy_page(uncmem, tmp);
	}

origin:
	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	if (wrkmem)
		kzfree(wrkmem);
	if (tmp)
		free_pages((unsigned long)tmp, 0);
	if (comp_space)
		free_pages((unsigned long)comp_space, 1);
	return ret;
}

int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageUptodate(page));

	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
	/*
	 * Wait for end_swap_bio_read() to unlock the page, so that the
	 * (possibly decompressed) data is in place before we return.
	 */
	wait_on_page_locked(page);
out:
	return ret;
}
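
/*
 * Minimal round-trip sketch, assuming the header layout documented above.
 * This is illustrative only and is not wired into the build: the
 * CONFIG_SSM_SELFTEST guard and the ssm_selftest() name are hypothetical.
 * It exercises the same lzo1x_1_compress()/lzo1x_decompress_safe() pair and
 * the same two-page scratch buffers used by swap_writepage() and
 * end_swap_bio_read(); LZO can expand incompressible input, so a single
 * page is not a safe output buffer.
 */
#ifdef CONFIG_SSM_SELFTEST
static int __init ssm_selftest(void)
{
	void *wrk = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	unsigned char *src = (unsigned char *)__get_free_pages(GFP_KERNEL, 0);
	unsigned char *dst = (unsigned char *)__get_free_pages(GFP_KERNEL, 1);
	unsigned char *back = (unsigned char *)__get_free_pages(GFP_KERNEL, 1);
	size_t clen = 0, dlen = PAGE_SIZE;
	int ret = -ENOMEM;

	if (!wrk || !src || !dst || !back)
		goto out;

	memset(src, 0xa5, PAGE_SIZE);	/* trivially compressible pattern */

	ret = lzo1x_1_compress(src, PAGE_SIZE, dst, &clen, wrk);
	if (ret != LZO_E_OK)
		goto out;

	ret = lzo1x_decompress_safe(dst, clen, back, &dlen);
	if (ret != LZO_E_OK || dlen != PAGE_SIZE || memcmp(src, back, PAGE_SIZE))
		ret = -EINVAL;
out:
	kzfree(wrk);
	if (src)
		free_pages((unsigned long)src, 0);
	if (dst)
		free_pages((unsigned long)dst, 1);
	if (back)
		free_pages((unsigned long)back, 1);
	return ret;
}
late_initcall(ssm_selftest);
#endif /* CONFIG_SSM_SELFTEST */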