/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/lzo.h>
#include <asm/pgtable.h>

/* Shared scratch buffers for the LZO swap-compression path. */
char *wrkmem = NULL;
char *cmem = NULL;
char *tmpmem = NULL;
struct page *decomp_page;

DEFINE_SPINLOCK(ssm_lock);
DEFINE_SPINLOCK(all_lock);
DECLARE_RWSEM(sm);
DEFINE_SPINLOCK(w_lock);
DEFINE_SPINLOCK(wc_lock);

EXPORT_SYMBOL(wrkmem);
EXPORT_SYMBOL(cmem);
EXPORT_SYMBOL(tmpmem);
EXPORT_SYMBOL(decomp_page);
EXPORT_SYMBOL(ssm_lock);
EXPORT_SYMBOL(all_lock);
EXPORT_SYMBOL(sm);
EXPORT_SYMBOL(w_lock);
EXPORT_SYMBOL(wc_lock);

static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_idx = 0;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}

static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		/* Decompression process */
		char *_cmem;
		size_t decomp_len;
		size_t final_len;
		char *decomp_space;
		int ret;

		_cmem = kmap_atomic(page);

		/* Pages stored without the "SSM" magic are not compressed. */
		if (_cmem[0] != 'S' || _cmem[1] != 'S' || _cmem[2] != 'M') {
			kunmap_atomic(_cmem);
			goto out;
		}

		/*
		 * Recover the compressed length from the two header bytes;
		 * go through unsigned char to avoid sign extension.
		 * Note: taking all_lock here got init killed -- do not lock.
		 */
		decomp_len = ((unsigned char)_cmem[3] << 8) |
			      (unsigned char)_cmem[4];
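		/*
		 * On-disk layout of a compressed swap page, as produced by
		 * swap_writepage() below:
		 *
		 *   bytes 0-2 : "SSM" magic
		 *   byte  3   : compressed length, high byte
		 *   byte  4   : compressed length, low byte
		 *   bytes 5+  : LZO1X-compressed payload (decomp_len bytes)
		 */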
		final_len = PAGE_SIZE;
		decomp_space = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (!decomp_space) {
			printk(KERN_ERR "page_io: decompression buffer "
					"allocation failed\n");
			kunmap_atomic(_cmem);
			SetPageError(page);
			ClearPageUptodate(page);
			goto out_unlock;
		}
		memset(decomp_space, 0, PAGE_SIZE);

		/* Decompress into the scratch buffer, then copy back. */
		ret = lzo1x_decompress_safe(_cmem + 5, decomp_len,
					    decomp_space, &final_len);
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed\n");
			kfree(decomp_space);
			kunmap_atomic(_cmem);
			SetPageError(page);
			ClearPageUptodate(page);
			goto out_unlock;
		}
		memcpy(_cmem, decomp_space, final_len);
		kfree(decomp_space);
		kunmap_atomic(_cmem);
out:
		printk("*");
		SetPageUptodate(page);
	}
out_unlock:
	unlock_page(page);
	bio_put(bio);
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;
	char *uncmem;
	char *bufmem;
	size_t clen;
	int compret;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}

	/*
	 * Compression starts here.  Taking w_lock around this section
	 * produced "Bad page map" errors -- do not spin_lock inside
	 * writeback.  Without the lock, concurrent writepage calls can
	 * race on the shared cmem/wrkmem buffers.
	 */
	uncmem = kmap_atomic(page);
	memset(cmem, 0, PAGE_SIZE);
	compret = lzo1x_1_compress(uncmem, PAGE_SIZE, cmem, &clen, wrkmem);
	kunmap_atomic(uncmem);

	page->comp_len = PAGE_SIZE;

	if (unlikely(compret != LZO_E_OK)) {
		/* Compression failed; write the page out uncompressed. */
		pr_err("Page compression failed: %d\n", compret);
	} else if (likely(clen < PAGE_SIZE - 5)) {
		/*
		 * The result fits alongside the 5-byte header: 3 bytes of
		 * "SSM" magic plus 2 bytes of compressed length.
		 */
		int start = 0;

		bufmem = kmap_atomic(page);
		memset(bufmem, 0, PAGE_SIZE);
		bufmem[start++] = 'S';
		bufmem[start++] = 'S';
		bufmem[start++] = 'M';
		bufmem[start++] = (clen & 0x0000FF00) >> 8;
		bufmem[start++] = (clen & 0x000000FF);
		/* Store the compressed data in the page itself. */
		memcpy(bufmem + start, cmem, clen);
		kunmap_atomic(bufmem);
		page->comp_len = clen;
	}
	printk(".");

	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}

int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageUptodate(page));
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}
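
/*
 * A minimal userspace sketch of the "SSM" header format and LZO round
 * trip used above, assuming the miniLZO library (minilzo.h) for
 * lzo1x_1_compress()/lzo1x_decompress_safe().  The PAGE_SZ/HDR_LEN
 * names are illustrative, not part of this file; the block is guarded
 * out of the kernel build and meant to be compiled as a standalone
 * program to experiment with the format.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include "minilzo.h"

#define PAGE_SZ 4096
#define HDR_LEN 5	/* "SSM" magic + 2-byte big-endian length */

static unsigned char wrk[LZO1X_1_MEM_COMPRESS];

int main(void)
{
	unsigned char page[PAGE_SZ], back[PAGE_SZ];
	/* Leave LZO worst-case headroom after the header. */
	unsigned char out[HDR_LEN + PAGE_SZ + PAGE_SZ / 16 + 64 + 3];
	lzo_uint clen = 0, dlen = sizeof(back);

	if (lzo_init() != LZO_E_OK)
		return 1;
	memset(page, 'A', sizeof(page));	/* highly compressible page */

	if (lzo1x_1_compress(page, PAGE_SZ, out + HDR_LEN, &clen, wrk) != LZO_E_OK)
		return 1;
	if (clen >= PAGE_SZ - HDR_LEN)
		return 0;	/* incompressible: would be written raw */

	/* Writer side: the same 5-byte header as swap_writepage(). */
	out[0] = 'S'; out[1] = 'S'; out[2] = 'M';
	out[3] = (clen >> 8) & 0xff;
	out[4] = clen & 0xff;

	/* Reader side: the same recovery as end_swap_bio_read(). */
	clen = ((lzo_uint)out[3] << 8) | out[4];
	if (lzo1x_decompress_safe(out + HDR_LEN, clen, back, &dlen, NULL) != LZO_E_OK)
		return 1;

	printf("4096 -> %lu -> %lu bytes\n",
	       (unsigned long)clen, (unsigned long)dlen);
	return memcmp(page, back, PAGE_SZ) != 0;
}
#endif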