There are quite a number of places where e.g. page->va->page
translations happen.
Besides yielding smaller code (source and binary), a second goal is to
make it easier to determine where virtual addresses of pages allocated
through alloc_empty_pages_and_pagevec() are really used (in turn in
order to determine whether using highmem pages would be possible
there).
As usual, written and tested on 2.6.32 and made to apply to the 2.6.18
tree without further testing.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
--- head-2009-12-07.orig/drivers/xen/blkback/blkback.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/blkback/blkback.c 2009-12-11 16:02:20.000000000 +0100
@@ -95,9 +95,11 @@ static inline int vaddr_pagenr(pending_r
return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}
+#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+
static inline unsigned long vaddr(pending_req_t *req, int seg)
{
- unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+ unsigned long pfn = page_to_pfn(pending_page(req, seg));
return (unsigned long)pfn_to_kaddr(pfn);
}
@@ -175,7 +177,7 @@ static void fast_flush_area(pending_req_
handle = pending_handle(req, i);
if (handle == BLKBACK_INVALID_HANDLE)
continue;
- blkback_pagemap_clear(virt_to_page(vaddr(req, i)));
+ blkback_pagemap_clear(pending_page(req, i));
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
@@ -463,7 +465,7 @@ static void dispatch_rw_block_io(blkif_t
ret |= 1;
} else {
blkback_pagemap_set(vaddr_pagenr(pending_req, i),
- virt_to_page(vaddr(pending_req, i)),
+ pending_page(pending_req, i),
blkif->domid, req->handle,
req->seg[i].gref);
}
@@ -473,8 +475,8 @@ static void dispatch_rw_block_io(blkif_t
if (ret)
continue;
- set_phys_to_machine(__pa(vaddr(
- pending_req, i)) >> PAGE_SHIFT,
+ set_phys_to_machine(
+ page_to_pfn(pending_page(pending_req, i)),
FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
seg[i].buf = map[i].dev_bus_addr |
(req->seg[i].first_sect << 9);
@@ -505,7 +507,7 @@ static void dispatch_rw_block_io(blkif_t
while ((bio == NULL) ||
(bio_add_page(bio,
- virt_to_page(vaddr(pending_req, i)),
+ pending_page(pending_req, i),
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
if (bio) {
--- head-2009-12-07.orig/drivers/xen/blktap/blktap.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/blktap/blktap.c 2009-12-15 12:41:16.000000000 +0100
@@ -171,11 +171,16 @@ static inline unsigned int RTN_PEND_IDX(
#define BLKBACK_INVALID_HANDLE (~0)
static struct page **foreign_pages[MAX_DYNAMIC_MEM];
-static inline unsigned long idx_to_kaddr(
+static inline struct page *idx_to_page(
unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
{
unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
- unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
+ return foreign_pages[mmap_idx][arr_idx];
+}
+static inline unsigned long idx_to_kaddr(
+ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
+{
+ unsigned long pfn = page_to_pfn(idx_to_page(mmap_idx,req_idx,sg_idx));
return (unsigned long)pfn_to_kaddr(pfn);
}
@@ -346,7 +351,7 @@ static pte_t blktap_clear_pte(struct vm_
mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, seg);
ClearPageReserved(pg);
info->foreign_map.map[offset + RING_PAGES] = NULL;
@@ -1043,7 +1048,7 @@ static void fast_flush_area(pending_req_
struct grant_handle_pair *khandle;
uint64_t ptep;
int ret, mmap_idx;
- unsigned long kvaddr, uvaddr;
+ unsigned long uvaddr;
tap_blkif_t *info;
struct mm_struct *mm;
@@ -1069,7 +1074,6 @@ static void fast_flush_area(pending_req_
mmap_idx = req->mem_idx;
for (i = 0; i < req->nr_pages; i++) {
- kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
khandle = &pending_handle(mmap_idx, k_idx, i);
@@ -1081,8 +1085,8 @@ static void fast_flush_area(pending_req_
invcount++;
set_phys_to_machine(
- __pa(idx_to_kaddr(mmap_idx, k_idx, i))
- >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+ page_to_pfn(idx_to_page(mmap_idx, k_idx, i)),
+ INVALID_P2M_ENTRY);
}
if (khandle->user != INVALID_GRANT_HANDLE) {
@@ -1231,14 +1235,13 @@ static int blktap_read_ufe_ring(tap_blki
for (j = 0; j < pending_req->nr_pages; j++) {
- unsigned long kvaddr, uvaddr;
+ unsigned long uvaddr;
struct page *pg;
int offset;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, j);
ClearPageReserved(pg);
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
info->foreign_map.map[offset] = NULL;
@@ -1524,12 +1527,10 @@ static void dispatch_rw_block_io(blkif_t
for (i = 0; i < (nseg*2); i+=2) {
unsigned long uvaddr;
- unsigned long kvaddr;
unsigned long offset;
struct page *pg;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
if (unlikely(map[i].status != 0)) {
WPRINTK("invalid kernel buffer -- "
@@ -1553,22 +1554,20 @@ static void dispatch_rw_block_io(blkif_t
if (ret)
continue;
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ pg = idx_to_page(mmap_idx, pending_idx, i/2);
+ set_phys_to_machine(page_to_pfn(pg),
FOREIGN_FRAME(map[i].dev_bus_addr
>> PAGE_SHIFT));
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
info->foreign_map.map[offset] = pg;
}
} else {
for (i = 0; i < nseg; i++) {
unsigned long uvaddr;
- unsigned long kvaddr;
unsigned long offset;
struct page *pg;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
if (unlikely(map[i].status != 0)) {
WPRINTK("invalid kernel buffer -- "
@@ -1584,7 +1583,7 @@ static void dispatch_rw_block_io(blkif_t
continue;
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, i);
info->foreign_map.map[offset] = pg;
}
}
@@ -1596,11 +1595,9 @@ static void dispatch_rw_block_io(blkif_t
down_write(&mm->mmap_sem);
/* Mark mapped pages as reserved: */
for (i = 0; i < req->nr_segments; i++) {
- unsigned long kvaddr;
struct page *pg;
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, i);
SetPageReserved(pg);
if (xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long uvaddr = MMAP_VADDR(info->user_vstart,
--- head-2009-12-07.orig/drivers/xen/blktap2/blktap.h 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/blktap2/blktap.h 2009-12-15 10:46:35.000000000 +0100
@@ -243,6 +243,13 @@ int blktap_request_pool_grow(void);
int blktap_request_pool_shrink(void);
struct blktap_request *blktap_request_allocate(struct blktap *);
void blktap_request_free(struct blktap *, struct blktap_request *);
-unsigned long request_to_kaddr(struct blktap_request *, int);
+struct page *request_to_page(struct blktap_request *, int);
+
+static inline unsigned long
+request_to_kaddr(struct blktap_request *req, int seg)
+{
+ unsigned long pfn = page_to_pfn(request_to_page(req, seg));
+ return (unsigned long)pfn_to_kaddr(pfn);
+}
#endif
--- head-2009-12-07.orig/drivers/xen/blktap2/device.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/blktap2/device.c 2009-12-15 11:07:38.000000000 +0100
@@ -324,16 +324,15 @@ blktap_unmap(struct blktap *tap, struct
down_write(&tap->ring.vma->vm_mm->mmap_sem);
for (i = 0; i < request->nr_pages; i++) {
+ kvaddr = request_to_kaddr(request, i);
BTDBG("request: %p, seg: %d, kvaddr: 0x%08lx, khandle: %u, "
"uvaddr: 0x%08lx, uhandle: %u\n", request, i,
- request_to_kaddr(request, i),
- request->handles[i].kernel,
+ kvaddr, request->handles[i].kernel,
MMAP_VADDR(tap->ring.user_vstart, usr_idx, i),
request->handles[i].user);
if (!xen_feature(XENFEAT_auto_translated_physmap) &&
request->handles[i].kernel == INVALID_GRANT_HANDLE) {
- kvaddr = request_to_kaddr(request, i);
blktap_umap_uaddr(&init_mm, kvaddr);
flush_tlb_kernel_page(kvaddr);
set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
@@ -454,7 +453,7 @@ blktap_prep_foreign(struct blktap *tap,
table->cnt++;
/* enable chained tap devices */
- tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ tap_page = request_to_page(request, seg);
set_page_private(tap_page, page_private(page));
SetPageBlkback(tap_page);
@@ -483,7 +482,7 @@ blktap_map_foreign(struct blktap *tap,
struct page *page;
int i, grant, err, usr_idx;
struct blktap_ring *ring;
- unsigned long uvaddr, kvaddr, foreign_mfn;
+ unsigned long uvaddr, foreign_mfn;
if (!table->cnt)
return 0;
@@ -501,7 +500,6 @@ blktap_map_foreign(struct blktap *tap,
continue;
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
- kvaddr = request_to_kaddr(request, i);
if (unlikely(table->grants[grant].status)) {
BTERR("invalid kernel buffer: could not remap it\n");
@@ -529,18 +527,19 @@ blktap_map_foreign(struct blktap *tap,
if (err)
continue;
- page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ page = request_to_page(request, i);
if (!xen_feature(XENFEAT_auto_translated_physmap))
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ set_phys_to_machine(page_to_pfn(page),
FOREIGN_FRAME(foreign_mfn));
else if (vm_insert_page(ring->vma, uvaddr, page))
err |= 1;
BTDBG("pending_req: %p, seg: %d, page: %p, "
- "kvaddr: 0x%08lx, khandle: %u, uvaddr: 0x%08lx, "
+ "kvaddr: 0x%p, khandle: %u, uvaddr: 0x%08lx, "
"uhandle: %u\n", request, i, page,
- kvaddr, request->handles[i].kernel,
+ pfn_to_kaddr(page_to_pfn(page)),
+ request->handles[i].kernel,
uvaddr, request->handles[i].user);
}
@@ -593,7 +592,7 @@ blktap_map(struct blktap *tap,
gnttab_set_map_op(&map, kvaddr, flags, gref, domid);
/* enable chained tap devices */
- tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ tap_page = request_to_page(request, seg);
set_page_private(tap_page, page_private(page));
SetPageBlkback(tap_page);
@@ -631,7 +630,7 @@ blktap_device_process_request(struct blk
struct scatterlist *sg;
struct blktap_grant_table table;
unsigned int fsect, lsect, nr_sects;
- unsigned long offset, uvaddr, kvaddr;
+ unsigned long offset, uvaddr;
struct blkif_request blkif_req, *target;
err = -1;
@@ -689,18 +688,17 @@ blktap_device_process_request(struct blk
}
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
- kvaddr = request_to_kaddr(request, i);
offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT;
- page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ page = request_to_page(request, i);
ring->foreign_map.map[offset] = page;
SetPageReserved(page);
BTDBG("mapped uaddr %08lx to page %p pfn 0x%lx\n",
- uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT);
+ uvaddr, page, page_to_pfn(page));
BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, "
- "page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n",
+ "page: %p, kvaddr: %p, uvaddr: 0x%08lx\n",
offset, request, i,
- page, kvaddr, uvaddr);
+ page, pfn_to_kaddr(page_to_pfn(page)), uvaddr);
request->nr_pages++;
}
--- head-2009-12-07.orig/drivers/xen/blktap2/request.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/blktap2/request.c 2009-12-15 10:47:13.000000000 +0100
@@ -123,13 +123,12 @@ blktap_request_pool_free_bucket(struct b
kfree(bucket);
}
-unsigned long
-request_to_kaddr(struct blktap_request *req, int seg)
+struct page *
+request_to_page(struct blktap_request *req, int seg)
{
struct blktap_request_handle *handle = blktap_request_to_handle(req);
int idx = handle->slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
- unsigned long pfn = page_to_pfn(handle->bucket->foreign_pages[idx]);
- return (unsigned long)pfn_to_kaddr(pfn);
+ return handle->bucket->foreign_pages[idx];
}
int
--- head-2009-12-07.orig/drivers/xen/gntdev/gntdev.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/gntdev/gntdev.c 2009-12-15 10:08:53.000000000 +0100
@@ -586,7 +586,7 @@ static int gntdev_mmap (struct file *fli
kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
user_vaddr = get_user_vaddr(vma, i);
- page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
+ page = private_data->foreign_pages[slot_index + i];
gnttab_set_map_op(&op, kernel_vaddr, flags,
private_data->grants[slot_index+i]
@@ -805,9 +805,9 @@ static pte_t gntdev_clear_pte(struct vm_
GNTDEV_SLOT_NOT_YET_MAPPED;
/* Invalidate the physical to machine mapping for this page. */
- set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
- slot_index))
- >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+ set_phys_to_machine(
+ page_to_pfn(private_data->foreign_pages[slot_index]),
+ INVALID_P2M_ENTRY);
} else {
pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
--- head-2009-12-07.orig/drivers/xen/netback/netback.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/netback/netback.c 2009-12-15 10:02:18.000000000 +0100
@@ -1102,8 +1102,7 @@ static int netbk_tx_check_mop(struct sk_
pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
netif_put(netif);
} else {
- set_phys_to_machine(
- __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
+ set_phys_to_machine(idx_to_pfn(pending_idx),
FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
grant_tx_handle[pending_idx] = mop->handle;
}
@@ -1119,8 +1118,7 @@ static int netbk_tx_check_mop(struct sk_
/* Check error status: if okay then remember grant handle. */
newerr = (++mop)->status;
if (likely(!newerr)) {
- set_phys_to_machine(
- __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
+ set_phys_to_machine(idx_to_pfn(pending_idx),
FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
grant_tx_handle[pending_idx] = mop->handle;
/* Had a previous error? Invalidate this fragment. */
@@ -1173,7 +1171,7 @@ static void netbk_fill_frags(struct sk_b
&pending_inuse_head);
txp = &pending_tx_info[pending_idx].req;
- frag->page = virt_to_page(idx_to_kaddr(pending_idx));
+ frag->page = mmap_pages[pending_idx];
frag->size = txp->size;
frag->page_offset = txp->offset;
--- head-2009-12-07.orig/drivers/xen/scsiback/scsiback.c 2009-12-15 12:40:27.000000000 +0100
+++ head-2009-12-07/drivers/xen/scsiback/scsiback.c 2009-12-15 11:10:58.000000000 +0100
@@ -285,6 +285,8 @@ static int scsiback_gnttab_data_map(vscs
BUG_ON(err);
for (i = 0; i < nr_segments; i++) {
+ struct page *pg;
+
if (unlikely(map[i].status != 0)) {
printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
@@ -296,11 +298,12 @@ static int scsiback_gnttab_data_map(vscs
if (err)
continue;
- set_phys_to_machine(__pa(vaddr(
- pending_req, i)) >> PAGE_SHIFT,
+ pg = pending_pages[vaddr_pagenr(pending_req, i)];
+
+ set_phys_to_machine(page_to_pfn(pg),
FOREIGN_FRAME(map[i].dev_bus_addr >>
PAGE_SHIFT));
- pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
+ pending_req->sgl[i].page = pg;
pending_req->sgl[i].offset = ring_req->seg[i].offset;
pending_req->sgl[i].length = ring_req->seg[i].length;
data_len += pending_req->sgl[i].length;
xenlinux-backend-translations.patch
Description: Text document
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|