[Xen-devel] [PATCH] libxc hardening

To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] libxc hardening
From: Anthony Liguori <aliguori@xxxxxxxxxx>
Date: Wed, 22 Jun 2005 23:15:33 -0500
Delivery-date: Thu, 23 Jun 2005 04:14:44 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mozilla Thunderbird 1.0.2 (X11/20050317)
Howdy,

The following patch hardens a good portion of libxc's error-path code against errno clobbering. Errno clobbering occurs when a function is about to return a failure code (such as -1) but first calls some other function (like munmap() or perror()) that can change the value of errno, so the caller ends up seeing the wrong error.
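
To make the failure mode concrete, here's a minimal sketch (not part of the
patch; do_something() is a hypothetical stand-in for any call that fails by
returning -1 and setting errno):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

/* hypothetical stand-in: fails with -1 and sets errno */
static int do_something(void *addr)
{
    (void)addr;
    errno = EIO;
    return -1;
}

/* fragile: perror() and munmap() may both overwrite errno before the
   caller ever sees it */
int fragile_op(void *addr, size_t len)
{
    if ( do_something(addr) < 0 )
    {
        perror("do_something failed");
        munmap(addr, len);
        return -1;
    }
    return 0;
}

/* hardened: capture errno before any cleanup, restore it on the way out */
int hardened_op(void *addr, size_t len)
{
    if ( do_something(addr) < 0 )
    {
        int saved_errno = errno;
        perror("do_something failed");
        munmap(addr, len);
        errno = saved_errno;
        return -1;
    }
    return 0;
}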

The patch doesn't touch any of the build/save/restore code: those functions do so much work that it seems more useful to give them special error returns of their own. Does this sound reasonable?
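
For illustration only, such a special error return might look something like
this (these names are hypothetical and don't exist in libxc):

/* hypothetical sketch -- none of these names exist in libxc today */
typedef enum {
    XCB_OK            =  0,
    XCB_ERR_NOMEM     = -1,  /* allocation or mlock failure */
    XCB_ERR_MAPPING   = -2,  /* mmap or ioctl failure */
    XCB_ERR_HYPERCALL = -3,  /* the hypervisor rejected the call */
} xcb_status;

The caller could then distinguish failure causes directly, without depending
on errno surviving the cleanup path.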

There are also a ton of read() calls that need to be hardened, but that's another patch.
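
For what it's worth, a hardened read() wrapper might look roughly like this
(a sketch only, using plain perror() rather than libxc's PERROR() so it
stands alone):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* read exactly `count` bytes, retrying short reads, and make sure the
   caller sees the errno from the failing read() itself */
ssize_t read_exact(int fd, void *buf, size_t count)
{
    size_t done = 0;
    while ( done < count )
    {
        ssize_t n = read(fd, (char *)buf + done, count - done);
        if ( n < 0 )
        {
            int saved_errno;
            if ( errno == EINTR )
                continue;            /* retry interrupted reads */
            saved_errno = errno;
            perror("read failed");
            errno = saved_errno;     /* restore the real cause */
            return -1;
        }
        if ( n == 0 )
        {
            errno = EIO;             /* unexpected EOF */
            return -1;
        }
        done += n;
    }
    return (ssize_t)done;
}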

Signed-off-by: Anthony Liguori <aliguori@xxxxxxxxxx>

Regards,

Anthony Liguori


diff -ur xen-unstable-1/tools/libxc/xc_evtchn.c xen-unstable/tools/libxc/xc_evtchn.c
--- xen-unstable-1/tools/libxc/xc_evtchn.c      2005-06-22 17:50:44.000000000 -0500
+++ xen-unstable/tools/libxc/xc_evtchn.c        2005-06-22 22:59:39.297821600 -0500
@@ -13,21 +13,29 @@
 {
     int ret = -1;
     privcmd_hypercall_t hypercall;
+    int saved_errno = 0;
 
     hypercall.op     = __HYPERVISOR_event_channel_op;
     hypercall.arg[0] = (unsigned long)op;
 
     if ( mlock(op, sizeof(*op)) != 0 )
     {
+        saved_errno = errno;
         PERROR("do_evtchn_op: op mlock failed");
         goto out;
     }
 
-    if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0)
+    if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0) {
+        saved_errno = errno;
         ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
+    }
 
     (void)munlock(op, sizeof(*op));
  out:
+
+    if (ret < 0) {
+        errno = saved_errno;
+    }
     return ret;
 }
 
diff -ur xen-unstable-1/tools/libxc/xc_gnttab.c xen-unstable/tools/libxc/xc_gnttab.c
--- xen-unstable-1/tools/libxc/xc_gnttab.c      2005-06-22 17:50:44.000000000 -0500
+++ xen-unstable/tools/libxc/xc_gnttab.c        2005-06-22 23:03:13.451265328 -0500
@@ -18,6 +18,7 @@
 {
     int ret = -1;
     privcmd_hypercall_t hypercall;
+    int saved_errno = 0;
 
     hypercall.op     = __HYPERVISOR_grant_table_op;
     hypercall.arg[0] = cmd;
@@ -26,15 +27,21 @@
 
     if ( mlock(op, sizeof(*op)) != 0 )
     {
+        saved_errno = errno;
         PERROR("do_gnttab_op: op mlock failed");
         goto out;
     }
 
-    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 ) {
+        saved_errno = errno;
         ERROR("do_gnttab_op: HYPERVISOR_grant_table_op failed: %d", ret);
+    }
 
     (void)munlock(op, sizeof(*op));
  out:
+    if (ret < 0) {
+        errno = saved_errno;
+    }
     return ret;
 }
 
@@ -129,8 +136,11 @@
 int xc_grant_interface_open(void)
 {
     int fd = open("/proc/xen/grant", O_RDWR);
-    if ( fd == -1 )
+    if ( fd == -1 ) {
+        int saved_errno = errno;
         PERROR("Could not obtain handle on grant command interface");
+        errno = saved_errno;
+    }
     return fd;
 
 }
diff -ur xen-unstable-1/tools/libxc/xc_misc.c xen-unstable/tools/libxc/xc_misc.c
--- xen-unstable-1/tools/libxc/xc_misc.c        2005-06-22 17:50:44.000000000 -0500
+++ xen-unstable/tools/libxc/xc_misc.c  2005-06-22 23:05:47.644824344 -0500
@@ -9,8 +9,11 @@
 int xc_interface_open(void)
 {
     int fd = open("/proc/xen/privcmd", O_RDWR);
-    if ( fd == -1 )
+    if ( fd == -1 ) {
+        int saved_errno = errno;
         PERROR("Could not obtain handle on privileged command interface");
+        errno = saved_errno;
+    }
     return fd;
 }
 
@@ -28,6 +31,7 @@
     dom0_op_t op;
     char *buffer = *pbuffer;
     unsigned int nr_chars = *pnr_chars;
+    int saved_errno = 0;
 
     op.cmd = DOM0_READCONSOLE;
     op.u.readconsole.buffer = buffer;
@@ -41,10 +45,17 @@
     {
         *pbuffer   = op.u.readconsole.buffer;
         *pnr_chars = op.u.readconsole.count;
     }
+    else
+    {
+        saved_errno = errno;
+    }

     (void)munlock(buffer, nr_chars);

+    if (ret != 0) {
+        errno = saved_errno;
+    }
     return ret;
 }
 
diff -ur xen-unstable-1/tools/libxc/xc_private.c xen-unstable/tools/libxc/xc_private.c
--- xen-unstable-1/tools/libxc/xc_private.c     2005-06-22 17:50:44.000000000 -0500
+++ xen-unstable/tools/libxc/xc_private.c       2005-06-22 23:07:56.069300856 -0500
@@ -16,14 +16,25 @@
     if ( addr == MAP_FAILED )
        return NULL;
 
+    /* even though NULL is a valid return for mmap(), it's not likely on the
+       platforms we support.  we use NULL as an invalid return value ourselves,
+       so just make sure we don't ever get confused. */
+    if ( addr == NULL ) {
+        munmap(addr, num*PAGE_SIZE);
+        errno = EINVAL;
+        return NULL;
+    }
+
     ioctlx.num=num;
     ioctlx.dom=dom;
     ioctlx.addr=(unsigned long)addr;
     ioctlx.arr=arr;
     if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx ) < 0 )
     {
-       perror("XXXXXXXX");
+       int saved_errno = errno;
+       PERROR("mmap_batch ioctl failed!");
        munmap(addr, num*PAGE_SIZE);
+       errno = saved_errno;
        return NULL;
     }
     return addr;
@@ -43,6 +54,12 @@
     if ( addr == MAP_FAILED )
        return NULL;
 
+    if ( addr == NULL ) {
+        munmap(addr, size);
+        errno = EINVAL;
+        return NULL;
+    }
+
     ioctlx.num=1;
     ioctlx.dom=dom;
     ioctlx.entry=&entry;
@@ -51,7 +68,9 @@
     entry.npages=(size+PAGE_SIZE-1)>>PAGE_SHIFT;
     if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) < 0 )
     {
+       int saved_errno = errno;
        munmap(addr, size);
+       errno = saved_errno;
        return NULL;
     }
     return addr;
@@ -82,7 +101,9 @@
     op.u.getpageframeinfo.domain = (domid_t)dom;
     if ( do_dom0_op(xc_handle, &op) < 0 )
     {
+        int saved_errno = errno;
         PERROR("Unexpected failure when getting page frame info!");
+        errno = saved_errno;
         return GETPFN_ERR;
     }
     return op.u.getpageframeinfo.type;
@@ -110,6 +131,7 @@
 {
     int err = 0;
     privcmd_hypercall_t hypercall;
+    int saved_errno = 0;
 
     if ( mmu->idx == 0 )
         return 0;
@@ -122,6 +144,7 @@
 
     if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
     {
+        saved_errno = errno;
         PERROR("flush_mmu_updates: mmu updates mlock failed");
         err = 1;
         goto out;
@@ -129,6 +152,7 @@
 
     if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
     {
+        saved_errno = errno;
         ERROR("Failure when submitting mmu updates");
         err = 1;
     }
@@ -138,6 +162,10 @@
     (void)munlock(mmu->updates, sizeof(mmu->updates));
 
  out:
+    if (err != 0) {
+        errno = saved_errno;
+    }
+
     return err;
 }
 
@@ -179,7 +207,9 @@
     op.u.getvcpucontext.ctxt   = NULL;
     if ( (do_dom0_op(xc_handle, &op) < 0) )
     {
+        int saved_errno = errno;
         PERROR("Could not get info on domain");
+        errno = saved_errno;
         return -1;
     }
     return op.u.getvcpucontext.cpu_time;
@@ -205,7 +235,9 @@
 
     if ( ioctl( xc_handle, IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN, &mfn ) < 0 )
     {
-       perror("xc_get_m2p_start_mfn:");
+       int saved_errno = errno;
+       PERROR("xc_get_m2p_start_mfn:");
+       errno = saved_errno;
        return 0;
     }
     return mfn;
@@ -218,6 +250,8 @@
 {
     dom0_op_t op;
     int ret;
+    int saved_errno;
+
     op.cmd = DOM0_GETMEMLIST;
     op.u.getmemlist.domain   = (domid_t)domid;
     op.u.getmemlist.max_pfns = max_pfns;
@@ -231,6 +265,7 @@
     }    
 
     ret = do_dom0_op(xc_handle, &op);
+    saved_errno = errno;
 
     (void)munlock(pfn_buf, max_pfns * sizeof(unsigned long));
 
@@ -249,6 +284,8 @@
 #endif
 #endif
 
+    if (ret < 0) errno = saved_errno;
+
     return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
 }
 
@@ -303,33 +340,39 @@
     gzFile kernel_gfd = NULL;
     char *image = NULL;
     unsigned int bytes;
+    int saved_errno = 0;
 
     if ( (kernel_fd = open(filename, O_RDONLY)) < 0 )
     {
+        saved_errno = errno;
         PERROR("Could not open kernel image");
         goto out;
     }
 
     if ( (*size = xc_get_filesz(kernel_fd)) == 0 )
     {
+        saved_errno = errno;
         PERROR("Could not read kernel image");
         goto out;
     }
 
     if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL )
     {
+        saved_errno = errno;
         PERROR("Could not allocate decompression state for state file");
         goto out;
     }
 
     if ( (image = malloc(*size)) == NULL )
     {
+        saved_errno = errno;
         PERROR("Could not allocate memory for kernel image");
         goto out;
     }
 
     if ( (bytes = gzread(kernel_gfd, image, *size)) != *size )
     {
+        saved_errno = errno;
         PERROR("Error reading kernel image, could not"
                " read the whole image (%d != %ld).", bytes, *size);
         free(image);
@@ -341,6 +384,10 @@
         gzclose(kernel_gfd);
     else if ( kernel_fd >= 0 )
         close(kernel_fd);
+    if (image == NULL) {
+        errno = saved_errno;
+    }
+
     return image;
 }
 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel