To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 4/4] use xzalloc in x86 code
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Tue, 04 Oct 2011 12:49:36 +0100
Delivery-date: Tue, 04 Oct 2011 04:52:45 -0700
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Use the xzalloc() family of allocators in place of open-coded xmalloc()-plus-memset() pairs throughout the x86 code. This includes the removal of a memset() from microcode_amd.c that was redundant anyway (the buffer is fully overwritten by the immediately following memcpy()), and it fixes a latent bug in vmce.c's alloc_bank_entry(), where memset(entry, 0x0, sizeof(entry)) zeroed only the size of the pointer rather than that of the structure it points to.
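
For reviewers, the conversion is mechanical. Below is a minimal sketch of
the pattern (struct foo and both helper functions are made up purely for
illustration; the xzalloc() wrapper is assumed to be the one introduced
earlier in this series in xen/include/xen/xmalloc.h):

    /* Hypothetical example type, for illustration only. */
    struct foo {
        int bar;
    };

    /* Before: allocate, then zero the object by hand. */
    static struct foo *alloc_foo_before(void)
    {
        struct foo *p = xmalloc(struct foo);

        if ( p )
            memset(p, 0, sizeof(*p));

        return p;
    }

    /* After: xzalloc() returns already-zeroed memory, so the explicit
     * memset() - and the risk of passing it the wrong size, as in
     * vmce.c's alloc_bank_entry() - goes away. */
    static struct foo *alloc_foo_after(void)
    {
        return xzalloc(struct foo);
    }

The xzalloc_array() and xzalloc_bytes() conversions below follow the same
reasoning for the xmalloc_array()/xmalloc_bytes() plus memset() sequences.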

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -602,10 +602,9 @@ static int cpuidle_init_cpu(int cpu)
     if ( !acpi_power )
     {
         int i;
-        acpi_power = xmalloc(struct acpi_processor_power);
+        acpi_power = xzalloc(struct acpi_processor_power);
         if ( !acpi_power )
             return -ENOMEM;
-        memset(acpi_power, 0, sizeof(*acpi_power));
 
         for ( i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++ )
             acpi_power->states[i].idx = i;
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -552,10 +552,9 @@ acpi_cpufreq_cpu_init(struct cpufreq_pol
     struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
     struct processor_performance *perf;
 
-    data = xmalloc(struct acpi_cpufreq_data);
+    data = xzalloc(struct acpi_cpufreq_data);
     if (!data)
         return -ENOMEM;
-    memset(data, 0, sizeof(struct acpi_cpufreq_data));
 
     cpufreq_drv_data[cpu] = data;
 
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -189,10 +189,9 @@ static int powernow_cpufreq_cpu_init(str
     uint64_t msr_content;
     struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
-    data = xmalloc(struct acpi_cpufreq_data);
+    data = xzalloc(struct acpi_cpufreq_data);
     if (!data)
         return -ENOMEM;
-    memset(data, 0, sizeof(struct acpi_cpufreq_data));
 
     cpufreq_drv_data[cpu] = data;
 
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -113,7 +113,7 @@ struct mca_banks *mcabanks_alloc(void)
     if (!mb)
         return NULL;
 
-    mb->bank_map = xmalloc_array(unsigned long,
+    mb->bank_map = xzalloc_array(unsigned long,
                                  BITS_TO_LONGS(nr_mce_banks));
     if (!mb->bank_map)
     {
@@ -122,7 +122,6 @@ struct mca_banks *mcabanks_alloc(void)
     }
 
     mb->num = nr_mce_banks;
-    memset(mb->bank_map, 0, sizeof(long) * BITS_TO_LONGS(nr_mce_banks));
 
     return mb;
 }
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -362,14 +362,13 @@ static struct bank_entry* alloc_bank_ent
 {
     struct bank_entry *entry;
 
-    entry = xmalloc(struct bank_entry);
+    entry = xzalloc(struct bank_entry);
     if ( entry == NULL )
     {
         printk(KERN_ERR "MCE: malloc bank_entry failed\n");
         return NULL;
     }
 
-    memset(entry, 0x0, sizeof(entry));
     INIT_LIST_HEAD(&entry->list);
     return entry;
 }
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -580,11 +580,10 @@ int arch_domain_create(struct domain *d,
 
     if ( !is_idle_domain(d) )
     {
-        d->arch.cpuids = xmalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
+        d->arch.cpuids = xzalloc_array(cpuid_input_t, MAX_CPUID_INPUT);
         rc = -ENOMEM;
         if ( d->arch.cpuids == NULL )
             goto fail;
-        memset(d->arch.cpuids, 0, MAX_CPUID_INPUT * sizeof(*d->arch.cpuids));
         for ( i = 0; i < MAX_CPUID_INPUT; i++ )
         {
             d->arch.cpuids[i].input[0] = XEN_CPUID_INPUT_UNUSED;
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -92,10 +92,9 @@ struct vcpu *__init alloc_dom0_vcpu0(voi
     if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
         opt_dom0_max_vcpus = MAX_VIRT_CPUS;
 
-    dom0->vcpu = xmalloc_array(struct vcpu *, opt_dom0_max_vcpus);
+    dom0->vcpu = xzalloc_array(struct vcpu *, opt_dom0_max_vcpus);
     if ( !dom0->vcpu )
         return NULL;
-    memset(dom0->vcpu, 0, opt_dom0_max_vcpus * sizeof(*dom0->vcpu));
     dom0->max_vcpus = opt_dom0_max_vcpus;
 
     return alloc_vcpu(dom0, 0, 0);
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -365,10 +365,9 @@ static void __init hpet_fsb_cap_lookup(v
     num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
     num_chs++; /* Value read out starts from 0 */
 
-    hpet_events = xmalloc_array(struct hpet_event_channel, num_chs);
+    hpet_events = xzalloc_array(struct hpet_event_channel, num_chs);
     if ( !hpet_events )
         return;
-    memset(hpet_events, 0, num_chs * sizeof(*hpet_events));
 
     for ( i = 0; i < num_chs; i++ )
     {
@@ -504,10 +503,9 @@ void __init hpet_broadcast_init(void)
             return;
 
         if ( !hpet_events )
-            hpet_events = xmalloc(struct hpet_event_channel);
+            hpet_events = xzalloc(struct hpet_event_channel);
         if ( !hpet_events )
             return;
-        memset(hpet_events, 0, sizeof(*hpet_events));
         hpet_events->irq = -1;
 
         /* Start HPET legacy interrupts */
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -444,17 +444,13 @@ int hvm_domain_initialise(struct domain 
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
     spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
-    d->arch.hvm_domain.pbuf = xmalloc_array(char, HVM_PBUF_SIZE);
-    d->arch.hvm_domain.params = xmalloc_array(uint64_t, HVM_NR_PARAMS);
+    d->arch.hvm_domain.pbuf = xzalloc_array(char, HVM_PBUF_SIZE);
+    d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
     d->arch.hvm_domain.io_handler = xmalloc(struct hvm_io_handler);
     rc = -ENOMEM;
     if ( !d->arch.hvm_domain.pbuf || !d->arch.hvm_domain.params ||
          !d->arch.hvm_domain.io_handler )
         goto fail0;
-    memset(d->arch.hvm_domain.pbuf, 0,
-           HVM_PBUF_SIZE * sizeof(*d->arch.hvm_domain.pbuf));
-    memset(d->arch.hvm_domain.params, 0,
-           HVM_NR_PARAMS * sizeof(*d->arch.hvm_domain.params));
     d->arch.hvm_domain.io_handler->num_slot = 0;
 
     hvm_init_guest_time(d);
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -202,10 +202,9 @@ int hvm_vcpu_cacheattr_init(struct vcpu 
 
     memset(m, 0, sizeof(*m));
 
-    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
+    m->var_ranges = xzalloc_array(struct mtrr_var_range, MTRR_VCNT);
     if ( m->var_ranges == NULL )
         return -ENOMEM;
-    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));
 
     m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;
 
@@ -608,12 +607,10 @@ int32_t hvm_set_mem_pinned_cacheattr(
          !is_hvm_domain(d) )
         return -EINVAL;
 
-    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
+    range = xzalloc(struct hvm_mem_pinned_cacheattr_range);
     if ( range == NULL )
         return -ENOMEM;
 
-    memset(range, 0, sizeof(*range));
-
     range->start = gfn_start;
     range->end = gfn_end;
     range->type = type;
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -323,8 +323,7 @@ static void amd_vpmu_initialise(struct v
         }
     }
 
-    ctxt = xmalloc_bytes(sizeof(struct amd_vpmu_context));
-
+    ctxt = xzalloc_bytes(sizeof(struct amd_vpmu_context));
     if ( !ctxt )
     {
         gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, "
@@ -333,7 +332,6 @@ static void amd_vpmu_initialise(struct v
         return;
     }
 
-    memset(ctxt, 0, sizeof(struct amd_vpmu_context));
     vpmu->context = (void *)ctxt;
     vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
 }
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -328,19 +328,15 @@ static int core2_vpmu_alloc_resource(str
     vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
                  core2_calc_intial_glb_ctrl_msr());
 
-    pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
-                 (core2_get_pmc_count()-1)*sizeof(char));
+    pmu_enable = xzalloc_bytes(sizeof(struct core2_pmu_enable) +
+                               core2_get_pmc_count() - 1);
     if ( !pmu_enable )
         goto out1;
-    memset(pmu_enable, 0, sizeof(struct core2_pmu_enable) +
-                 (core2_get_pmc_count()-1)*sizeof(char));
 
-    core2_vpmu_cxt = xmalloc_bytes(sizeof(struct core2_vpmu_context) +
+    core2_vpmu_cxt = xzalloc_bytes(sizeof(struct core2_vpmu_context) +
                     (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
     if ( !core2_vpmu_cxt )
         goto out2;
-    memset(core2_vpmu_cxt, 0, sizeof(struct core2_vpmu_context) +
-                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
     core2_vpmu_cxt->pmu_enable = pmu_enable;
     vpmu->context = (void *)core2_vpmu_cxt;
 
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -245,13 +245,12 @@ int vcpu_init_fpu(struct vcpu *v)
         v->arch.fpu_ctxt = &v->arch.xsave_area->fpu_sse;
     else
     {
-        v->arch.fpu_ctxt = _xmalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
+        v->arch.fpu_ctxt = _xzalloc(sizeof(v->arch.xsave_area->fpu_sse), 16);
         if ( !v->arch.fpu_ctxt )
         {
             rc = -ENOMEM;
             goto done;
         }
-        memset(v->arch.fpu_ctxt, 0, sizeof(v->arch.xsave_area->fpu_sse));
     }
 
 done:
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1365,8 +1365,7 @@ static void __init enable_IO_APIC(void)
     int i, apic;
 
     /* Initialise dynamic irq_2_pin free list. */
-    irq_2_pin = xmalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
-    memset(irq_2_pin, 0, PIN_MAP_SIZE * sizeof(*irq_2_pin));
+    irq_2_pin = xzalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
         
     for (i = 0; i < PIN_MAP_SIZE; i++)
         irq_2_pin[i].pin = -1;
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -313,17 +313,13 @@ int __init init_irq_data(void)
     for (vector = 0; vector < NR_VECTORS; ++vector)
         this_cpu(vector_irq)[vector] = -1;
 
-    irq_desc = xmalloc_array(struct irq_desc, nr_irqs);
-    irq_cfg = xmalloc_array(struct irq_cfg, nr_irqs);
-    irq_vector = xmalloc_array(u8, nr_irqs_gsi);
+    irq_desc = xzalloc_array(struct irq_desc, nr_irqs);
+    irq_cfg = xzalloc_array(struct irq_cfg, nr_irqs);
+    irq_vector = xzalloc_array(u8, nr_irqs_gsi);
     
     if ( !irq_desc || !irq_cfg ||! irq_vector )
         return -ENOMEM;
 
-    memset(irq_desc, 0,  nr_irqs * sizeof(*irq_desc));
-    memset(irq_cfg, 0,  nr_irqs * sizeof(*irq_cfg));
-    memset(irq_vector, 0, nr_irqs_gsi * sizeof(*irq_vector));
-    
     for (irq = 0; irq < nr_irqs; irq++) {
         desc = irq_to_desc(irq);
         cfg = irq_cfg(irq);
@@ -1097,11 +1093,10 @@ struct pirq *alloc_pirq_struct(struct do
 {
     size_t sz = is_hvm_domain(d) ? sizeof(struct pirq) :
                                    offsetof(struct pirq, arch.hvm);
-    struct pirq *pirq = xmalloc_bytes(sz);
+    struct pirq *pirq = xzalloc_bytes(sz);
 
     if ( pirq )
     {
-        memset(pirq, 0, sz);
         if ( is_hvm_domain(d) )
         {
             pirq->arch.hvm.emuirq = IRQ_UNBOUND;
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -231,7 +231,6 @@ static int install_equiv_cpu_table(const
         return -ENOMEM;
     }
 
-    memset(equiv_cpu_table, 0, size);
     memcpy(equiv_cpu_table, (const void *)&buf_pos[3], size);
 
     *offset = size + 12;       /* add header length */
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1922,10 +1922,8 @@ static int shadow_hash_alloc(struct doma
     ASSERT(paging_locked_by_me(d));
     ASSERT(!d->arch.paging.shadow.hash_table);
 
-    table = xmalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
+    table = xzalloc_array(struct page_info *, SHADOW_HASH_BUCKETS);
     if ( !table ) return 1;
-    memset(table, 0, 
-           SHADOW_HASH_BUCKETS * sizeof (struct page_info *));
     d->arch.paging.shadow.hash_table = table;
     return 0;
 }
@@ -2816,7 +2814,7 @@ static void sh_update_paging_modes(struc
     /* Make sure this vcpu has a virtual TLB array allocated */
     if ( unlikely(!v->arch.paging.vtlb) )
     {
-        v->arch.paging.vtlb = xmalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
+        v->arch.paging.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
         if ( unlikely(!v->arch.paging.vtlb) )
         {
             SHADOW_ERROR("Could not allocate vTLB space for dom %u vcpu %u\n",
@@ -2824,8 +2822,6 @@ static void sh_update_paging_modes(struc
             domain_crash(v->domain);
             return;
         }
-        memset(v->arch.paging.vtlb, 0, 
-               VTLB_ENTRIES * sizeof (struct shadow_vtlb));
         spin_lock_init(&v->arch.paging.vtlb_lock);
     }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
@@ -3656,9 +3652,8 @@ int shadow_track_dirty_vram(struct domai
             goto out_dirty_vram;
         memset(dirty_vram->sl1ma, ~0, sizeof(paddr_t) * nr);
 
-        if ( (dirty_vram->dirty_bitmap = xmalloc_array(uint8_t, dirty_size)) == NULL )
+        if ( (dirty_vram->dirty_bitmap = xzalloc_array(uint8_t, dirty_size)) == NULL )
             goto out_sl1ma;
-        memset(dirty_vram->dirty_bitmap, 0, dirty_size);
 
         dirty_vram->last_dirty = NOW();
 
--- a/xen/arch/x86/oprofile/op_model_ppro.c
+++ b/xen/arch/x86/oprofile/op_model_ppro.c
@@ -226,10 +226,9 @@ static int ppro_allocate_msr(struct vcpu
        struct vpmu_struct *vpmu = vcpu_vpmu(v);
        struct arch_msr_pair *msr_content;
 
-       msr_content = xmalloc_bytes( sizeof(struct arch_msr_pair) * num_counters );
+       msr_content = xzalloc_array(struct arch_msr_pair, num_counters);
        if ( !msr_content )
                goto out;
-       memset(msr_content, 0, sizeof(struct arch_msr_pair) * num_counters);
        vpmu->context = (void *)msr_content;
        vpmu->flags = 0;
        vpmu->flags |= PASSIVE_DOMAIN_ALLOCATED;
--- a/xen/arch/x86/x86_64/mmconfig-shared.c
+++ b/xen/arch/x86/x86_64/mmconfig-shared.c
@@ -57,10 +57,9 @@ static const char __init *pci_mmcfg_e752
         pci_mmcfg_config_num = 0;
     else {
         pci_mmcfg_config_num = 1;
-        pci_mmcfg_config = xmalloc(struct acpi_mcfg_allocation);
+        pci_mmcfg_config = xzalloc(struct acpi_mcfg_allocation);
         if (!pci_mmcfg_config)
             return NULL;
-        memset(pci_mmcfg_config, 0, sizeof(pci_mmcfg_config[0]));
         pci_mmcfg_config[0].address = win << 16;
         pci_mmcfg_config[0].pci_segment = 0;
         pci_mmcfg_config[0].start_bus_number = 0;
@@ -111,10 +110,9 @@ static const char __init *pci_mmcfg_inte
         pci_mmcfg_config_num = 0;
 
     if (pci_mmcfg_config_num) {
-        pci_mmcfg_config = xmalloc(struct acpi_mcfg_allocation);
+        pci_mmcfg_config = xzalloc(struct acpi_mcfg_allocation);
         if (!pci_mmcfg_config)
             return NULL;
-        memset(pci_mmcfg_config, 0, sizeof(pci_mmcfg_config[0]));
         pci_mmcfg_config[0].address = pciexbar & mask;
         pci_mmcfg_config[0].pci_segment = 0;
         pci_mmcfg_config[0].start_bus_number = 0;
--- a/xen/arch/x86/x86_64/mmconfig_64.c
+++ b/xen/arch/x86/x86_64/mmconfig_64.c
@@ -171,13 +171,12 @@ int __init pci_mmcfg_arch_init(void)
     if (pci_mmcfg_virt)
         return 0;
 
-    pci_mmcfg_virt = xmalloc_array(struct mmcfg_virt, pci_mmcfg_config_num);
+    pci_mmcfg_virt = xzalloc_array(struct mmcfg_virt, pci_mmcfg_config_num);
     if (pci_mmcfg_virt == NULL) {
         printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
         pci_mmcfg_config_num = 0;
         return 0;
     }
-    memset(pci_mmcfg_virt, 0, sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num);
 
     for (i = 0; i < pci_mmcfg_config_num; ++i) {
         pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -105,11 +105,10 @@ int xstate_alloc_save_area(struct vcpu *
     BUG_ON(xsave_cntxt_size < XSTATE_AREA_MIN_SIZE);
 
     /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
-    save_area = _xmalloc(xsave_cntxt_size, 64);
+    save_area = _xzalloc(xsave_cntxt_size, 64);
     if ( save_area == NULL )
         return -ENOMEM;
 
-    memset(save_area, 0, xsave_cntxt_size);
     ((u32 *)save_area)[6] = 0x1f80;  /* MXCSR */
     *(uint64_t *)(save_area + 512) = XSTATE_FP_SSE;  /* XSETBV */
 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
