diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 --- xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 2005-08-16 10:13:14.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 2005-08-16 17:06:07.000000000 -0400 @@ -16,6 +16,10 @@ CONFIG_XEN_BLKDEV_BACKEND=y # CONFIG_XEN_BLKDEV_TAP_BE is not set CONFIG_XEN_BLKDEV_GRANT=y CONFIG_XEN_NETDEV_BACKEND=y +# CONFIG_XEN_TPMDEV_FRONTEND is not set +CONFIG_XEN_TPMDEV_BACKEND=y +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set +CONFIG_XEN_TPMDEV_GRANT=y CONFIG_XEN_BLKDEV_FRONTEND=y CONFIG_XEN_NETDEV_FRONTEND=y CONFIG_XEN_NETDEV_GRANT_TX=y @@ -853,7 +857,9 @@ CONFIG_DRM_SIS=m # # TPM devices # -# CONFIG_TCG_TPM is not set +CONFIG_TCG_TPM=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m # # I2C support diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 --- xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 2005-08-16 10:13:14.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 2005-08-16 17:06:07.000000000 -0400 @@ -16,6 +16,10 @@ CONFIG_XEN_BLKDEV_BACKEND=y # CONFIG_XEN_BLKDEV_TAP_BE is not set CONFIG_XEN_BLKDEV_GRANT=y CONFIG_XEN_NETDEV_BACKEND=y +# CONFIG_XEN_TPMDEV_FRONTEND is not set +CONFIG_XEN_TPMDEV_BACKEND=y +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set +CONFIG_XEN_TPMDEV_GRANT=y CONFIG_XEN_BLKDEV_FRONTEND=y CONFIG_XEN_NETDEV_FRONTEND=y CONFIG_XEN_NETDEV_GRANT_TX=y @@ -759,7 +763,9 @@ CONFIG_DRM_SIS=m # # TPM devices # -# CONFIG_TCG_TPM is not set +CONFIG_TCG_TPM=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m # # I2C support diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 
xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 --- xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 2005-08-15 23:32:29.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 2005-08-16 17:06:07.000000000 -0400 @@ -13,6 +13,10 @@ CONFIG_NO_IDLE_HZ=y # CONFIG_XEN_PRIVILEGED_GUEST is not set # CONFIG_XEN_PHYSDEV_ACCESS is not set CONFIG_XEN_BLKDEV_GRANT=y +CONFIG_XEN_TPMDEV_FRONTEND=y +# CONFIG_XEN_TPMDEV_BACKEND is not set +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set +CONFIG_XEN_TPMDEV_GRANT=y CONFIG_XEN_BLKDEV_FRONTEND=y CONFIG_XEN_NETDEV_FRONTEND=y CONFIG_XEN_NETDEV_GRANT_TX=y @@ -337,6 +341,8 @@ CONFIG_NETDEVICES=y CONFIG_UNIX98_PTYS=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_TCG_TPM=y +CONFIG_TCG_XEN=y # # Character devices diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 --- xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 2005-08-16 10:13:14.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 2005-08-16 17:06:07.000000000 -0400 @@ -13,6 +13,10 @@ CONFIG_NO_IDLE_HZ=y # CONFIG_XEN_PRIVILEGED_GUEST is not set # CONFIG_XEN_PHYSDEV_ACCESS is not set CONFIG_XEN_BLKDEV_GRANT=y +CONFIG_XEN_TPMDEV_FRONTEND=y +# CONFIG_XEN_TPMDEV_BACKEND is not set +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set +CONFIG_XEN_TPMDEV_GRANT=y CONFIG_XEN_BLKDEV_FRONTEND=y CONFIG_XEN_NETDEV_FRONTEND=y CONFIG_XEN_NETDEV_GRANT_TX=y @@ -660,6 +664,8 @@ CONFIG_NETCONSOLE=m CONFIG_INPUT=m CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set +CONFIG_TCG_TPM=y +CONFIG_TCG_XEN=y # # Character devices diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/Kconfig xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/Kconfig --- 
xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/Kconfig 2005-08-04 12:37:18.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/Kconfig 2005-08-16 17:06:07.000000000 -0400 @@ -79,6 +79,34 @@ config XEN_NETDEV_BACKEND network devices to other guests via a high-performance shared-memory interface. +config XEN_TPMDEV_FRONTEND + bool "TPM-device frontend driver" + default y + help + The TPM-device frontend driver. + +config XEN_TPMDEV_BACKEND + bool "TPM-device backend driver" + default n + help + The TPM-device backend driver + +config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS + bool "TPM backend closes upon vTPM failure" + depends on XEN_TPMDEV_BACKEND + default n + help + The TPM backend closes the channel if the vTPM in userspace indicates + a failure. The corresponding domain's channel will be closed. + Say Y if you want this feature. + +config XEN_TPMDEV_GRANT + bool "TPM devices with grant tables" + depends on XEN_TPMDEV_BACKEND || XEN_TPMDEV_FRONTEND + default n + help + Compile the TPM devices with grant table support + config XEN_BLKDEV_FRONTEND bool "Block-device frontend driver" default y diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers --- xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers 2005-07-19 11:35:16.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers 2005-08-16 17:06:07.000000000 -0400 @@ -49,6 +49,10 @@ source "drivers/infiniband/Kconfig" endif if !XEN_PHYSDEV_ACCESS +source "drivers/char/tpm/Kconfig.domU" +endif + +if !XEN_PHYSDEV_ACCESS menu "Character devices" diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c 2005-08-15 23:32:29.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c 2005-08-16 17:06:07.000000000 -0400 
@@ -81,6 +81,14 @@ static void __do_suspend(void) #define netif_resume() do{}while(0) #endif +#ifdef CONFIG_XEN_TPMDEV_FRONTEND + extern void tpmif_suspend(void); + extern void tpmif_resume(void); +#else +#define tpmif_suspend() do{}while(0) +#define tpmif_resume() do{}while(0) +#endif + #ifdef CONFIG_XEN_USB_FRONTEND extern void usbif_resume(); #else @@ -117,6 +125,8 @@ static void __do_suspend(void) kmem_cache_shrink(pgd_cache); #endif + tpmif_suspend(); + netif_suspend(); blkdev_suspend(); @@ -179,6 +189,8 @@ static void __do_suspend(void) blkdev_resume(); netif_resume(); + + tpmif_resume(); usbif_resume(); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig.domU xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig.domU --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig.domU 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig.domU 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,30 @@ +# +# TPM device configuration +# + +menu "TPM devices" + +config TCG_TPM + tristate "TPM Support for XEN" + depends on ARCH_XEN && !XEN_PHYSDEV_ACCESS + ---help--- + If you want to make TPM security available in your system, + say Yes and it will be accessible from within a user domain. For + more information see . + An implementation of the Trusted Software Stack (TSS), the + userspace enablement piece of the specification, can be + obtained at: . To + compile this driver as a module, choose M here; the module + will be called tpm. If unsure, say N. + +config TCG_XEN + tristate "XEN TPM Interface" + depends on TCG_TPM && ARCH_XEN + ---help--- + If you want to make TPM support available to a Xen + user domain, say Yes and it will + be accessible from within Linux. To compile this driver + as a module, choose M here; the module will be called + tpm_xen. 
+ +endmenu diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/Makefile xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/Makefile --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/Makefile 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/Makefile 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,12 @@ +# +# Makefile for the kernel tpm device drivers. +# +ifeq ($(CONFIG_XEN_PHYSDEV_ACCESS),y) +obj-$(CONFIG_TCG_TPM) += tpm.o +obj-$(CONFIG_TCG_NSC) += tpm_nsc.o +obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o +obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o +else +obj-$(CONFIG_TCG_TPM) += tpm_nopci.o +obj-$(CONFIG_TCG_XEN) += tpm_xen.o +endif diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_atmel.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_atmel.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_atmel.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_atmel.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + */ + +#include "tpm.h" + +/* Atmel definitions */ +enum tpm_atmel_addr { + TPM_ATMEL_BASE_ADDR_LO = 0x08, + TPM_ATMEL_BASE_ADDR_HI = 0x09 +}; + +/* write status bits */ +#define ATML_STATUS_ABORT 0x01 +#define ATML_STATUS_LASTBYTE 0x04 + +/* read status bits */ +#define ATML_STATUS_BUSY 0x01 +#define ATML_STATUS_DATA_AVAIL 0x02 +#define ATML_STATUS_REWRITE 0x04 + + +static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count) +{ + u8 status, *hdr = buf; + u32 size; + int i; + __be32 *native_size; + + /* start reading header */ + if (count < 6) + return -EIO; + + for (i = 0; i < 6; i++) { + status = inb(chip->vendor->base + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { + dev_err(&chip->pci_dev->dev, + "error reading header\n"); + return -EIO; + } + *buf++ = inb(chip->vendor->base); + } + + /* size of the data received */ + native_size = (__force __be32 *) (hdr + 2); + size = be32_to_cpu(*native_size); + + if (count < size) { + dev_err(&chip->pci_dev->dev, + "Recv size(%d) less than available space\n", size); + for (; i < size; i++) { /* clear the waiting data anyway */ + status = inb(chip->vendor->base + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { + dev_err(&chip->pci_dev->dev, + "error reading data\n"); + return -EIO; + } + } + return -EIO; + } + + /* read all the data available */ + for (; i < size; i++) { + status = inb(chip->vendor->base + 1); + if ((status & ATML_STATUS_DATA_AVAIL) == 0) { + dev_err(&chip->pci_dev->dev, + "error reading data\n"); + return -EIO; + } + *buf++ = inb(chip->vendor->base); + } + + /* make sure data available is gone */ + status = inb(chip->vendor->base + 1); + if (status & ATML_STATUS_DATA_AVAIL) { + dev_err(&chip->pci_dev->dev, "data available is stuck\n"); + return -EIO; + } + + return size; +} + +static int tpm_atml_send(struct tpm_chip *chip, u8 * buf, size_t count) +{ + int i; + + dev_dbg(&chip->pci_dev->dev, "tpm_atml_send: "); + for (i = 0; i < count; i++) { + dev_dbg(&chip->pci_dev->dev, 
"0x%x(%d) ", buf[i], buf[i]); + outb(buf[i], chip->vendor->base); + } + + return count; +} + +static void tpm_atml_cancel(struct tpm_chip *chip) +{ + outb(ATML_STATUS_ABORT, chip->vendor->base + 1); +} + +static struct file_operations atmel_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static struct tpm_vendor_specific tpm_atmel = { + .recv = tpm_atml_recv, + .send = tpm_atml_send, + .cancel = tpm_atml_cancel, + .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL, + .req_complete_val = ATML_STATUS_DATA_AVAIL, + .miscdev = { .fops = &atmel_ops, }, +}; + +static int __devinit tpm_atml_init(struct pci_dev *pci_dev, + const struct pci_device_id *pci_id) +{ + u8 version[4]; + int rc = 0; + int lo, hi; + + if (pci_enable_device(pci_dev)) + return -EIO; + + lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO ); + hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI ); + + tpm_atmel.base = (hi<<8)|lo; + dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base); + + /* verify that it is an Atmel part */ + if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T' + || tpm_read_index(6) != 'M' || tpm_read_index(7) != 'L') { + rc = -ENODEV; + goto out_err; + } + + /* query chip for its version number */ + if ((version[0] = tpm_read_index(0x00)) != 0xFF) { + version[1] = tpm_read_index(0x01); + version[2] = tpm_read_index(0x02); + version[3] = tpm_read_index(0x03); + } else { + dev_info(&pci_dev->dev, "version query failed\n"); + rc = -ENODEV; + goto out_err; + } + + if ((rc = tpm_register_hardware(pci_dev, &tpm_atmel)) < 0) + goto out_err; + + dev_info(&pci_dev->dev, + "Atmel TPM version %d.%d.%d.%d\n", version[0], version[1], + version[2], version[3]); + + return 0; +out_err: + pci_disable_device(pci_dev); + return rc; +} + +static struct pci_device_id tpm_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)}, + 
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, tpm_pci_tbl); + +static struct pci_driver atmel_pci_driver = { + .name = "tpm_atmel", + .id_table = tpm_pci_tbl, + .probe = tpm_atml_init, + .remove = __devexit_p(tpm_remove), + .suspend = tpm_pm_suspend, + .resume = tpm_pm_resume, +}; + +static int __init init_atmel(void) +{ + return pci_register_driver(&atmel_pci_driver); +} + +static void __exit cleanup_atmel(void) +{ + pci_unregister_driver(&atmel_pci_driver); +} + +module_init(init_atmel); +module_exit(cleanup_atmel); + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,627 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * Note, the TPM chip is not interrupt driven (only polling) + * and can have very long timeouts (minutes!). 
Hence the unusual + * calls to schedule_timeout. + * + */ + +#include +#include +#include +#include "tpm.h" + +#define TPM_MINOR 224 /* officially assigned */ + +#define TPM_BUFSIZE 2048 + +static LIST_HEAD(tpm_chip_list); +static DEFINE_SPINLOCK(driver_lock); +static int dev_mask[32]; + +static void user_reader_timeout(unsigned long ptr) +{ + struct tpm_chip *chip = (struct tpm_chip *) ptr; + + down(&chip->buffer_mutex); + atomic_set(&chip->data_pending, 0); + memset(chip->data_buffer, 0, TPM_BUFSIZE); + up(&chip->buffer_mutex); +} + +void tpm_time_expired(unsigned long ptr) +{ + int *exp = (int *) ptr; + *exp = 1; +} + +EXPORT_SYMBOL_GPL(tpm_time_expired); + +/* + * Internal kernel interface to transmit TPM commands + */ +static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, + size_t bufsiz) +{ + ssize_t len; + u32 count; + __be32 *native_size; + + native_size = (__force __be32 *) (buf + 2); + count = be32_to_cpu(*native_size); + + if (count == 0) + return -ENODATA; + if (count > bufsiz) { + dev_err(&chip->pci_dev->dev, + "invalid count value %x %zx \n", count, bufsiz); + return -E2BIG; + } + + down(&chip->tpm_mutex); + + if ((len = chip->vendor->send(chip, (u8 *) buf, count)) < 0) { + dev_err(&chip->pci_dev->dev, + "tpm_transmit: tpm_send: error %zd\n", len); + return len; + } + + down(&chip->timer_manipulation_mutex); + chip->time_expired = 0; + init_timer(&chip->device_timer); + chip->device_timer.function = tpm_time_expired; + chip->device_timer.expires = jiffies + 2 * 60 * HZ; + chip->device_timer.data = (unsigned long) &chip->time_expired; + add_timer(&chip->device_timer); + up(&chip->timer_manipulation_mutex); + + do { + u8 status = inb(chip->vendor->base + 1); + if ((status & chip->vendor->req_complete_mask) == + chip->vendor->req_complete_val) { + down(&chip->timer_manipulation_mutex); + del_singleshot_timer_sync(&chip->device_timer); + up(&chip->timer_manipulation_mutex); + goto out_recv; + } + set_current_state(TASK_UNINTERRUPTIBLE); + 
schedule_timeout(TPM_TIMEOUT); + rmb(); + } while (!chip->time_expired); + + + chip->vendor->cancel(chip); + dev_err(&chip->pci_dev->dev, "Time expired\n"); + up(&chip->tpm_mutex); + return -EIO; + +out_recv: + len = chip->vendor->recv(chip, (u8 *) buf, bufsiz); + if (len < 0) + dev_err(&chip->pci_dev->dev, + "tpm_transmit: tpm_recv: error %zd\n", len); + up(&chip->tpm_mutex); + return len; +} + +#define TPM_DIGEST_SIZE 20 +#define CAP_PCR_RESULT_SIZE 18 +static u8 cap_pcr[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 22, /* length */ + 0, 0, 0, 101, /* TPM_ORD_GetCapability */ + 0, 0, 0, 5, + 0, 0, 0, 4, + 0, 0, 1, 1 +}; + +#define READ_PCR_RESULT_SIZE 30 +static u8 pcrread[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 14, /* length */ + 0, 0, 0, 21, /* TPM_ORD_PcrRead */ + 0, 0, 0, 0 /* PCR index */ +}; + +static ssize_t show_pcrs(struct device *dev, char *buf) +{ + u8 data[READ_PCR_RESULT_SIZE]; + ssize_t len; + int i, j, index, num_pcrs; + char *str = buf; + + struct tpm_chip *chip = + pci_get_drvdata(container_of(dev, struct pci_dev, dev)); + if (chip == NULL) + return -ENODEV; + + memcpy(data, cap_pcr, sizeof(cap_pcr)); + if ((len = tpm_transmit(chip, data, sizeof(data))) + < CAP_PCR_RESULT_SIZE) + return len; + + num_pcrs = be32_to_cpu(*((__force __be32 *) (data + 14))); + + for (i = 0; i < num_pcrs; i++) { + memcpy(data, pcrread, sizeof(pcrread)); + index = cpu_to_be32(i); + memcpy(data + 10, &index, 4); + if ((len = tpm_transmit(chip, data, sizeof(data))) + < READ_PCR_RESULT_SIZE) + return len; + str += sprintf(str, "PCR-%02d: ", i); + for (j = 0; j < TPM_DIGEST_SIZE; j++) + str += sprintf(str, "%02X ", *(data + 10 + j)); + str += sprintf(str, "\n"); + } + return str - buf; +} + +static DEVICE_ATTR(pcrs, S_IRUGO, show_pcrs, NULL); + +#define READ_PUBEK_RESULT_SIZE 314 +static u8 readpubek[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 30, /* length */ + 0, 0, 0, 124, /* TPM_ORD_ReadPubek */ +}; + +static ssize_t show_pubek(struct device 
*dev, char *buf) +{ + u8 data[READ_PUBEK_RESULT_SIZE]; + ssize_t len; + __be32 *native_val; + int i; + char *str = buf; + + struct tpm_chip *chip = + pci_get_drvdata(container_of(dev, struct pci_dev, dev)); + if (chip == NULL) + return -ENODEV; + + memcpy(data, readpubek, sizeof(readpubek)); + memset(data + sizeof(readpubek), 0, 20); /* zero nonce */ + + if ((len = tpm_transmit(chip, data, sizeof(data))) < + READ_PUBEK_RESULT_SIZE) + return len; + + /* + ignore header 10 bytes + algorithm 32 bits (1 == RSA ) + encscheme 16 bits + sigscheme 16 bits + parameters (RSA 12->bytes: keybit, #primes, expbit) + keylenbytes 32 bits + 256 byte modulus + ignore checksum 20 bytes + */ + + native_val = (__force __be32 *) (data + 34); + + str += + sprintf(str, + "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n" + "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X" + " %02X %02X %02X %02X %02X %02X %02X %02X\n" + "Modulus length: %d\nModulus: \n", + data[10], data[11], data[12], data[13], data[14], + data[15], data[16], data[17], data[22], data[23], + data[24], data[25], data[26], data[27], data[28], + data[29], data[30], data[31], data[32], data[33], + be32_to_cpu(*native_val) + ); + + for (i = 0; i < 256; i++) { + str += sprintf(str, "%02X ", data[i + 39]); + if ((i + 1) % 16 == 0) + str += sprintf(str, "\n"); + } + return str - buf; +} + +static DEVICE_ATTR(pubek, S_IRUGO, show_pubek, NULL); + +#define CAP_VER_RESULT_SIZE 18 +static u8 cap_version[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 18, /* length */ + 0, 0, 0, 101, /* TPM_ORD_GetCapability */ + 0, 0, 0, 6, + 0, 0, 0, 0 +}; + +#define CAP_MANUFACTURER_RESULT_SIZE 18 +static u8 cap_manufacturer[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 22, /* length */ + 0, 0, 0, 101, /* TPM_ORD_GetCapability */ + 0, 0, 0, 5, + 0, 0, 0, 4, + 0, 0, 1, 3 +}; + +static ssize_t show_caps(struct device *dev, char *buf) +{ + u8 data[READ_PUBEK_RESULT_SIZE]; + ssize_t len; + char *str = buf; + + struct tpm_chip 
*chip = + pci_get_drvdata(container_of(dev, struct pci_dev, dev)); + if (chip == NULL) + return -ENODEV; + + memcpy(data, cap_manufacturer, sizeof(cap_manufacturer)); + + if ((len = tpm_transmit(chip, data, sizeof(data))) < + CAP_MANUFACTURER_RESULT_SIZE) + return len; + + str += sprintf(str, "Manufacturer: 0x%x\n", + be32_to_cpu(*(data + 14))); + + memcpy(data, cap_version, sizeof(cap_version)); + + if ((len = tpm_transmit(chip, data, sizeof(data))) < + CAP_VER_RESULT_SIZE) + return len; + + str += + sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", + (int) data[14], (int) data[15], (int) data[16], + (int) data[17]); + + return str - buf; +} + +static DEVICE_ATTR(caps, S_IRUGO, show_caps, NULL); + +/* + * Device file system interface to the TPM + */ +int tpm_open(struct inode *inode, struct file *file) +{ + int rc = 0, minor = iminor(inode); + struct tpm_chip *chip = NULL, *pos; + + spin_lock(&driver_lock); + + list_for_each_entry(pos, &tpm_chip_list, list) { + if (pos->vendor->miscdev.minor == minor) { + chip = pos; + break; + } + } + + if (chip == NULL) { + rc = -ENODEV; + goto err_out; + } + + if (chip->num_opens) { + dev_dbg(&chip->pci_dev->dev, + "Another process owns this TPM\n"); + rc = -EBUSY; + goto err_out; + } + + chip->num_opens++; + pci_dev_get(chip->pci_dev); + + spin_unlock(&driver_lock); + + chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); + if (chip->data_buffer == NULL) { + chip->num_opens--; + pci_dev_put(chip->pci_dev); + return -ENOMEM; + } + + atomic_set(&chip->data_pending, 0); + + file->private_data = chip; + return 0; + +err_out: + spin_unlock(&driver_lock); + return rc; +} + +EXPORT_SYMBOL_GPL(tpm_open); + +int tpm_release(struct inode *inode, struct file *file) +{ + struct tpm_chip *chip = file->private_data; + + file->private_data = NULL; + + spin_lock(&driver_lock); + chip->num_opens--; + spin_unlock(&driver_lock); + + down(&chip->timer_manipulation_mutex); + if (timer_pending(&chip->user_read_timer)) + 
del_singleshot_timer_sync(&chip->user_read_timer); + else if (timer_pending(&chip->device_timer)) + del_singleshot_timer_sync(&chip->device_timer); + up(&chip->timer_manipulation_mutex); + + kfree(chip->data_buffer); + atomic_set(&chip->data_pending, 0); + + pci_dev_put(chip->pci_dev); + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_release); + +ssize_t tpm_write(struct file * file, const char __user * buf, + size_t size, loff_t * off) +{ + struct tpm_chip *chip = file->private_data; + int in_size = size, out_size; + + /* cannot perform a write until the read has cleared + either via tpm_read or a user_read_timer timeout */ + while (atomic_read(&chip->data_pending) != 0) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(TPM_TIMEOUT); + } + + down(&chip->buffer_mutex); + + if (in_size > TPM_BUFSIZE) + in_size = TPM_BUFSIZE; + + if (copy_from_user + (chip->data_buffer, (void __user *) buf, in_size)) { + up(&chip->buffer_mutex); + return -EFAULT; + } + + /* atomic tpm command send and result receive */ + out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); + + atomic_set(&chip->data_pending, out_size); + atomic_set(&chip->data_position, 0); + up(&chip->buffer_mutex); + + /* Set a timeout by which the reader must come claim the result */ + down(&chip->timer_manipulation_mutex); + init_timer(&chip->user_read_timer); + chip->user_read_timer.function = user_reader_timeout; + chip->user_read_timer.data = (unsigned long) chip; + chip->user_read_timer.expires = jiffies + (60 * HZ); + add_timer(&chip->user_read_timer); + up(&chip->timer_manipulation_mutex); + + return in_size; +} + +EXPORT_SYMBOL_GPL(tpm_write); + +ssize_t tpm_read(struct file * file, char __user * buf, + size_t size, loff_t * off) +{ + struct tpm_chip *chip = file->private_data; + int ret_size = -ENODATA; + int pos, pending = 0; + + down(&chip->buffer_mutex); + ret_size = atomic_read(&chip->data_pending); + if ( ret_size > 0 ) { /* Result available */ + if (size < ret_size) + ret_size = size; 
+ + pos = atomic_read(&chip->data_position); + + if (copy_to_user((void __user *) buf, + &chip->data_buffer[pos], ret_size)) { + ret_size = -EFAULT; + } else { + pending = atomic_read(&chip->data_pending) - ret_size; + if ( pending ) { + atomic_set( &chip->data_pending, pending ); + atomic_set( &chip->data_position, pos+ret_size ); + } + } + } + up(&chip->buffer_mutex); + + if ( ret_size <= 0 || pending == 0 ) { + atomic_set( &chip->data_pending, 0 ); + down(&chip->timer_manipulation_mutex); + del_singleshot_timer_sync(&chip->user_read_timer); + up(&chip->timer_manipulation_mutex); + } + + return ret_size; +} + +EXPORT_SYMBOL_GPL(tpm_read); + +void __devexit tpm_remove(struct pci_dev *pci_dev) +{ + struct tpm_chip *chip = pci_get_drvdata(pci_dev); + + if (chip == NULL) { + dev_err(&pci_dev->dev, "No device data found\n"); + return; + } + + spin_lock(&driver_lock); + + list_del(&chip->list); + + spin_unlock(&driver_lock); + + pci_set_drvdata(pci_dev, NULL); + misc_deregister(&chip->vendor->miscdev); + + device_remove_file(&pci_dev->dev, &dev_attr_pubek); + device_remove_file(&pci_dev->dev, &dev_attr_pcrs); + device_remove_file(&pci_dev->dev, &dev_attr_caps); + + pci_disable_device(pci_dev); + + dev_mask[chip->dev_num / 32] &= !(1 << (chip->dev_num % 32)); + + kfree(chip); + + pci_dev_put(pci_dev); +} + +EXPORT_SYMBOL_GPL(tpm_remove); + +static u8 savestate[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 10, /* blob length (in bytes) */ + 0, 0, 0, 152 /* TPM_ORD_SaveState */ +}; + +/* + * We are about to suspend. Save the TPM state + * so that it can be restored. + */ +int tpm_pm_suspend(struct pci_dev *pci_dev, pm_message_t pm_state) +{ + struct tpm_chip *chip = pci_get_drvdata(pci_dev); + if (chip == NULL) + return -ENODEV; + + tpm_transmit(chip, savestate, sizeof(savestate)); + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_pm_suspend); + +/* + * Resume from a power safe. The BIOS already restored + * the TPM state. 
+ */ +int tpm_pm_resume(struct pci_dev *pci_dev) +{ + struct tpm_chip *chip = pci_get_drvdata(pci_dev); + + if (chip == NULL) + return -ENODEV; + + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_pm_resume); + +/* + * Called from tpm_.c probe function only for devices + * the driver has determined it should claim. Prior to calling + * this function the specific probe function has called pci_enable_device + * upon errant exit from this function specific probe function should call + * pci_disable_device + */ +int tpm_register_hardware(struct pci_dev *pci_dev, + struct tpm_vendor_specific *entry) +{ + char devname[7]; + struct tpm_chip *chip; + int i, j; + + /* Driver specific per-device data */ + chip = kmalloc(sizeof(*chip), GFP_KERNEL); + if (chip == NULL) + return -ENOMEM; + + memset(chip, 0, sizeof(struct tpm_chip)); + + init_MUTEX(&chip->buffer_mutex); + init_MUTEX(&chip->tpm_mutex); + init_MUTEX(&chip->timer_manipulation_mutex); + INIT_LIST_HEAD(&chip->list); + + chip->vendor = entry; + + chip->dev_num = -1; + + for (i = 0; i < 32; i++) + for (j = 0; j < 8; j++) + if ((dev_mask[i] & (1 << j)) == 0) { + chip->dev_num = i * 32 + j; + dev_mask[i] |= 1 << j; + goto dev_num_search_complete; + } + +dev_num_search_complete: + if (chip->dev_num < 0) { + dev_err(&pci_dev->dev, + "No available tpm device numbers\n"); + kfree(chip); + return -ENODEV; + } else if (chip->dev_num == 0) + chip->vendor->miscdev.minor = TPM_MINOR; + else + chip->vendor->miscdev.minor = MISC_DYNAMIC_MINOR; + + snprintf(devname, sizeof(devname), "%s%d", "tpm", chip->dev_num); + chip->vendor->miscdev.name = devname; + + chip->vendor->miscdev.dev = &(pci_dev->dev); + chip->pci_dev = pci_dev_get(pci_dev); + + if (misc_register(&chip->vendor->miscdev)) { + dev_err(&chip->pci_dev->dev, + "unable to misc_register %s, minor %d\n", + chip->vendor->miscdev.name, + chip->vendor->miscdev.minor); + pci_dev_put(pci_dev); + kfree(chip); + dev_mask[i] &= !(1 << j); + return -ENODEV; + } + + pci_set_drvdata(pci_dev, 
chip); + + list_add(&chip->list, &tpm_chip_list); + + device_create_file(&pci_dev->dev, &dev_attr_pubek); + device_create_file(&pci_dev->dev, &dev_attr_pcrs); + device_create_file(&pci_dev->dev, &dev_attr_caps); + + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_register_hardware); + +static int __init init_tpm(void) +{ + return 0; +} + +static void __exit cleanup_tpm(void) +{ + +} + +module_init(init_tpm); +module_exit(cleanup_tpm); + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm.h xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm.h --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm.h 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm.h 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + */ +#include +#include +#include +#include +#include +#include + +#define TPM_TIMEOUT msecs_to_jiffies(5) + +/* TPM addresses */ +#define TPM_ADDR 0x4E +#define TPM_DATA 0x4F + +struct tpm_chip; + +struct tpm_vendor_specific { + u8 req_complete_mask; + u8 req_complete_val; + u16 base; /* TPM base address */ + + int (*recv) (struct tpm_chip *, u8 *, size_t); + int (*send) (struct tpm_chip *, u8 *, size_t); + void (*cancel) (struct tpm_chip *); + struct miscdevice miscdev; +}; + +struct tpm_chip { + struct pci_dev *pci_dev; /* PCI device stuff */ + + int dev_num; /* /dev/tpm# */ + int num_opens; /* only one allowed */ + int time_expired; + + /* Data passed to and from the tpm via the read/write calls */ + u8 *data_buffer; + atomic_t data_pending; + atomic_t data_position; + struct semaphore buffer_mutex; + + struct timer_list user_read_timer; /* user needs to claim result */ + struct semaphore tpm_mutex; /* tpm is processing */ + struct timer_list device_timer; /* tpm is processing */ + struct semaphore timer_manipulation_mutex; + + struct tpm_vendor_specific *vendor; + + struct list_head list; +}; + +static inline int tpm_read_index(int index) +{ + outb(index, TPM_ADDR); + return inb(TPM_DATA) & 0xFF; +} + +static inline void tpm_write_index(int index, int value) +{ + outb(index, TPM_ADDR); + outb(value & 0xFF, TPM_DATA); +} + +extern void tpm_time_expired(unsigned long); +extern int tpm_register_hardware(struct pci_dev *, + struct tpm_vendor_specific *); +extern int tpm_open(struct inode *, struct file *); +extern int tpm_release(struct inode *, struct file *); +extern ssize_t tpm_write(struct file *, const char __user *, size_t, + loff_t *); +extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *); +extern void __devexit tpm_remove(struct pci_dev *); +extern int tpm_pm_suspend(struct pci_dev *, pm_message_t); +extern int tpm_pm_resume(struct pci_dev *); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.c 
xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,741 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * Note, the TPM chip is not interrupt driven (only polling) + * and can have very long timeouts (minutes!). Hence the unusual + * calls to schedule_timeout. + * + */ + +#include +#include +#include +#include "tpm_nopci.h" + +enum { + TPM_MINOR = 224, /* officially assigned */ + TPM_BUFSIZE = 2048, + TPM_NUM_DEVICES = 256, + TPM_NUM_MASK_ENTRIES = TPM_NUM_DEVICES / (8 * sizeof(int)) +}; + + /* PCI configuration addresses */ +enum { + PCI_GEN_PMCON_1 = 0xA0, + PCI_GEN1_DEC = 0xE4, + PCI_LPC_EN = 0xE6, + PCI_GEN2_DEC = 0xEC +}; + +enum { + TPM_LOCK_REG = 0x0D, + TPM_INTERUPT_REG = 0x0A, + TPM_BASE_ADDR_LO = 0x08, + TPM_BASE_ADDR_HI = 0x09, + TPM_UNLOCK_VALUE = 0x55, + TPM_LOCK_VALUE = 0xAA, + TPM_DISABLE_INTERUPT_VALUE = 0x00 +}; + +static LIST_HEAD(tpm_chip_list); +static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED; +static int dev_mask[32]; + +static void user_reader_timeout(unsigned long ptr) +{ + struct tpm_chip *chip = (struct tpm_chip *) ptr; + + down(&chip->buffer_mutex); + atomic_set(&chip->data_pending, 0); + memset(chip->data_buffer, 0, TPM_BUFSIZE); + up(&chip->buffer_mutex); +} + +void tpm_time_expired(unsigned long ptr) +{ + int *exp = (int *) ptr; + *exp = 1; +} + 
+EXPORT_SYMBOL_GPL(tpm_time_expired); + + +/* + * This function should be used by other kernel subsystems attempting to use the tpm through the tpm_transmit interface. + * A call to this function will return the chip structure corresponding to the TPM you are looking for that can then be sent with your command to tpm_transmit. + * Passing 0 as the argument corresponds to /dev/tpm0 and thus the first and probably primary TPM on the system. Passing 1 corresponds to /dev/tpm1 and the next TPM discovered. If a TPM with the given chip_num does not exist NULL will be returned. + */ +struct tpm_chip* tpm_chip_lookup(int chip_num) +{ + + struct tpm_chip *pos; + list_for_each_entry(pos, &tpm_chip_list, list) + if (pos->dev_num == chip_num || + chip_num == TPM_ANY_NUM) + return pos; + + return NULL; + +} + +/* + * Internal kernel interface to transmit TPM commands + */ +ssize_t tpm_transmit(struct tpm_chip * chip, const char *buf, + size_t bufsiz) +{ + ssize_t rc; + u32 count; + unsigned long stop; + + count = be32_to_cpu(*((__be32 *) (buf + 2))); + + if (count == 0) + return -ENODATA; + if (count > bufsiz) { + dev_err(chip->dev, + "invalid count value %x %x \n", count, bufsiz); + return -E2BIG; + } + + dev_dbg(chip->dev, "TPM Ordinal: %d\n", + be32_to_cpu(*((__be32 *) (buf + 6)))); + dev_dbg(chip->dev, "Chip Status: %x\n", + inb(chip->vendor->base + 1)); + + down(&chip->tpm_mutex); + + if ((rc = chip->vendor->send(chip, (u8 *) buf, count)) < 0) { + dev_err(chip->dev, + "tpm_transmit: tpm_send: error %d\n", rc); + goto out; + } + + stop = jiffies + 2 * 60 * HZ; + do { + u8 status = chip->vendor->status(chip); + if ((status & chip->vendor->req_complete_mask) == + chip->vendor->req_complete_val) { + goto out_recv; + } + + if ((status == chip->vendor->req_canceled)) { + dev_err(chip->dev, "Operation Canceled\n"); + rc = -ECANCELED; + goto out; + } + + msleep(TPM_TIMEOUT); /* CHECK */ + rmb(); + } + while (time_before(jiffies, stop)); + + + chip->vendor->cancel(chip); + 
dev_err(chip->dev, "Operation Timed out\n"); + rc = -ETIME; + goto out; + +out_recv: + rc = chip->vendor->recv(chip, (u8 *) buf, bufsiz); + if (rc < 0) + dev_err(chip->dev, + "tpm_transmit: tpm_recv: error %d\n", rc); + atomic_set(&chip->data_position, 0); + +out: + up(&chip->tpm_mutex); + return rc; +} + +EXPORT_SYMBOL_GPL(tpm_transmit); + +#define TPM_DIGEST_SIZE 20 +#define CAP_PCR_RESULT_SIZE 18 +static const u8 cap_pcr[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 22, /* length */ + 0, 0, 0, 101, /* TPM_ORD_GetCapability */ + 0, 0, 0, 5, + 0, 0, 0, 4, + 0, 0, 1, 1 +}; + +#define READ_PCR_RESULT_SIZE 30 +static const u8 pcrread[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 14, /* length */ + 0, 0, 0, 21, /* TPM_ORD_PcrRead */ + 0, 0, 0, 0 /* PCR index */ +}; + +ssize_t tpm_show_pcrs(struct device *dev, char *buf) +{ + u8 data[READ_PCR_RESULT_SIZE]; + ssize_t len; + int i, j, num_pcrs; + __be32 index; + char *str = buf; + + struct tpm_chip *chip = dev_get_drvdata(dev); + if (chip == NULL) + return -ENODEV; + + memcpy(data, cap_pcr, sizeof(cap_pcr)); + if ((len = tpm_transmit(chip, data, sizeof(data))) + < CAP_PCR_RESULT_SIZE) + return len; + + num_pcrs = be32_to_cpu(*((__be32 *) (data + 14))); + + for (i = 0; i < num_pcrs; i++) { + memcpy(data, pcrread, sizeof(pcrread)); + index = cpu_to_be32(i); + memcpy(data + 10, &index, 4); + if ((len = tpm_transmit(chip, data, sizeof(data))) + < READ_PCR_RESULT_SIZE) + return len; + str += sprintf(str, "PCR-%02d: ", i); + for (j = 0; j < TPM_DIGEST_SIZE; j++) + str += sprintf(str, "%02X ", *(data + 10 + j)); + str += sprintf(str, "\n"); + } + return str - buf; +} + +EXPORT_SYMBOL_GPL(tpm_show_pcrs); + +/* + * Return 0 on success. On error pass along error code. 
+ * chip_id Upper 2 bytes equal ANY, HW_ONLY or SW_ONLY + * Lower 2 bytes equal tpm idx # or AN& + * res_buf must fit a TPM_PCR (20 bytes) or NULL if you don't care + */ +int tpm_pcr_read( u32 chip_id, int pcr_idx, u8* res_buf, int res_buf_size ) +{ + u8 data[READ_PCR_RESULT_SIZE]; + int rc; + __be32 index; + int chip_num = chip_id & TPM_CHIP_NUM_MASK; + struct tpm_chip* chip; + + if ( res_buf && res_buf_size < TPM_DIGEST_SIZE ) + return -ENOSPC; + if ( (chip = tpm_chip_lookup( chip_num /*, + chip_id >> TPM_CHIP_TYPE_SHIFT*/ ) ) == NULL ) { + printk("chip %d not found.\n",chip_num); + return -ENODEV; + } + memcpy(data, pcrread, sizeof(pcrread)); + index = cpu_to_be32(pcr_idx); + memcpy(data + 10, &index, 4); + if ((rc = tpm_transmit(chip, data, sizeof(data))) > 0 ) + rc = be32_to_cpu(*((u32*)(data+6))); + + if ( rc == 0 && res_buf ) + memcpy(res_buf, data+10, TPM_DIGEST_SIZE); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_pcr_read); + +#define EXTEND_PCR_SIZE 34 +static const u8 pcrextend[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 34, /* length */ + 0, 0, 0, 20, /* TPM_ORD_Extend */ + 0, 0, 0, 0 /* PCR index */ +}; + +/* + * Return 0 on success. On error pass along error code. 
+ * chip_id Upper 2 bytes equal ANY, HW_ONLY or SW_ONLY + * Lower 2 bytes equal tpm idx # or ANY + */ +int tpm_pcr_extend(u32 chip_id, int pcr_idx, const u8* hash) +{ + u8 data[EXTEND_PCR_SIZE]; + int rc; + __be32 index; + int chip_num = chip_id & TPM_CHIP_NUM_MASK; + struct tpm_chip* chip; + + if ( (chip = tpm_chip_lookup( chip_num /*, + chip_id >> TPM_CHIP_TYPE_SHIFT */)) == NULL ) + return -ENODEV; + + memcpy(data, pcrextend, sizeof(pcrextend)); + index = cpu_to_be32(pcr_idx); + memcpy(data + 10, &index, 4); + memcpy( data + 14, hash, TPM_DIGEST_SIZE ); + if ((rc = tpm_transmit(chip, data, sizeof(data))) > 0 ) + rc = be32_to_cpu(*((u32*)(data+6))); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_pcr_extend); + + + +#define READ_PUBEK_RESULT_SIZE 314 +static const u8 readpubek[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 30, /* length */ + 0, 0, 0, 124, /* TPM_ORD_ReadPubek */ +}; + +ssize_t tpm_show_pubek(struct device *dev, char *buf) +{ + u8 *data; + ssize_t len; + int i, rc; + char *str = buf; + + struct tpm_chip *chip = dev_get_drvdata(dev); + if (chip == NULL) + return -ENODEV; + + data = kmalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL); + if (!data) + return -ENOMEM; + + memcpy(data, readpubek, sizeof(readpubek)); + memset(data + sizeof(readpubek), 0, 20); /* zero nonce */ + + if ((len = tpm_transmit(chip, data, READ_PUBEK_RESULT_SIZE)) < + READ_PUBEK_RESULT_SIZE) { + rc = len; + goto out; + } + + /* + ignore header 10 bytes + algorithm 32 bits (1 == RSA ) + encscheme 16 bits + sigscheme 16 bits + parameters (RSA 12->bytes: keybit, #primes, expbit) + keylenbytes 32 bits + 256 byte modulus + ignore checksum 20 bytes + */ + + str += + sprintf(str, + "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n" + "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X" + " %02X %02X %02X %02X %02X %02X %02X %02X\n" + "Modulus length: %d\nModulus: \n", + data[10], data[11], data[12], data[13], data[14], + data[15], data[16], data[17], data[22], data[23], + data[24], 
data[25], data[26], data[27], data[28], + data[29], data[30], data[31], data[32], data[33], + be32_to_cpu(*((__be32 *) (data + 32)))); + + for (i = 0; i < 256; i++) { + str += sprintf(str, "%02X ", data[i + 39]); + if ((i + 1) % 16 == 0) + str += sprintf(str, "\n"); + } + rc = str - buf; +out: + kfree(data); + return rc; +} + +EXPORT_SYMBOL_GPL(tpm_show_pubek); + +#define CAP_VER_RESULT_SIZE 18 +static const u8 cap_version[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 18, /* length */ + 0, 0, 0, 101, /* TPM_ORD_GetCapability */ + 0, 0, 0, 6, + 0, 0, 0, 0 +}; + +#define CAP_MANUFACTURER_RESULT_SIZE 18 +static const u8 cap_manufacturer[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 22, /* length */ + 0, 0, 0, 101, /* TPM_ORD_GetCapability */ + 0, 0, 0, 5, + 0, 0, 0, 4, + 0, 0, 1, 3 +}; + +ssize_t tpm_show_caps(struct device *dev, char *buf) +{ + u8 data[sizeof(cap_manufacturer)]; + ssize_t len; + char *str = buf; + + struct tpm_chip *chip = dev_get_drvdata(dev); + if (chip == NULL) + return -ENODEV; + + memcpy(data, cap_manufacturer, sizeof(cap_manufacturer)); + + if ((len = tpm_transmit(chip, data, sizeof(data))) < + CAP_MANUFACTURER_RESULT_SIZE) + return len; + + str += sprintf(str, "Manufacturer: 0x%x\n", + be32_to_cpu(*((__be32 *)(data + 14)))); + + memcpy(data, cap_version, sizeof(cap_version)); + + if ((len = tpm_transmit(chip, data, sizeof(data))) < + CAP_VER_RESULT_SIZE) + return len; + + str += + sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", + (int) data[14], (int) data[15], (int) data[16], + (int) data[17]); + + return str - buf; +} + +EXPORT_SYMBOL_GPL(tpm_show_caps); + +ssize_t tpm_store_cancel(struct device * dev, const char *buf, + size_t count) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + if (chip == NULL) + return 0; + + chip->vendor->cancel(chip); + return count; +} + +EXPORT_SYMBOL_GPL(tpm_store_cancel); + +/* + * Device file system interface to the TPM + */ +int tpm_open(struct inode *inode, struct file *file) 
+{ + int rc = 0, minor = iminor(inode); + struct tpm_chip *chip = NULL, *pos; + + spin_lock(&driver_lock); + + list_for_each_entry(pos, &tpm_chip_list, list) { + if (pos->vendor->miscdev.minor == minor) { + chip = pos; + break; + } + } + + if (chip == NULL) { + rc = -ENODEV; + goto err_out; + } + + if (chip->num_opens) { + dev_dbg(chip->dev, "Another process owns this TPM\n"); + rc = -EBUSY; + goto err_out; + } + + chip->num_opens++; + get_device(chip->dev); + + spin_unlock(&driver_lock); + + chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); + if (chip->data_buffer == NULL) { + chip->num_opens--; + put_device(chip->dev); + return -ENOMEM; + } + + atomic_set(&chip->data_pending, 0); + + file->private_data = chip; + return 0; + +err_out: + spin_unlock(&driver_lock); + return rc; +} + +EXPORT_SYMBOL_GPL(tpm_open); + +int tpm_release(struct inode *inode, struct file *file) +{ + struct tpm_chip *chip = file->private_data; + + spin_lock(&driver_lock); + file->private_data = NULL; + chip->num_opens--; + del_singleshot_timer_sync(&chip->user_read_timer); + atomic_set(&chip->data_pending, 0); + put_device(chip->dev); + kfree(chip->data_buffer); + spin_unlock(&driver_lock); + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_release); + +ssize_t tpm_write(struct file * file, const char __user * buf, + size_t size, loff_t * off) +{ + struct tpm_chip *chip = file->private_data; + int in_size = size, out_size; + + /* cannot perform a write until the read has cleared + either via tpm_read or a user_read_timer timeout */ + while (atomic_read(&chip->data_pending) != 0) + msleep(TPM_TIMEOUT); + + down(&chip->buffer_mutex); + + if (in_size > TPM_BUFSIZE) + in_size = TPM_BUFSIZE; + + if (copy_from_user + (chip->data_buffer, (void __user *) buf, in_size)) { + up(&chip->buffer_mutex); + return -EFAULT; + } + + /* atomic tpm command send and result receive */ + out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); + + atomic_set(&chip->data_pending, out_size); + 
up(&chip->buffer_mutex); + + /* Set a timeout by which the reader must come claim the result */ + mod_timer(&chip->user_read_timer, jiffies + (60 * HZ)); + + return in_size; +} + +EXPORT_SYMBOL_GPL(tpm_write); + +ssize_t tpm_read(struct file * file, char __user * buf, + size_t size, loff_t * off) +{ + struct tpm_chip *chip = file->private_data; + int ret_size; + + del_singleshot_timer_sync(&chip->user_read_timer); + ret_size = atomic_read(&chip->data_pending); + + if (ret_size > 0) { /* relay data */ + int position = atomic_read(&chip->data_position); + + if (size < ret_size) + ret_size = size; + + down(&chip->buffer_mutex); + + if (copy_to_user((void __user *) buf, + &chip->data_buffer[position], + ret_size)) { + ret_size = -EFAULT; + } else { + int pending = atomic_read(&chip->data_pending) - ret_size; + atomic_set(&chip->data_pending, + pending); + atomic_set(&chip->data_position, + position + ret_size); + } + up(&chip->buffer_mutex); + } + + return ret_size; +} + +EXPORT_SYMBOL_GPL(tpm_read); + +void tpm_remove_hardware(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + int i; + + if (chip == NULL) { + dev_err(dev, "No device data found\n"); + return; + } + + spin_lock(&driver_lock); + + list_del(&chip->list); + + spin_unlock(&driver_lock); + + dev_set_drvdata(dev, NULL); + misc_deregister(&chip->vendor->miscdev); + + for (i = 0; i < TPM_NUM_ATTR; i++) + device_remove_file(dev, &chip->vendor->attr[i]); + + dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES] &= + !(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES)); + + kfree(chip); + + put_device(dev); +} + +EXPORT_SYMBOL_GPL(tpm_remove_hardware); + +static const u8 savestate[] = { + 0, 193, /* TPM_TAG_RQU_COMMAND */ + 0, 0, 0, 10, /* blob length (in bytes) */ + 0, 0, 0, 152 /* TPM_ORD_SaveState */ +}; + +/* + * We are about to suspend. Save the TPM state + * so that it can be restored. 
+ */ +int tpm_pm_suspend(struct pci_dev *pci_dev, u32 pm_state) +{ + struct tpm_chip *chip = pci_get_drvdata(pci_dev); + if (chip == NULL) + return -ENODEV; + + tpm_transmit(chip, savestate, sizeof(savestate)); + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_pm_suspend); + +/* + * Resume from a power safe. The BIOS already restored + * the TPM state. + */ +int tpm_pm_resume(struct pci_dev *pci_dev) +{ + struct tpm_chip *chip = pci_get_drvdata(pci_dev); + + if (chip == NULL) + return -ENODEV; + + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_pm_resume); + +/* + * Called from tpm_.c probe function only for devices + * the driver has determined it should claim. Prior to calling + * this function the specific probe function has called pci_enable_device + * upon errant exit from this function specific probe function should call + * pci_disable_device + */ +int tpm_register_hardware_nopci(struct device *dev, + struct tpm_vendor_specific *entry) +{ + char devname[7]; + struct tpm_chip *chip; + int i, j; + + /* Driver specific per-device data */ + chip = kmalloc(sizeof(*chip), GFP_KERNEL); + if (chip == NULL) + return -ENOMEM; + + memset(chip, 0, sizeof(struct tpm_chip)); + + init_MUTEX(&chip->buffer_mutex); + init_MUTEX(&chip->tpm_mutex); + INIT_LIST_HEAD(&chip->list); + + init_timer(&chip->user_read_timer); + chip->user_read_timer.function = user_reader_timeout; + chip->user_read_timer.data = (unsigned long) chip; + + chip->vendor = entry; + + chip->dev_num = -1; + + for (i = 0; i < TPM_NUM_MASK_ENTRIES; i++) + for (j = 0; j < 8 * sizeof(int); j++) + if ((dev_mask[i] & (1 << j)) == 0) { + chip->dev_num = + i * TPM_NUM_MASK_ENTRIES + j; + dev_mask[i] |= 1 << j; + goto dev_num_search_complete; + } + +dev_num_search_complete: + if (chip->dev_num < 0) { + dev_err(dev, "No available tpm device numbers\n"); + kfree(chip); + return -ENODEV; + } else if (chip->dev_num == 0) + chip->vendor->miscdev.minor = TPM_MINOR; + else + chip->vendor->miscdev.minor = MISC_DYNAMIC_MINOR; + + 
snprintf(devname, sizeof(devname), "%s%d", "tpm", chip->dev_num); + chip->vendor->miscdev.name = devname; + + chip->vendor->miscdev.dev = dev; + chip->dev = get_device(dev); + + + if (misc_register(&chip->vendor->miscdev)) { + dev_err(chip->dev, + "unable to misc_register %s, minor %d\n", + chip->vendor->miscdev.name, + chip->vendor->miscdev.minor); + put_device(dev); + kfree(chip); + dev_mask[i] &= !(1 << j); + return -ENODEV; + } + + spin_lock(&driver_lock); + + dev_set_drvdata(dev, chip); + + list_add(&chip->list, &tpm_chip_list); + + spin_unlock(&driver_lock); + + for (i = 0; i < TPM_NUM_ATTR; i++) + device_create_file(dev, &chip->vendor->attr[i]); + + return 0; +} + +EXPORT_SYMBOL_GPL(tpm_register_hardware_nopci); + +static int __init init_tpm(void) +{ + return 0; +} + +static void __exit cleanup_tpm(void) +{ + +} + +module_init(init_tpm); +module_exit(cleanup_tpm); + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.h xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.h --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.h 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.h 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + */ +#include +#include +#include +#include +#include + +enum { + TPM_TIMEOUT = 5, /* msecs */ + TPM_NUM_ATTR = 4 +}; + +/* TPM addresses */ +enum { + TPM_ADDR = 0x4E, + TPM_DATA = 0x4F +}; + +/* + * Chip num is this value or a valid tpm idx in lower two bytes of chip_id + */ +enum tpm_chip_num { + TPM_ANY_NUM = 0xFFFF, +}; + +#define TPM_CHIP_NUM_MASK 0x0000ffff + +extern ssize_t tpm_show_pubek(struct device *, char *); +extern ssize_t tpm_show_pcrs(struct device *, char *); +extern ssize_t tpm_show_caps(struct device *, char *); +extern ssize_t tpm_store_cancel(struct device *, const char *, size_t); + +#define TPM_DEVICE_ATTRS { \ + __ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL), \ + __ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL), \ + __ATTR(caps, S_IRUGO, tpm_show_caps, NULL), \ + __ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel) } + +struct tpm_chip; + +struct tpm_vendor_specific { + u8 req_complete_mask; + u8 req_complete_val; + u8 req_canceled; + u16 base; /* TPM base address */ + + int (*recv) (struct tpm_chip *, u8 *, size_t); + int (*send) (struct tpm_chip *, u8 *, size_t); + void (*cancel) (struct tpm_chip *); + u8(*status) (struct tpm_chip *); + struct miscdevice miscdev; + struct device_attribute attr[TPM_NUM_ATTR]; +}; + +struct tpm_chip { + struct device *dev; /* PCI device stuff */ + + int dev_num; /* /dev/tpm# */ + int num_opens; /* only one allowed */ + int time_expired; + + /* Data passed to and from the tpm via the read/write calls */ + u8 *data_buffer; + atomic_t data_pending; + atomic_t data_position; + struct semaphore buffer_mutex; + + struct timer_list user_read_timer; /* user needs to claim result */ + struct semaphore tpm_mutex; /* tpm is processing */ + + struct tpm_vendor_specific *vendor; + + struct list_head list; +}; + +static inline int tpm_read_index(int index) +{ + outb(index, TPM_ADDR); + return inb(TPM_DATA) & 0xFF; +} + +static inline void tpm_write_index(int index, int value) +{ + outb(index, TPM_ADDR); + outb(value & 
0xFF, TPM_DATA); +} + +extern void tpm_time_expired(unsigned long); +extern int tpm_lpc_bus_init(struct pci_dev *, u16); + +extern int tpm_register_hardware_nopci(struct device *, + struct tpm_vendor_specific *); +extern void tpm_remove_hardware(struct device *); +extern int tpm_open(struct inode *, struct file *); +extern int tpm_release(struct inode *, struct file *); +extern ssize_t tpm_write(struct file *, const char __user *, size_t, + loff_t *); +extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *); +extern int tpm_pcr_extend(u32 chip_id, int pcr_idx, const u8* hash); +extern int tpm_pcr_read( u32 chip_id, int pcr_idx, u8* res_buf, int res_buf_size ); + +extern int tpm_pm_suspend(struct pci_dev *, u32); +extern int tpm_pm_resume(struct pci_dev *); + +/* internal kernel interface */ +extern ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, + size_t bufsiz); +extern struct tpm_chip *tpm_chip_lookup(int chip_num); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nsc.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nsc.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nsc.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nsc.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,377 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + */ + +#include "tpm.h" + +/* National definitions */ +#define TPM_NSC_BASE 0x360 +#define TPM_NSC_IRQ 0x07 +#define TPM_NSC_BASE0_HI 0x60 +#define TPM_NSC_BASE0_LO 0x61 +#define TPM_NSC_BASE1_HI 0x62 +#define TPM_NSC_BASE1_LO 0x63 + +#define NSC_LDN_INDEX 0x07 +#define NSC_SID_INDEX 0x20 +#define NSC_LDC_INDEX 0x30 +#define NSC_DIO_INDEX 0x60 +#define NSC_CIO_INDEX 0x62 +#define NSC_IRQ_INDEX 0x70 +#define NSC_ITS_INDEX 0x71 + +#define NSC_STATUS 0x01 +#define NSC_COMMAND 0x01 +#define NSC_DATA 0x00 + +/* status bits */ +#define NSC_STATUS_OBF 0x01 /* output buffer full */ +#define NSC_STATUS_IBF 0x02 /* input buffer full */ +#define NSC_STATUS_F0 0x04 /* F0 */ +#define NSC_STATUS_A2 0x08 /* A2 */ +#define NSC_STATUS_RDY 0x10 /* ready to receive command */ +#define NSC_STATUS_IBR 0x20 /* ready to receive data */ + +/* command bits */ +#define NSC_COMMAND_NORMAL 0x01 /* normal mode */ +#define NSC_COMMAND_EOC 0x03 +#define NSC_COMMAND_CANCEL 0x22 + +/* + * Wait for a certain status to appear + */ +static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data) +{ + int expired = 0; + struct timer_list status_timer = + TIMER_INITIALIZER(tpm_time_expired, jiffies + 10 * HZ, + (unsigned long) &expired); + + /* status immediately available check */ + *data = inb(chip->vendor->base + NSC_STATUS); + if ((*data & mask) == val) + return 0; + + /* wait for status */ + add_timer(&status_timer); + do { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(TPM_TIMEOUT); + *data = inb(chip->vendor->base + 1); + if ((*data & mask) == val) { + del_singleshot_timer_sync(&status_timer); + return 0; + } + } + while (!expired); + + return -EBUSY; +} + +static int nsc_wait_for_ready(struct tpm_chip *chip) +{ + int status; + int expired = 0; + struct timer_list status_timer = + TIMER_INITIALIZER(tpm_time_expired, jiffies + 100, + (unsigned long) &expired); + + /* status immediately available check */ + status = inb(chip->vendor->base + NSC_STATUS); + if (status 
& NSC_STATUS_OBF) + status = inb(chip->vendor->base + NSC_DATA); + if (status & NSC_STATUS_RDY) + return 0; + + /* wait for status */ + add_timer(&status_timer); + do { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(TPM_TIMEOUT); + status = inb(chip->vendor->base + NSC_STATUS); + if (status & NSC_STATUS_OBF) + status = inb(chip->vendor->base + NSC_DATA); + if (status & NSC_STATUS_RDY) { + del_singleshot_timer_sync(&status_timer); + return 0; + } + } + while (!expired); + + dev_info(&chip->pci_dev->dev, "wait for ready failed\n"); + return -EBUSY; +} + + +static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) +{ + u8 *buffer = buf; + u8 data, *p; + u32 size; + __be32 *native_size; + + if (count < 6) + return -EIO; + + if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { + dev_err(&chip->pci_dev->dev, "F0 timeout\n"); + return -EIO; + } + if ((data = + inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) { + dev_err(&chip->pci_dev->dev, "not in normal mode (0x%x)\n", + data); + return -EIO; + } + + /* read the whole packet */ + for (p = buffer; p < &buffer[count]; p++) { + if (wait_for_stat + (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { + dev_err(&chip->pci_dev->dev, + "OBF timeout (while reading data)\n"); + return -EIO; + } + if (data & NSC_STATUS_F0) + break; + *p = inb(chip->vendor->base + NSC_DATA); + } + + if ((data & NSC_STATUS_F0) == 0) { + dev_err(&chip->pci_dev->dev, "F0 not set\n"); + return -EIO; + } + if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) { + dev_err(&chip->pci_dev->dev, + "expected end of command(0x%x)\n", data); + return -EIO; + } + + native_size = (__force __be32 *) (buf + 2); + size = be32_to_cpu(*native_size); + + if (count < size) + return -EIO; + + return size; +} + +static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) +{ + u8 data; + int i; + + /* + * If we hit the chip with back to back commands it locks up + * and never set IBF. 
Hitting it with this "hammer" seems to + * fix it. Not sure why this is needed, we followed the flow + * chart in the manual to the letter. + */ + outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND); + + if (nsc_wait_for_ready(chip) != 0) + return -EIO; + + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { + dev_err(&chip->pci_dev->dev, "IBF timeout\n"); + return -EIO; + } + + outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND); + if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { + dev_err(&chip->pci_dev->dev, "IBR timeout\n"); + return -EIO; + } + + for (i = 0; i < count; i++) { + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { + dev_err(&chip->pci_dev->dev, + "IBF timeout (while writing data)\n"); + return -EIO; + } + outb(buf[i], chip->vendor->base + NSC_DATA); + } + + if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { + dev_err(&chip->pci_dev->dev, "IBF timeout\n"); + return -EIO; + } + outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND); + + return count; +} + +static void tpm_nsc_cancel(struct tpm_chip *chip) +{ + outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND); +} + +static struct file_operations nsc_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static struct tpm_vendor_specific tpm_nsc = { + .recv = tpm_nsc_recv, + .send = tpm_nsc_send, + .cancel = tpm_nsc_cancel, + .req_complete_mask = NSC_STATUS_OBF, + .req_complete_val = NSC_STATUS_OBF, + .miscdev = { .fops = &nsc_ops, }, + +}; + +static int __devinit tpm_nsc_init(struct pci_dev *pci_dev, + const struct pci_device_id *pci_id) +{ + int rc = 0; + int lo, hi; + + hi = tpm_read_index(TPM_NSC_BASE0_HI); + lo = tpm_read_index(TPM_NSC_BASE0_LO); + + tpm_nsc.base = (hi<<8) | lo; + + if (pci_enable_device(pci_dev)) + return -EIO; + + /* verify that it is a National part (SID) */ + if (tpm_read_index(NSC_SID_INDEX) != 0xEF) { + rc = -ENODEV; 
+ goto out_err; + } + + dev_dbg(&pci_dev->dev, "NSC TPM detected\n"); + dev_dbg(&pci_dev->dev, + "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n", + tpm_read_index(0x07), tpm_read_index(0x20), + tpm_read_index(0x27)); + dev_dbg(&pci_dev->dev, + "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n", + tpm_read_index(0x21), tpm_read_index(0x25), + tpm_read_index(0x26), tpm_read_index(0x28)); + dev_dbg(&pci_dev->dev, "NSC IO Base0 0x%x\n", + (tpm_read_index(0x60) << 8) | tpm_read_index(0x61)); + dev_dbg(&pci_dev->dev, "NSC IO Base1 0x%x\n", + (tpm_read_index(0x62) << 8) | tpm_read_index(0x63)); + dev_dbg(&pci_dev->dev, "NSC Interrupt number and wakeup 0x%x\n", + tpm_read_index(0x70)); + dev_dbg(&pci_dev->dev, "NSC IRQ type select 0x%x\n", + tpm_read_index(0x71)); + dev_dbg(&pci_dev->dev, + "NSC DMA channel select0 0x%x, select1 0x%x\n", + tpm_read_index(0x74), tpm_read_index(0x75)); + dev_dbg(&pci_dev->dev, + "NSC Config " + "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + tpm_read_index(0xF0), tpm_read_index(0xF1), + tpm_read_index(0xF2), tpm_read_index(0xF3), + tpm_read_index(0xF4), tpm_read_index(0xF5), + tpm_read_index(0xF6), tpm_read_index(0xF7), + tpm_read_index(0xF8), tpm_read_index(0xF9)); + + dev_info(&pci_dev->dev, + "NSC PC21100 TPM revision %d\n", + tpm_read_index(0x27) & 0x1F); + + if (tpm_read_index(NSC_LDC_INDEX) == 0) + dev_info(&pci_dev->dev, ": NSC TPM not active\n"); + + /* select PM channel 1 */ + tpm_write_index(NSC_LDN_INDEX, 0x12); + tpm_read_index(NSC_LDN_INDEX); + + /* disable the DPM module */ + tpm_write_index(NSC_LDC_INDEX, 0); + tpm_read_index(NSC_LDC_INDEX); + + /* set the data register base addresses */ + tpm_write_index(NSC_DIO_INDEX, TPM_NSC_BASE >> 8); + tpm_write_index(NSC_DIO_INDEX + 1, TPM_NSC_BASE); + tpm_read_index(NSC_DIO_INDEX); + tpm_read_index(NSC_DIO_INDEX + 1); + + /* set the command register base addresses */ + tpm_write_index(NSC_CIO_INDEX, (TPM_NSC_BASE + 1) >> 8); + tpm_write_index(NSC_CIO_INDEX + 1, (TPM_NSC_BASE + 1)); 
+ tpm_read_index(NSC_DIO_INDEX); + tpm_read_index(NSC_DIO_INDEX + 1); + + /* set the interrupt number to be used for the host interface */ + tpm_write_index(NSC_IRQ_INDEX, TPM_NSC_IRQ); + tpm_write_index(NSC_ITS_INDEX, 0x00); + tpm_read_index(NSC_IRQ_INDEX); + + /* enable the DPM module */ + tpm_write_index(NSC_LDC_INDEX, 0x01); + tpm_read_index(NSC_LDC_INDEX); + + if ((rc = tpm_register_hardware(pci_dev, &tpm_nsc)) < 0) + goto out_err; + + return 0; + +out_err: + pci_disable_device(pci_dev); + return rc; +} + +static struct pci_device_id tpm_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, tpm_pci_tbl); + +static struct pci_driver nsc_pci_driver = { + .name = "tpm_nsc", + .id_table = tpm_pci_tbl, + .probe = tpm_nsc_init, + .remove = __devexit_p(tpm_remove), + .suspend = tpm_pm_suspend, + .resume = tpm_pm_resume, +}; + +static int __init init_nsc(void) +{ + return pci_register_driver(&nsc_pci_driver); +} + +static void __exit cleanup_nsc(void) +{ + pci_unregister_driver(&nsc_pci_driver); +} + +module_init(init_nsc); +module_exit(cleanup_nsc); + +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver"); +MODULE_VERSION("2.0"); +MODULE_LICENSE("GPL"); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 
+1,501 @@ +/* + * Copyright (C) 2004 IBM Corporation + * + * Authors: + * Leendert van Doorn + * Dave Safford + * Reiner Sailer + * Kylene Hall + * Stefan Berger + * + * Maintained by: + * + * Device driver for TCG/TCPA TPM (trusted platform module) for XEN. + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ + +#include +#include +#include +#include +#include +#include "tpm_nopci.h" + +/* read status bits */ +enum { + STATUS_BUSY = 0x01, + STATUS_DATA_AVAIL = 0x02, + STATUS_READY = 0x04 +}; + +#define MIN(x,y) ((x) < (y)) ? (x) : (y) + +struct transmission { + struct list_head next; + unsigned char *request; + unsigned int request_len; + unsigned char *rcv_buffer; + unsigned int buffersize; + struct tpm_chip *chip; + unsigned int flags; +}; + +enum { + TRANSMISSION_FLAG_WAS_QUEUED = 0x1 +}; + +struct data_exchange { + struct transmission *current_request; + spinlock_t req_list_lock; + wait_queue_head_t req_wait_queue; + + struct list_head queued_requests; + + struct transmission *current_response; + spinlock_t resp_list_lock; + wait_queue_head_t resp_wait_queue; // processes waiting for responses + + struct transmission *req_cancelled; // if a cancellation was encounterd + + unsigned int fe_status; + unsigned int flags; +}; + +enum { + DATAEX_FLAG_QUEUED_ONLY = 0x1 +}; + +static struct data_exchange dataex; + +/* local function prototypes */ +static void __exit cleanup_xen(void); + + +/* ============================================================= + * Some utility functions + * ============================================================= + */ +static inline struct transmission * +transmission_alloc(void) +{ + struct transmission *t = kmalloc(sizeof(*t), GFP_KERNEL); + if (t) { + memset(t, 0x0, sizeof(*t)); + } + return t; +} + +static inline 
unsigned char * +transmission_set_buffer(struct transmission *t, + unsigned char *buffer, unsigned int len) +{ + if (NULL != t->request) { + kfree(t->request); + } + t->request = kmalloc(len, GFP_KERNEL); + if (t->request) { + memcpy(t->request, + buffer, + len); + t->request_len = len; + } + return t->request; +} + +static inline void +transmission_free(struct transmission *t) +{ + if (t->request) { + kfree(t->request); + } + if (t->rcv_buffer) { + kfree(t->rcv_buffer); + } + kfree(t); +} + +/* ============================================================= + * Interface with the TPM shared memory driver for XEN + * ============================================================= + */ +static int tpm_recv(const u8 *buffer, size_t count, const void *ptr) +{ + int ret_size = 0; + struct transmission *t, *temp; + + + /* + * The list with requests must contain one request + * only and the element there must be the one that + * was passed to me from the front-end. + */ + if (dataex.current_request != ptr) { + printk("WARNING: The request pointer is different than the pointer " + "the shared memory driver returned to me. 
%p != %p\n", + dataex.current_request, ptr); + } + + /* + * If the request has been cancelled, just quit here + */ + if (dataex.req_cancelled == (struct transmission *)ptr) { + if (dataex.current_request == dataex.req_cancelled) { + dataex.current_request = NULL; + } + transmission_free(dataex.req_cancelled); + dataex.req_cancelled = NULL; + return 0; + } + + if (NULL != (temp = dataex.current_request)) { + transmission_free(temp); + dataex.current_request = NULL; + } + + t = transmission_alloc(); + if (NULL != t) { + unsigned long flags; + t->rcv_buffer = kmalloc(count, GFP_KERNEL); + if (NULL == t->rcv_buffer) { + transmission_free(t); + return -ENOMEM; + } + t->buffersize = count; + memcpy(t->rcv_buffer, buffer, count); + ret_size = count; + + spin_lock_irqsave(&dataex.resp_list_lock ,flags); + dataex.current_response = t; + spin_unlock_irqrestore(&dataex.resp_list_lock, flags); + wake_up_interruptible(&dataex.resp_wait_queue); + } + return ret_size; +} + + +static void tpm_fe_status(unsigned int flags) +{ + dataex.fe_status = flags; +} + +/* ============================================================= + * Interface with the generic TPM driver + * ============================================================= + */ +static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&dataex.resp_list_lock, flags); + /* + * Check if the previous operation only queued the command + * In this case there won't be a response, so I just + * return from here and reset that flag. In any other + * case I should receive a response from the back-end. + */ + if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) { + dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY; + spin_unlock_irqrestore(&dataex.resp_list_lock, flags); + /* + * a little hack here. 
The first few measurements + * are queued since there's no way to talk to the + * TPM yet (due to slowness of the control channel) + * So we just make IMA happy by giving it 30 NULL + * bytes back where the most important part is + * that the result code is '0'. + */ + count = MIN(count, 30); + memset(buf, 0x0, count); + return count; + } + /* + * Check whether something is in the responselist and if + * there's nothing in the list wait for something to appear. + */ + + if (NULL == dataex.current_response) { + spin_unlock_irqrestore(&dataex.resp_list_lock, flags); + interruptible_sleep_on_timeout(&dataex.resp_wait_queue, + 1000); + spin_lock_irqsave(&dataex.resp_list_lock ,flags); + } + + if (NULL != dataex.current_response) { + struct transmission *t = dataex.current_response; + dataex.current_response = NULL; + rc = MIN(count, t->buffersize); + memcpy(buf, t->rcv_buffer, rc); + transmission_free(t); + } + + spin_unlock_irqrestore(&dataex.resp_list_lock, flags); + return rc; +} + +static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count) +{ + /* + * We simply pass the packet onto the XEN shared + * memory driver. + */ + unsigned long flags; + int rc; + struct transmission *t = transmission_alloc(); + + spin_lock_irqsave(&dataex.req_list_lock, flags); + /* + * If there's a current request, it must be the + * previous request that has timed out. + */ + if (dataex.current_request != NULL) { + printk("WARNING: Sending although there is a request outstanding.\n" + " Previous request must have timed out.\n"); + transmission_free(dataex.current_request); + dataex.current_request = NULL; + } + + if (t != NULL) { + unsigned int error = 0; + t->rcv_buffer = NULL; + t->buffersize = 0; + t->chip = chip; + + /* + * Queue the packet if the driver below is not + * ready, yet, or there is any packet already + * in the queue. + * If the driver below is ready, unqueue all + * packets first before sending our current + * packet. 
+ * For each unqueued packet, except for the + * last (=current) packet, call the function + * tpm_xen_recv to wait for the response to come + * back. + */ + if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) { + /* + * copy the request into the buffer + */ + if (transmission_set_buffer(t, buf, count) == NULL) { + transmission_free(t); + rc = -ENOMEM; + goto exit; + } + dataex.flags |= DATAEX_FLAG_QUEUED_ONLY; + list_add_tail(&t->next, &dataex.queued_requests); + rc = 0; + } else { + /* + * Check whether there are any packets in the queue + */ + while (!list_empty(&dataex.queued_requests)) { + /* + * Need to dequeue them. + * Read the result into a dummy buffer. + */ + unsigned char buffer[1]; + struct transmission *qt = (struct transmission *) dataex.queued_requests.next; + list_del(&qt->next); + dataex.current_request = qt; + spin_unlock_irqrestore(&dataex.req_list_lock, flags); + + rc = tpm_fe_send(qt->request, + qt->request_len, + qt); + + if (rc < 0) { + spin_lock_irqsave(&dataex.req_list_lock, flags); + if ((qt = dataex.current_request) != NULL) { + /* + * requeue it at the beginning + * of the list + */ + list_add(&qt->next, + &dataex.queued_requests); + } + dataex.current_request = NULL; + error = 1; + break; + } + /* + * After this point qt is not valid anymore! + * It is freed when the front-end is delivering the data + * by calling tpm_recv + */ + + /* + * Try to receive the response now into the provided dummy + * buffer (I don't really care about this response since + * there is no receiver anymore for this response) + */ + rc = tpm_xen_recv(chip, buffer, sizeof(buffer)); + + spin_lock_irqsave(&dataex.req_list_lock, flags); + } + + if (error == 0) { + /* + * Finally, send the current request. 
+ */ + dataex.current_request = t; + /* + * Call the shared memory driver + * Pass to it the buffer with the request, the + * amount of bytes in the request and + * a void * pointer (here: transmission structure) + */ + rc = tpm_fe_send(buf, count, t); + /* + * The generic TPM driver will call + * the function to receive the response. + */ + if (rc < 0) { + dataex.current_request = NULL; + goto queue_it; + } + } else { +queue_it: + if (transmission_set_buffer(t, buf, count) == NULL) { + transmission_free(t); + rc = -ENOMEM; + goto exit; + } + /* + * An error occurred. Don't event try + * to send the current request. Just + * queue it. + */ + dataex.flags |= DATAEX_FLAG_QUEUED_ONLY; + list_add_tail(&t->next, &dataex.queued_requests); + rc = 0; + } + } + } else { + rc = -ENOMEM; + } + +exit: + spin_unlock_irqrestore(&dataex.req_list_lock, flags); + return rc; +} + +static void tpm_xen_cancel(struct tpm_chip *chip) +{ + unsigned long flags; + spin_lock_irqsave(&dataex.resp_list_lock,flags); + + dataex.req_cancelled = dataex.current_request; + + spin_unlock_irqrestore(&dataex.resp_list_lock,flags); +} + +static u8 tpm_xen_status(struct tpm_chip *chip) +{ + unsigned long flags; + u8 rc = 0; + spin_lock_irqsave(&dataex.resp_list_lock, flags); + /* + * Data are available if: + * - there's a current response + * - the last packet was queued only (this is fake, but necessary to + * get the generic TPM layer to call the receive function.) 
+ */ + if (NULL != dataex.current_response || + 0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) { + rc = STATUS_DATA_AVAIL; + } + spin_unlock_irqrestore(&dataex.resp_list_lock, flags); + return rc; +} + +static struct file_operations tpm_xen_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static struct tpm_vendor_specific tpm_xen = { + .recv = tpm_xen_recv, + .send = tpm_xen_send, + .cancel = tpm_xen_cancel, + .status = tpm_xen_status, + .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL, + .req_complete_val = STATUS_DATA_AVAIL, + .req_canceled = STATUS_READY, + .base = 0, + .attr = TPM_DEVICE_ATTRS, + .miscdev.fops = &tpm_xen_ops, +}; + +static struct device tpm_device = { + .bus_id = "vtpm", +}; + +static struct tpmfe_device tpmfe = { + .receive = tpm_recv, + .status = tpm_fe_status, +}; + + +static int __init init_xen(void) +{ + int rc; + + /* + * Register device with the low lever front-end + * driver + */ + if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) { + return rc; + } + + /* + * Register our device with the system. 
+ */ + if ((rc = device_register(&tpm_device)) < 0) { + tpm_fe_unregister_receiver(); + return rc; + } + + if ((rc = tpm_register_hardware_nopci(&tpm_device, &tpm_xen)) < 0) { + device_unregister(&tpm_device); + tpm_fe_unregister_receiver(); + return rc; + } + + dataex.current_request = NULL; + spin_lock_init(&dataex.req_list_lock); + init_waitqueue_head(&dataex.req_wait_queue); + INIT_LIST_HEAD(&dataex.queued_requests); + + dataex.current_response = NULL; + spin_lock_init(&dataex.resp_list_lock); + init_waitqueue_head(&dataex.resp_wait_queue); + + return 0; +} + +static void __exit cleanup_xen(void) +{ + tpm_remove_hardware(&tpm_device); + device_unregister(&tpm_device); + tpm_fe_unregister_receiver(); +} + +fs_initcall(init_xen); +module_exit(cleanup_xen); + +MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)"); +MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL"); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/Makefile xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/Makefile --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/Makefile 2005-08-04 12:37:19.000000000 -0400 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/Makefile 2005-08-16 17:06:07.000000000 -0400 @@ -8,7 +8,9 @@ obj-y += xenbus/ obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/ obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/ +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/ obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/ +obj-$(CONFIG_XEN_TPMDEV_FRONTEND) += tpmfront/ diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,101 @@ 
+/****************************************************************************** + * drivers/xen/tpmback/common.h + */ + +#ifndef __NETIF__BACKEND__COMMON_H__ +#define __NETIF__BACKEND__COMMON_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if 0 +#define ASSERT(_p) \ + if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \ + __LINE__, __FILE__); *(int*)0=0; } +#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \ + __FILE__ , __LINE__ , ## _a ) +#else +#define ASSERT(_p) ((void)0) +#define DPRINTK(_f, _a...) ((void)0) +#endif + +typedef struct tpmif_st { + /* Unique identifier for this interface. */ + domid_t domid; + unsigned int handle; + + /* Physical parameters of the comms window. */ + unsigned long tx_shmem_frame; + unsigned long rx_shmem_frame; + unsigned int evtchn; + + /* The shared rings and indexes. */ + tpmif_tx_interface_t *tx; + + /* Miscellaneous private stuff. */ + enum { DISCONNECTED, DISCONNECTING, CONNECTED } status; + int active; + /* + * DISCONNECT response is deferred until pending requests are ack'ed. + * We therefore need to store the id from the original request. 
+ */ + u8 disconnect_rspid; + struct tpmif_st *hash_next; + struct list_head list; /* scheduling list */ + atomic_t refcnt; + + u32 tpm_instance; + unsigned long mmap_vstart; + + struct work_struct work; +#ifdef CONFIG_XEN_TPMDEV_GRANT + u16 shmem_handle; + memory_t shmem_vaddr; + grant_ref_t shmem_ref; +#endif + +} tpmif_t; + +int tpmif_create(ctrl_msg_t *msg); +int tpmif_destroy(ctrl_msg_t * msg); +void tpmif_connect(tpmif_be_connect_t * connect); +int tpmif_disconnect(tpmif_be_disconnect_t * disconnect, u8 rsp_id); +void tpmif_disconnect_complete(tpmif_t * tpmif); +tpmif_t *tpmif_find_by_handle(domid_t domid, unsigned int handle); +int tpmif_vtpm_open(tpmif_t *tpmif, ctrl_msg_t *msg); +int tpmif_vtpm_close(ctrl_msg_t *msg, u32 instid); + +int vtpm_release_packets(tpmif_t * tpmif, int send_msgs); + +#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt)) +#define tpmif_put(_b) \ + do { \ + if ( atomic_dec_and_test(&(_b)->refcnt) ) \ + tpmif_disconnect_complete(_b); \ + } while (0) + +void tpmif_interface_init(void); +void tpmif_ctrlif_init(void); + +void tpmif_schedule_work(tpmif_t * tpmif); +void tpmif_deschedule_work(tpmif_t * tpmif); + +void tpmif_xenbus_init(void); + +irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs); + +extern int num_frontends; + +#define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE)) + + +#endif /* __TPMIF__BACKEND__COMMON_H__ */ diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/control.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/control.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/control.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/control.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,81 @@ +/****************************************************************************** + * drivers/xen/tpmback/control.c + * + * Copyright (c) 2005, IBM Corporation + * + * Author: Stefan Berger, 
stefanb@us.ibm.com + * + * Routines for interfacing with the control plane. + * + * This code has been derived from drivers/xen/netback/control.c + * Copyright (c) 2004, Keir Fraser + */ + +#include "common.h" + +static void +tpmif_ctrlif_rx(ctrl_msg_t * msg, unsigned long id) +{ + switch (msg->subtype) { + case CMSG_TPMIF_BE_CREATE: + if (msg->length != sizeof (tpmif_be_create_t)) + goto parse_error; + if (0 == tpmif_create(msg)) { + /* delayed control msg delivery */ + return; + } + + break; + case CMSG_TPMIF_BE_DESTROY: + if (msg->length != sizeof (tpmif_be_destroy_t)) + goto parse_error; + if (0 == tpmif_destroy(msg)) { + /* delay control msg delivery */ + return; + } + break; + case CMSG_TPMIF_BE_CONNECT: + if (msg->length != sizeof (tpmif_be_connect_t)) + goto parse_error; + tpmif_connect((tpmif_be_connect_t *) & msg->msg[0]); + break; + case CMSG_TPMIF_BE_DISCONNECT: + if (msg->length != sizeof (tpmif_be_disconnect_t)) + goto parse_error; + if (!tpmif_disconnect + ((tpmif_be_disconnect_t *) & msg->msg[0], msg->id)) + return; /* Sending the response is deferred until later. */ + break; + default: + goto parse_error; + } + + ctrl_if_send_response(msg); + return; + +parse_error: + DPRINTK("Parse error while reading message subtype %d, len %d\n", + msg->subtype, msg->length); + msg->length = 0; + ctrl_if_send_response(msg); +} + +void +tpmif_ctrlif_init(void) +{ + ctrl_msg_t cmsg = { + .type = CMSG_TPMIF_BE, + .subtype = CMSG_TPMIF_BE_DRIVER_STATUS, + .length = sizeof(tpmif_be_driver_status_t), + }; + /* Send a driver-UP notification to the domain controller. 
*/ + tpmif_be_driver_status_t st = { + .status = TPMIF_DRIVER_STATUS_UP, + }; + memcpy(cmsg.msg, &st, sizeof (st)); + + (void)ctrl_if_register_receiver(CMSG_TPMIF_BE, tpmif_ctrlif_rx, + CALLBACK_IN_BLOCKING_CONTEXT); + + ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); +} diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,416 @@ +/****************************************************************************** + * drivers/xen/tpmback/interface.c + * + * Vritual TPM interface management. + * + * Copyright (c) 2005, IBM Corporation + * + * Author: Stefan Berger, stefanb@us.ibm.com + * + * This code has been derived from drivers/xen/netback/interface.c + * Copyright (c) 2004, Keir Fraser + */ + +#include "common.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) +#define VMALLOC_VMADDR(x) ((unsigned long)(x)) +#endif + +#define TPMIF_HASHSZ (2 << 5) +#define TPMIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(TPMIF_HASHSZ-1)) + +static tpmif_t *tpmif_hash[TPMIF_HASHSZ]; +int num_frontends = 0; + +tpmif_t * +tpmif_find_by_handle(domid_t domid, unsigned int handle) +{ + tpmif_t *tpmif = tpmif_hash[TPMIF_HASH(domid, handle)]; + + while ((tpmif != NULL) && + ((tpmif->domid != domid) || (tpmif->handle != handle))) + tpmif = tpmif->hash_next; + return tpmif; +} + +static tpmif_t * +tpmif_find_by_instance(unsigned int instance) +{ + unsigned int index = 0; + tpmif_t *tpmif = NULL; + while (index < TPMIF_HASHSZ) { + tpmif = tpmif_hash[index]; + while ((tpmif != NULL) && (instance != tpmif->tpm_instance)) + tpmif = tpmif->hash_next; + if (tpmif) + break; + index++; + } + return tpmif; +} + +static void +__tpmif_up(tpmif_t * tpmif) +{ + if 
(tpmif->active == 0) { + tpmif->active = 1; + printk("TPMBE: Activating vIRQ for domain %d.\n", + tpmif->domid); + (void)bind_evtchn_to_irqhandler(tpmif->evtchn, + tpmif_be_int, + 0, + "TPMBE", + tpmif); + } +} + +static void +__tpmif_down(tpmif_t * tpmif) +{ + if (tpmif->active == 1) { + tpmif->active = 0; + printk("TPMBE: Deactivating vIRQ dor domain %d.\n", + tpmif->domid); + tpmif_deschedule_work(tpmif); + } +} + +static void +__tpmif_disconnect_complete(void *arg) +{ + tpmif_t *tpmif = (tpmif_t *) arg; + + ctrl_msg_t cmsg = { + .type = CMSG_TPMIF_BE, + .subtype = CMSG_TPMIF_BE_DISCONNECT, + .id = tpmif->disconnect_rspid, + .length = sizeof (tpmif_be_disconnect_t), + }; + + tpmif_be_disconnect_t disc = { + /* Construct the deferred response message. */ + .domid = tpmif->domid, + .tpmif_handle = tpmif->handle, + .status = TPMIF_BE_STATUS_OKAY, + }; + memcpy(cmsg.msg, &disc, sizeof (disc)); + + unbind_evtchn_from_irqhandler(tpmif->evtchn, tpmif); + vfree(tpmif->tx); /* Frees tpmif->rx as well. */ + +#ifdef CONFIG_XEN_TPMDEV_GRANT + { + /* + * Release the shared memory page. + */ + struct gnttab_unmap_grant_ref op; + DPRINTK("Freeing shared memory page with reference 0x%x, handle 0x%x\n", + tpmif->shmem_ref, + tpmif->shmem_handle); + + op.host_addr = tpmif->shmem_vaddr; + op.handle = tpmif->shmem_handle; + op.dev_bus_addr = 0; + + DPRINTK("TPM: Unmapping reference 0x%x, handle 0x%x, vaddr=0x%lx\n", + tpmif->shmem_ref, + tpmif->shmem_handle, + tpmif->shmem_vaddr); + + BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1) ); + } +#endif + + + /* + * Make sure message is constructed /before/ status change, because + * after the status change the 'tpmif' structure could be deallocated at + * any time. Also make sure we send the response /after/ status change, + * as otherwise a subsequent CONNECT request could spuriously fail if + * another CPU doesn't see the status change yet. 
+ */ + mb(); + if (tpmif->status != DISCONNECTING) + BUG(); + tpmif->status = DISCONNECTED; + mb(); + + /* Send the successful response. */ + ctrl_if_send_response(&cmsg); +} + +void +tpmif_disconnect_complete(tpmif_t * tpmif) +{ + INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif); + schedule_work(&tpmif->work); +} + +static void +tpmif_page_release(struct page *page) +{ + /* Ready for next use. */ + set_page_count(page, 1); +} + + +int +tpmif_create(ctrl_msg_t *msg) +{ + int i; + struct page *page; + + tpmif_be_create_t *create = (tpmif_be_create_t *)&msg->msg[0]; + domid_t domid = create->domid; + unsigned int handle = create->tpmif_handle; + unsigned int tpm_instance = create->tpm_instance; + tpmif_t **ptpmif, + *tpmif; + + + /* + * Does a tpm interface for the same instance already + * exist? If yes, then the booting partition that tries + * to connect to the backend won't be able to use the + * backend. + */ + if (NULL != tpmif_find_by_instance(tpm_instance)) { + create->status = TPMIF_BE_STATUS_INTERFACE_EXISTS; + return 1; + } + + tpmif = kmalloc(sizeof (tpmif_t), GFP_KERNEL); + memset(tpmif, 0, sizeof (*tpmif)); + tpmif->domid = domid; + tpmif->handle = handle; + tpmif->status = DISCONNECTED; + tpmif->tpm_instance = tpm_instance; + + if ((tpmif->mmap_vstart = + allocate_empty_lowmem_region(TPMIF_TX_RING_SIZE)) == 0) + BUG(); + + for (i = 0; i < TPMIF_TX_RING_SIZE; i++) { + page = virt_to_page(MMAP_VADDR(tpmif,i)); + set_page_count(page, 1); + SetPageForeign(page, tpmif_page_release); + } + DPRINTK("The tpm_instance is %d\n", tpm_instance); + atomic_set(&tpmif->refcnt, 0); + + ptpmif = &tpmif_hash[TPMIF_HASH(domid, handle)]; + while (*ptpmif != NULL) { + if (((*ptpmif)->domid == domid) && + ((*ptpmif)->handle == handle)) { + DPRINTK("Could not create tpmif: already exists\n"); + create->status = TPMIF_BE_STATUS_INTERFACE_EXISTS; + kfree(tpmif); + return 1; + } + ptpmif = &(*ptpmif)->hash_next; + } + + tpmif->hash_next = *ptpmif; + *ptpmif = 
tpmif; + num_frontends++; + + create->status = TPMIF_BE_STATUS_OKAY; + + /* + * Send packet to upper layer indicating that a new + * TPM instance should be created. + */ + if (0 == tpmif_vtpm_open(tpmif, msg)) { + /* + * The message was sent/queued successfully. + * Tell caller that the control message + * will be sent later. + */ + return 0; + } + + return 1; +} + +int +tpmif_destroy(ctrl_msg_t * msg) +{ + int rc = 1; + tpmif_be_destroy_t *destroy = (tpmif_be_destroy_t *)&msg->msg[0]; + domid_t domid = destroy->domid; + unsigned int handle = destroy->tpmif_handle; + u32 instid; + tpmif_t **ptpmif; + tpmif_t *tpmif; + + DPRINTK("Request to destroy BE!\n"); + + ptpmif = &tpmif_hash[TPMIF_HASH(domid, handle)]; + while ((tpmif = *ptpmif) != NULL) { + if ((tpmif->domid == domid) && (tpmif->handle == handle)) { + if (tpmif->status != DISCONNECTED) + goto still_connected; + goto destroy; + } + ptpmif = &tpmif->hash_next; + } + + destroy->status = TPMIF_BE_STATUS_INTERFACE_NOT_FOUND; + return rc; + +still_connected: + destroy->status = TPMIF_BE_STATUS_INTERFACE_CONNECTED; + return rc; + +destroy: + instid = tpmif->tpm_instance; + *ptpmif = tpmif->hash_next; + DPRINTK("Freeing tpm interface at %p\n",tpmif); + vtpm_release_packets(tpmif, 0); + kfree(tpmif); + num_frontends--; + destroy->status = TPMIF_BE_STATUS_OKAY; + + if (0 == tpmif_vtpm_close(msg, instid)) { + /* + * The message was sent/queued successfully. + * Tell caller that the control message + * will be sent later on. 
+ */ + rc = 0; + } + return rc; +} + +void +tpmif_connect(tpmif_be_connect_t * connect) +{ + domid_t domid = connect->domid; + unsigned int evtchn = connect->evtchn; + unsigned long shmem_frame = connect->shmem_frame; +#ifdef CONFIG_XEN_TPMDEV_GRANT + int ref = connect->shmem_ref; +#else + pgprot_t prot; + int error; +#endif + unsigned int handle = connect->tpmif_handle; + struct vm_struct *vma; + tpmif_t *tpmif; + + tpmif = tpmif_find_by_handle(domid, handle); + if (unlikely(tpmif == NULL)) { + DPRINTK("tpmif_connect attempted for non-existent tpmif (%u,%u)\n", connect->domid, connect->tpmif_handle); + connect->status = TPMIF_BE_STATUS_INTERFACE_NOT_FOUND; + return; + } + + if (tpmif->status != DISCONNECTED) { + connect->status = TPMIF_BE_STATUS_INTERFACE_CONNECTED; + return; + } + + if ((vma = get_vm_area(1 * PAGE_SIZE, VM_IOREMAP)) == NULL) { + connect->status = TPMIF_BE_STATUS_OUT_OF_MEMORY; + return; + } + +#ifndef CONFIG_XEN_TPMDEV_GRANT + prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | + _PAGE_ACCESSED); + error = direct_remap_area_pages(&init_mm, + VMALLOC_VMADDR(vma->addr), + shmem_frame << PAGE_SHIFT, + PAGE_SIZE, prot, domid); + if (error != 0) { + if (error == -ENOMEM) + connect->status = TPMIF_BE_STATUS_OUT_OF_MEMORY; + else if (error == -EFAULT) + connect->status = TPMIF_BE_STATUS_MAPPING_ERROR; + else + connect->status = TPMIF_BE_STATUS_ERROR; + vfree(vma->addr); + return; + } +#else + { /* Map: Use the Grant table reference */ + struct gnttab_map_grant_ref op; + op.host_addr = VMALLOC_VMADDR(vma->addr); + op.flags = GNTMAP_host_map; + op.ref = ref; + op.dom = domid; + + BUG_ON( (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) ); + + handle = op.handle; + + if(op.handle < 0) { + DPRINTK(" Grant table operation failure !\n"); + connect->status = TPMIF_BE_STATUS_MAPPING_ERROR; + vfree(vma->addr); + return; + } + + tpmif->shmem_ref = ref; + tpmif->shmem_handle = handle; + tpmif->shmem_vaddr = VMALLOC_VMADDR(vma->addr); + + 
DPRINTK("TPM: Mapped reference 0x%x, handle 0x%x, vaddr=0x%lx\n", + tpmif->shmem_ref, + tpmif->shmem_handle, + tpmif->shmem_vaddr); + } +#endif + + tpmif->evtchn = evtchn; + tpmif->tx_shmem_frame = shmem_frame; + tpmif->status = CONNECTED; + tpmif->tx = (tpmif_tx_interface_t *) vma->addr; + + tpmif_get(tpmif); + + + wmb(); /* Other CPUs see new state before interface is started. */ + + __tpmif_up(tpmif); + + connect->status = TPMIF_BE_STATUS_OKAY; +} + +int +tpmif_disconnect(tpmif_be_disconnect_t * disconnect, u8 rsp_id) +{ + domid_t domid = disconnect->domid; + unsigned int handle = disconnect->tpmif_handle; + tpmif_t *tpmif; + + tpmif = tpmif_find_by_handle(domid, handle); + if (unlikely(tpmif == NULL)) { + DPRINTK("tpmif_disconnect attempted for non-existent tpmif" + " (%u,%u)\n", disconnect->domid, + disconnect->tpmif_handle); + disconnect->status = TPMIF_BE_STATUS_INTERFACE_NOT_FOUND; + return 1; /* Caller will send response error message. */ + } + + if (tpmif->status == CONNECTED) { + tpmif->status = DISCONNECTING; + tpmif->disconnect_rspid = rsp_id; + wmb(); + + __tpmif_down(tpmif); + + tpmif_put(tpmif); + return 0; /* Caller should not send response message. 
*/ + } + + disconnect->status = TPMIF_BE_STATUS_OKAY; + return 1; +} + +void +tpmif_interface_init(void) +{ + memset(tpmif_hash, 0, sizeof (tpmif_hash)); +} diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/Makefile xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/Makefile --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/Makefile 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/Makefile 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,4 @@ + +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o + +tpmbk-y += tpmback.o control.o interface.o diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,1170 @@ +/****************************************************************************** + * drivers/xen/tpmback/tpmback.c + * + * Copyright (c) 2005, IBM Corporation + * + * Author: Stefan Berger, stefanb@us.ibm.com + * Grant table support: Mahadevan Gomathisankaran + * + * This code has been derived from drivers/xen/netback/netback.c + * Copyright (c) 2002-2004, K A Fraser + * + */ + +#include "common.h" +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_XEN_TPMDEV_GRANT +#include +#endif + +/* ============================================================ + * The file layer for reading data from this device + * ============================================================ + */ + +struct data_exchange { + struct list_head pending_pak; + struct list_head current_pak; + unsigned int copied_so_far; + u8 has_opener; + rwlock_t pak_lock; // protects all of the previous fields + wait_queue_head_t wait_queue; +}; + +struct packet { + struct list_head 
next; + unsigned int data_len; + u8 *data_buffer; + tpmif_t *tpmif; + u32 tpm_instance; + u8 req_tag; + u32 last_read; + u8 flags; + ctrl_msg_t ctrl_msg; + struct timer_list processing_timer; +}; + +enum { + PACKET_FLAG_DISCARD_RESPONSE = 1, + PACKET_FLAG_SEND_CONTROLMESSAGE = 2 +}; + +static struct data_exchange dataex; + +/* local function prototypes */ +static int vtpm_queue_packet(struct packet *pak); +static int _packet_write(struct packet *pak, + const char *data, size_t size, + int userbuffer); +static void processing_timeout(unsigned long ptr); +static int packet_read_shmem(struct packet *pak, + tpmif_t *tpmif, + u32 offset, + char *buffer, + int isuserbuffer, + u32 left); + + +#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE + +static multicall_entry_t tx_mcl[MAX_PENDING_REQS]; + +#define MIN(x,y) (x) < (y) ? (x) : (y) + + + +static struct packet * +packet_find_instance(struct list_head *head, u32 tpm_instance) +{ + struct packet *pak; + struct list_head *p; + /* + * traverse the list of packets and return the first + * one with the given instance number + */ + list_for_each(p, head) { + pak = list_entry(p, struct packet, next); + if (pak->tpm_instance == tpm_instance) { + return pak; + } + } + return NULL; +} + +static struct packet * +packet_find_packet(struct list_head *head, void *packet) +{ + struct packet *pak; + struct list_head *p; + /* + * traverse the list of packets and return the first + * one with the given instance number + */ + list_for_each(p, head) { + pak = list_entry(p, struct packet, next); + if (pak == packet) { + return pak; + } + } + return NULL; +} + +static struct packet * +packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags) +{ + struct packet *pak = NULL; + pak = kmalloc(sizeof(struct packet), + GFP_KERNEL); + if (NULL != pak) { + memset(pak, 0x0, sizeof(*pak)); + if (tpmif) { + pak->tpmif = tpmif; + pak->tpm_instance = tpmif->tpm_instance; + } + pak->data_len = size; + pak->req_tag = req_tag; + pak->last_read = 0; + 
pak->flags = flags; + + /* + * cannot do tpmif_get(tpmif); bad things happen + * on the last tpmif_put() + */ + init_timer(&pak->processing_timer); + pak->processing_timer.function = processing_timeout; + pak->processing_timer.data = (unsigned long)pak; + } + return pak; +} + +static void inline +packet_reset(struct packet *pak) +{ + pak->last_read = 0; +} + +static void inline +packet_free(struct packet *pak) +{ + del_singleshot_timer_sync(&pak->processing_timer); + if (pak->data_buffer) { + kfree(pak->data_buffer); + } + /* + * cannot do tpmif_put(pak->tpmif); bad things happen + * on the last tpmif_put() + */ + kfree(pak); +} + +static int +packet_set(struct packet *pak, + const unsigned char *buffer, u32 size) +{ + int rc = 0; + unsigned char *buf = kmalloc(size, GFP_KERNEL); + if (NULL != buf) { + pak->data_buffer = buf; + memcpy(buf, buffer, size); + pak->data_len = size; + } else { + rc = -ENOMEM; + } + return rc; +} + + +/* + * Write data to the shared memory and send it off. + */ +static int +packet_write(struct packet *pak, + const char *data, size_t size, + int userbuffer) +{ + int rc = 0; + + DPRINTK("Supposed to send %d bytes to front-end!\n", + size); + + if (0 != (pak->flags & PACKET_FLAG_SEND_CONTROLMESSAGE)) { +#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS + u32 res; + /* + * 2 cases: + * - The upper layer TPM does not know anything about this additonal + * command. An error should be indicated - just send control message + * - The upper layer TPM recognized the command and sent status. + */ + memcpy(&res, &data[2+4], sizeof(res)); + if (res != 0) { + u32 *status_ptr = &((tpmif_be_create_t *) &pak->ctrl_msg.msg[0])->status; + /* error from TPM - modify control message */ + res = TPMIF_BE_STATUS_ERROR; + memcpy(status_ptr, + &res, + sizeof(res)); + } +#endif + ctrl_if_send_response(&pak->ctrl_msg); + } + + + if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) { + /* Don't send a respone to this packet. Just acknowledge it. 
*/ + rc = size; + } else { + rc = _packet_write(pak, data, size, userbuffer); + } + + return rc; +} + + +static int +_packet_write(struct packet *pak, + const char *data, size_t size, + int userbuffer) +{ + /* + * Write into the shared memory pages directly + * and send it to the front end. + */ + tpmif_t *tpmif = pak->tpmif; +#ifdef CONFIG_XEN_TPMDEV_GRANT + u16 handle; +#endif + int rc = 0; + unsigned int i = 0; + unsigned int offset = 0; + multicall_entry_t *mcl; + + if (tpmif == NULL) + return -EFAULT; + + if (tpmif->status != CONNECTED) { + return size; + } + + mcl = tx_mcl; + while (offset < size && i < TPMIF_TX_RING_SIZE) { + unsigned int tocopy; + tpmif_tx_request_t *tx; + + tx = &tpmif->tx->ring[i].req; + + if (0 == tx->addr) { + DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i); + return 0; + } + +#ifdef CONFIG_XEN_TPMDEV_GRANT + { /* Map: Use the Grant table reference */ + struct gnttab_map_grant_ref op; + op.host_addr = MMAP_VADDR(tpmif, i); + op.flags = GNTMAP_host_map; + op.ref = tx->ref; + op.dom = tpmif->domid; + + if(unlikely( + HYPERVISOR_grant_table_op( + GNTTABOP_map_grant_ref, + &op, + 1))) { + BUG(); + } + + handle = op.handle; + + if(op.handle < 0) { + DPRINTK(" Grant table operation failure !\n"); + return 0; + } + + } +#endif /* CONFIG_XEN_TPMDEV_GRANT */ + + /* + * Map that address, unless it has already been mapped + */ + if (tx->mapped == 0) { +#ifndef CONFIG_XEN_TPMDEV_GRANT + multicall_entry_t *mcl = tx_mcl; + + mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain; + mcl[0].args[0] = MMAP_VADDR(tpmif,i); + mcl[0].args[1] = (tx->addr & PAGE_MASK) | __PAGE_KERNEL; + mcl[0].args[2] = 0; + mcl[0].args[3] = tpmif->domid; + mcl++; + if (unlikely + (HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) + != 0)) + BUG(); + + if (unlikely(mcl[0].args[5] != 0)) { + DPRINTK("Bad page frame: %ld\n", + mcl[0].args[5]); + tpmif_put(tpmif); + return 0; + } +#endif /* CONFIG_XEN_TPMDEV_GRANT */ + tx->mapped = 1; + 
phys_to_machine_mapping[__pa(MMAP_VADDR(tpmif,i)) >> + PAGE_SHIFT] = + FOREIGN_FRAME(tx->addr >> PAGE_SHIFT); + } + + tocopy = size - offset; + if (tocopy > PAGE_SIZE) { + tocopy = PAGE_SIZE; + } + if (userbuffer) { + if (copy_from_user((void *)(MMAP_VADDR(tpmif,i) | (tx->addr & ~PAGE_MASK)), + (void __user *)&data[offset], + tocopy)) { + tpmif_put(tpmif); + return -EFAULT; + } + } else { + memcpy((void *)(MMAP_VADDR(tpmif,i) | + (tx->addr & ~PAGE_MASK)), + &data[offset], tocopy); + } + tx->size = tocopy; + +#ifdef CONFIG_XEN_TPMDEV_GRANT + { /* Unmap: Use the Grant Table ref */ + struct gnttab_unmap_grant_ref op; + op.host_addr = MMAP_VADDR(tpmif, i); + op.handle = handle; + op.dev_bus_addr = 0; + + if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))) { + BUG(); + } + } +#endif /* CONFIG_XEN_TPMDEV_GRANT */ + + offset += tocopy; + i++; + } + + rc = offset; + DPRINTK("Notifying frontend via event channel %d\n", + tpmif->evtchn); + notify_via_evtchn(tpmif->evtchn); + + return rc; +} + +/* + * Read data from the shared memory and copy it directly into the + * provided buffer. Advance the read_last indicator which tells + * how many bytes have already been read. + */ +static int +packet_read(struct packet *pak, size_t numbytes, + char *buffer, size_t buffersize, + int userbuffer) +{ + tpmif_t *tpmif = pak->tpmif; + /* + * I am supposed to read 'numbytes' of data from the + * buffer. + * The first 4 bytes that are read are the instance number in + * network byte order, after that comes the data from the + * shared memory buffer. + */ + u32 to_copy; + u32 offset = 0; + u32 room_left = buffersize; + /* + * Ensure that we see the request when we copy it. 
+ */ + mb(); + + if (pak->last_read < 4) { + /* + * copy the instance number into the buffer + */ + u32 instance_no = htonl(pak->tpm_instance); + u32 last_read = pak->last_read; + to_copy = MIN(4 - last_read, numbytes); + + if (userbuffer) { + if (copy_to_user(&buffer[0], + &(((u8 *)&instance_no)[last_read]), + to_copy)) { + return -EFAULT; + } + } else { + memcpy(&buffer[0], + &(((u8 *)&instance_no)[last_read]), + to_copy); + } + + pak->last_read += to_copy; + offset += to_copy; + room_left -= to_copy; + } + + /* + * If the packet has a data buffer appended, read from it... + */ + + if (room_left > 0) { + if (pak->data_buffer) { + u32 to_copy = MIN(pak->data_len - offset, room_left); + u32 last_read = pak->last_read - 4; + if (userbuffer) { + if (copy_to_user(&buffer[offset], + &pak->data_buffer[last_read], + to_copy)) { + return -EFAULT; + } + } else { + memcpy(&buffer[offset], + &pak->data_buffer[last_read], + to_copy); + } + pak->last_read += to_copy; + offset += to_copy; + } else { + offset = packet_read_shmem(pak, + tpmif, + offset, + buffer, + userbuffer, + room_left); + } + } + return offset; +} + +static int +packet_read_shmem(struct packet *pak, + tpmif_t *tpmif, + u32 offset, + char *buffer, + int isuserbuffer, + u32 room_left) { + u32 last_read = pak->last_read - 4; + u32 i = (last_read / PAGE_SIZE); + u32 pg_offset = last_read & (PAGE_SIZE - 1); + u32 to_copy; +#ifdef CONFIG_XEN_TPMDEV_GRANT + u16 handle; +#endif + tpmif_tx_request_t *tx; + tx = &tpmif->tx->ring[0].req; + /* + * Start copying data at the page with index 'index' + * and within that page at offset 'offset'. + * Copy a maximum of 'room_left' bytes. 
+ */ + to_copy = MIN(PAGE_SIZE - pg_offset, room_left); + while (to_copy > 0) { + void *src; + tx = &tpmif->tx->ring[i].req; + +#ifdef CONFIG_XEN_TPMDEV_GRANT + { /* Map the Grant table reference */ + struct gnttab_map_grant_ref op; + + op.host_addr = MMAP_VADDR(tpmif, i); + op.flags = GNTMAP_host_map; + op.ref = tx->ref; + op.dom = tpmif->domid; + + if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))) { + BUG(); + } + + if (op.handle < 0) { + DPRINTK(" Grant table operation failure !\n"); + return -EFAULT; + } + + handle = op.handle; + + } +#endif /* CONFIG_XEN_TPMDEV_GRANT */ + + if (tx->mapped == 0) { +#ifndef CONFIG_XEN_TPMDEV_GRANT + mcl = tx_mcl; + DPRINTK("Will try to map to %08lX\n", + MMAP_VADDR(tpmif,i) >> PAGE_SHIFT); + mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain; + mcl[0].args[0] = MMAP_VADDR(tpmif,i); + mcl[0].args[1] = (tx->addr & PAGE_MASK) | __PAGE_KERNEL; + mcl[0].args[2] = 0; + mcl[0].args[3] = tpmif->domid; + mcl++; + + if (unlikely + (HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0)) + BUG(); + + if (unlikely(mcl[0].args[5] != 0)) { + DPRINTK("Bad page frame: %ld\n", + mcl[0].args[5]); + tpmif_put(tpmif); + return; + } +#endif /* CONFIG_XEN_TPMDEV_GRANT */ + /* + * Mark as mapped + */ + tx->mapped = 1; + + phys_to_machine_mapping[__pa(MMAP_VADDR(tpmif,i)) >> + PAGE_SHIFT] = + FOREIGN_FRAME(tx->addr >> PAGE_SHIFT); + } + + if (to_copy > tx->size) { + /* + * This is the case when the user wants to read more + * than what we have. So we just give him what we + * have. 
+ */ + to_copy = MIN(tx->size, to_copy); + } + + DPRINTK("Copying from mapped memory at %08lx\n", + (unsigned long)(MMAP_VADDR(tpmif,i) | + (tx->addr & ~PAGE_MASK))); + + src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset)); + if (isuserbuffer) { + if (copy_to_user(&buffer[offset], + src, + to_copy)) { + return -EFAULT; + } + } else { + memcpy(&buffer[offset], + src, + to_copy); + } + +#ifdef CONFIG_XEN_TPMDEV_GRANT + + DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n", + tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]); + + { /* Unmap the Grant Table ref */ + struct gnttab_unmap_grant_ref op; + + op.host_addr = MMAP_VADDR(tpmif, i); + op.handle = handle; + op.dev_bus_addr = 0; + + if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))) { + BUG(); + } + + } +#endif /* CONFIG_XEN_TPMDEV_GRANT */ + + offset += to_copy; + pg_offset = 0; + last_read += to_copy; + room_left -= to_copy; + + to_copy = MIN(PAGE_SIZE, room_left); + i++; + } /* while (to_copy > 0) */ + /* + * Adjust the last_read pointer + */ + pak->last_read = last_read + 4; + return offset; +} + + +/* + * The file layer + */ +static int +vtpm_op_open(struct inode *inode, struct file *f) +{ + int rc = 0; + unsigned long flags; + + write_lock_irqsave(&dataex.pak_lock, flags); + if (dataex.has_opener == 0) { + dataex.has_opener = 1; + } else { + rc = -EPERM; + } + write_unlock_irqrestore(&dataex.pak_lock, flags); + return rc; +} + +static ssize_t +vtpm_op_read(struct file *file, + char __user * data, size_t size, loff_t * offset) +{ + int ret_size = -ENODATA; + struct packet *pak = NULL; + unsigned long flags; + + write_lock_irqsave(&dataex.pak_lock, flags); + + if (list_empty(&dataex.pending_pak)) { + write_unlock_irqrestore(&dataex.pak_lock, flags); + wait_event_interruptible(dataex.wait_queue, + !list_empty(&dataex.pending_pak)); + write_lock_irqsave(&dataex.pak_lock, flags); + } + + if (!list_empty(&dataex.pending_pak)) { + 
unsigned int left; + pak = list_entry(dataex.pending_pak.next, struct packet, next); + + left = pak->data_len - dataex.copied_so_far; + + DPRINTK("size given by app: %d, available: %d\n", size, left); + + ret_size = MIN(size,left); + + ret_size = packet_read(pak, ret_size, data, size, 1); + if (ret_size < 0) { + ret_size = -EFAULT; + } else { + DPRINTK("Copied %d bytes to user buffer\n", ret_size); + + dataex.copied_so_far += ret_size; + if (dataex.copied_so_far >= pak->data_len + 4) { + DPRINTK("All data from this packet given to app.\n"); + /* All data given to app */ + + del_singleshot_timer_sync(&pak->processing_timer); + list_del(&pak->next); + list_add_tail(&pak->next, &dataex.current_pak); + /* + * The more fontends that are handled at the same time, + * the more time we give the TPM to process the request. + */ + mod_timer(&pak->processing_timer, + jiffies + (num_frontends * 10 * HZ)); + dataex.copied_so_far = 0; + } + } + } + write_unlock_irqrestore(&dataex.pak_lock, flags); + + DPRINTK("Returning result from read to app: %d\n", ret_size); + + return ret_size; +} + +/* + * Write operation - only works after a previous read operation! 
+ */ +static ssize_t +vtpm_op_write(struct file *file, const char __user * data, size_t size, + loff_t * offset) +{ + struct packet *pak; + int rc = 0; + unsigned int off = 4; + unsigned long flags; + u32 instance_no = 0; + u32 len_no = 0; + + /* + * Minimum required packet size is: + * 4 bytes for instance number + * 2 bytes for tag + * 4 bytes for paramSize + * 4 bytes for the ordinal + * sum: 14 bytes + */ + if ( size < off + 10 ) { + return -EFAULT; + } + + if (copy_from_user(&instance_no, + (void __user *)&data[0], + 4)) { + return -EFAULT; + } + + if (copy_from_user(&len_no, + (void __user *)&data[off+2], + 4) || + (off + ntohl(len_no) != size)) { + return -EFAULT; + } + + write_lock_irqsave(&dataex.pak_lock, flags); + pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no)); + + if (pak == NULL) { + write_unlock_irqrestore(&dataex.pak_lock, flags); + printk(KERN_ALERT "No associated packet!\n"); + return -EFAULT; + } else { + del_singleshot_timer_sync(&pak->processing_timer); + list_del(&pak->next); + } + + write_unlock_irqrestore(&dataex.pak_lock, flags); + + /* + * The first 'offset' bytes must be the instance number. + * I will just pull that from the packet. 
+ */ + size -= off; + data = &data[off]; + + rc = packet_write(pak, data, size, 1); + + if (rc > 0) { + /* I neglected the first 4 bytes */ + rc += off; + } + packet_free(pak); + return rc; +} + +static int +vtpm_op_release(struct inode *inode, struct file *file) +{ + unsigned long flags; + vtpm_release_packets(NULL, 1); + write_lock_irqsave(&dataex.pak_lock, flags); + dataex.has_opener = 0; + write_unlock_irqrestore(&dataex.pak_lock, flags); + return 0; +} + +static unsigned int +vtpm_op_poll(struct file *file, struct poll_table_struct *pst) +{ + return 0; +} + +static struct file_operations vtpm_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = vtpm_op_open, + .read = vtpm_op_read, + .write = vtpm_op_write, + .release = vtpm_op_release, + .poll = vtpm_op_poll, +}; + +static struct miscdevice ibmvtpms_miscdevice = { + .minor = 225, + .name = "vtpm", + .fops = &vtpm_ops, +}; + +static u8 create_cmd[] = { + 1,193, /* 0: TPM_TAG_RQU_COMMAMD */ + 0,0,0,19, /* 2: length */ + 0,0,0,0x1, /* 6: VTPM_ORD_OPEN */ + 0, /* 10: VTPM type */ + 0,0,0,0, /* 11: domain id */ + 0,0,0,0 /* 15: instance id */ +}; + +static u8 destroy_cmd[] = { + 1,193, /* 0: TPM_TAG_RQU_COMMAMD */ + 0,0,0,14, /* 2: length */ + 0,0,0,0x2, /* 6: VTPM_ORD_CLOSE */ + 0,0,0,0 /* 10: instance id */ +}; + +int +tpmif_vtpm_open(tpmif_t *tpmif, ctrl_msg_t *msg) +{ + int rc = 0; + struct packet *pak = packet_alloc(tpmif, sizeof(create_cmd), create_cmd[0], + PACKET_FLAG_DISCARD_RESPONSE|PACKET_FLAG_SEND_CONTROLMESSAGE); + if (pak) { + tpmif_be_create_t *create = (tpmif_be_create_t *)&msg->msg[0]; + u8 buf[sizeof(create_cmd)]; + u32 domid_no = htonl(create->domid); + u32 instance_no = htonl(create->tpm_instance); + memcpy(buf, create_cmd, sizeof(create_cmd)); + + memcpy(&buf[11], &domid_no, sizeof(u32)); + memcpy(&buf[15], &instance_no, sizeof(u32)); + + /* also copy the contol message */ + memcpy(&pak->ctrl_msg, msg, sizeof(*msg)); + + /* copy the buffer into the packet */ + rc = packet_set(pak, 
buf, sizeof(buf)); + + if (rc == 0) { + rc = vtpm_queue_packet(pak); + pak->tpm_instance = 0; + } + if (rc < 0) { + /* could not be queued or built */ + packet_free(pak); + } + } else { + rc = -ENOMEM; + } + return rc; +} + +int +tpmif_vtpm_close(ctrl_msg_t *msg, u32 instid) +{ + int rc = 0; + struct packet *pak; + + pak = packet_alloc(NULL, + sizeof(create_cmd), + create_cmd[0], + PACKET_FLAG_DISCARD_RESPONSE|PACKET_FLAG_SEND_CONTROLMESSAGE); + if (pak) { + u8 buf[sizeof(destroy_cmd)]; + u32 instid_no = htonl(instid); + memcpy(buf, destroy_cmd, sizeof(destroy_cmd)); + memcpy(&buf[10], &instid_no, sizeof(u32)); + + /* also copy the contol message */ + memcpy(&pak->ctrl_msg, msg, sizeof(*msg)); + + /* copy the buffer into the packet */ + rc = packet_set(pak, buf, sizeof(buf)); + + if (rc == 0) { + rc = vtpm_queue_packet(pak); + pak->tpm_instance = 0; + } + if (rc < 0) { + /* could not be queued or built */ + packet_free(pak); + } + } else { + rc = -ENOMEM; + } + return rc; +} + + +static int +tpm_send_fail_message(struct packet *pak, u8 req_tag) +{ + int rc; + static const unsigned char tpm_error_message_fail[] = { + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */ + }; + unsigned char buffer[sizeof(tpm_error_message_fail)]; + + memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail)); + /* + * Insert the right response tag depending on the given tag + * All response tags are '+3' to the request tag. 
+ */ + buffer[1] = req_tag + 3; + + /* + * Write the data to shared memory and notify the front-end + */ + rc = packet_write(pak, buffer, sizeof(buffer), 0); + + return rc; +} + +static void +_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif, int send_msgs) +{ + struct packet *pak; + struct list_head *pos, *tmp; + + list_for_each_safe(pos, tmp, head) { + pak = list_entry(pos, struct packet, next); + if (tpmif == NULL || pak->tpmif == tpmif) { + int can_send = 0; + del_singleshot_timer_sync(&pak->processing_timer); + list_del(&pak->next); + + if (pak->tpmif && pak->tpmif->status == CONNECTED) { + can_send = 1; + } + + if (send_msgs && can_send) { + tpm_send_fail_message(pak, pak->req_tag); + } + packet_free(pak); + } + } +} + +int +vtpm_release_packets(tpmif_t *tpmif, int send_msgs) +{ + unsigned long flags; + + write_lock_irqsave(&dataex.pak_lock, flags); + + _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs); + _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs); + + write_unlock_irqrestore(&dataex.pak_lock, + flags); + return 0; +} + +static int +vtpm_queue_packet(struct packet *pak) +{ + int rc = 0; + if (dataex.has_opener) { + unsigned long flags; + write_lock_irqsave(&dataex.pak_lock, flags); + list_add_tail(&pak->next, &dataex.pending_pak); + /* give the TPM some time to pick up the request */ + mod_timer(&pak->processing_timer, jiffies + (10 * HZ)); + write_unlock_irqrestore(&dataex.pak_lock, + flags); + + wake_up_interruptible(&dataex.wait_queue); + } else { + rc = -EFAULT; + } + return rc; +} + +static int +vtpm_receive(tpmif_t *tpmif, u32 size) +{ + int rc = 0; + unsigned char buffer[10]; + __be32 *native_size; + + struct packet *pak = packet_alloc(tpmif, size, buffer[4], 0); + if (NULL == pak) { + return -ENOMEM; + } + /* + * Read 10 bytes from the received buffer to test its + * content for validity. 
+ */ + if (sizeof(buffer) != packet_read(pak, sizeof(buffer), buffer, sizeof(buffer), 0)) { + goto failexit; + } + /* + * Reset the packet read pointer so we can read all its + * contents again. + */ + packet_reset(pak); + + native_size = (__force __be32 *)(&buffer[4+2]); + /* + * Verify that the size of the packet is correct + * as indicated and that there's actually someone reading packets. + * The minimum size of the packet is '10' for tag, size indicator + * and ordinal. + */ + if (size < 10 || + be32_to_cpu(*native_size) != size || + 0 == dataex.has_opener) { + rc = -EINVAL; + goto failexit; + } else { + if ((rc = vtpm_queue_packet(pak)) < 0) { + goto failexit; + } + } + + return 0; + +failexit: + if (pak) { + tpm_send_fail_message(pak, buffer[4+1]); + packet_free(pak); + } + return rc; +} + + +/* + * Time function that gets invoked when a packet has not been processed + * during the timeout period. + * The packet must be on a list when this function is invoked. This + * also means that once its taken off a list, the timer must be + * destroyed as well. + */ +static void processing_timeout(unsigned long ptr) +{ + struct packet *pak = (struct packet *)ptr; + unsigned long flags; + write_lock_irqsave(&dataex.pak_lock, flags); + /* + * The packet needs to be searched whether it + * is still on the list. 
+ */ + if (pak == packet_find_packet(&dataex.pending_pak, pak) || + pak == packet_find_packet(&dataex.current_pak, pak) ) { + list_del(&pak->next); + tpm_send_fail_message(pak, pak->req_tag); + packet_free(pak); + } + + write_unlock_irqrestore(&dataex.pak_lock, flags); +} + + + +static void tpm_tx_action(unsigned long unused); +static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0); + +#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE + +static multicall_entry_t tx_mcl[MAX_PENDING_REQS]; + +static struct list_head tpm_schedule_list; +static spinlock_t tpm_schedule_list_lock; + +static inline void +maybe_schedule_tx_action(void) +{ + smp_mb(); + tasklet_schedule(&tpm_tx_tasklet); +} + +static int +__on_tpm_schedule_list(tpmif_t * tpmif) +{ + return tpmif->list.next != NULL; +} + +static void +remove_from_tpm_schedule_list(tpmif_t * tpmif) +{ + spin_lock_irq(&tpm_schedule_list_lock); + if (likely(__on_tpm_schedule_list(tpmif))) { + list_del(&tpmif->list); + tpmif->list.next = NULL; + tpmif_put(tpmif); + } + spin_unlock_irq(&tpm_schedule_list_lock); +} + +static void +add_to_tpm_schedule_list_tail(tpmif_t * tpmif) +{ + if (__on_tpm_schedule_list(tpmif)) + return; + + spin_lock_irq(&tpm_schedule_list_lock); + if (!__on_tpm_schedule_list(tpmif) && tpmif->active) { + list_add_tail(&tpmif->list, &tpm_schedule_list); + tpmif_get(tpmif); + } + spin_unlock_irq(&tpm_schedule_list_lock); +} + +void +tpmif_schedule_work(tpmif_t * tpmif) +{ + add_to_tpm_schedule_list_tail(tpmif); + maybe_schedule_tx_action(); +} + +void +tpmif_deschedule_work(tpmif_t * tpmif) +{ + remove_from_tpm_schedule_list(tpmif); +} + + +static void +tpm_tx_action(unsigned long unused) +{ + struct list_head *ent; + tpmif_t *tpmif; + tpmif_tx_request_t *tx; + + DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__); + + while (!list_empty(&tpm_schedule_list)) { + /* Get a tpmif from the list with work to do. 
*/ + ent = tpm_schedule_list.next; + tpmif = list_entry(ent, tpmif_t, list); + tpmif_get(tpmif); + remove_from_tpm_schedule_list(tpmif); + /* + * Ensure that we see the request when we read from it. + */ + mb(); + + tx = &tpmif->tx->ring[0].req; + + /* pass it up */ + vtpm_receive(tpmif, tx->size); + + tpmif_put(tpmif); + } +} + +irqreturn_t +tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs) +{ + tpmif_t *tpmif = dev_id; + add_to_tpm_schedule_list_tail(tpmif); + maybe_schedule_tx_action(); + return IRQ_HANDLED; +} + +static int __init +tpmback_init(void) +{ + int rc; + if (!(xen_start_info.flags & SIF_TPM_BE_DOMAIN) && + !(xen_start_info.flags & SIF_INITDOMAIN)) { + printk(KERN_ALERT "Neither TPM-BE Domain nor INIT domain!\n"); + return 0; + } + + if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) { + printk(KERN_ALERT "Could not register misc device for TPM BE.\n"); + return rc; + } + + INIT_LIST_HEAD(&dataex.pending_pak); + INIT_LIST_HEAD(&dataex.current_pak); + dataex.has_opener = 0; + rwlock_init(&dataex.pak_lock); + init_waitqueue_head(&dataex.wait_queue); + + tpmif_interface_init(); + + spin_lock_init(&tpm_schedule_list_lock); + INIT_LIST_HEAD(&tpm_schedule_list); + + tpmif_ctrlif_init(); + +// tpmif_xenbus_init(); + + printk(KERN_ALERT "Successfully initialized TPM backend driver.\n"); +#ifdef CONFIG_XEN_TPMDEV_GRANT + printk(KERN_ALERT "TPM backend is using grant tables.\n"); +#endif + + return 0; +} + +__initcall(tpmback_init); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,2 @@ + +obj-$(CONFIG_XEN_TPMDEV_FRONTEND) += tpmfront.o diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c 
xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c --- xen-unstable.hg.orig/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c 2005-08-16 17:07:18.000000000 -0400 @@ -0,0 +1,1016 @@ +/* + * Copyright (c) 2005, IBM Corporation + * + * Author: Stefan Berger, stefanb@us.ibm.com + * Grant table support: Mahadevan Gomathisankaran + * + * This code has been derived from drivers/xen/netfront/netfront.c + * + * Copyright (c) 2002-2004, K A Fraser + * + * This file may be distributed separately from the Linux kernel, or + * incorporated into other software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_XEN_TPMDEV_GRANT +#include +#endif + +#undef DEBUG + +#if 1 +#define ASSERT(_p) \ + if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \ + __LINE__, __FILE__); *(int*)0=0; } +#else +#define ASSERT(_p) +#endif + +static struct list_head dev_list; + +static void tpmif_rx_action(unsigned long unused); + +static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0); + +struct tx_buffer { + unsigned int size; // available space in data + unsigned int len; // used space in data + unsigned char *data; // pointer to a page +}; + +#ifdef CONFIG_XEN_TPMDEV_GRANT +static grant_ref_t gref_head, gref_terminal; +#endif + +static inline int +tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len, int userbuffer) +{ + int copied = len; + + if (len > txb->size) { + copied = txb->size; + } + if (userbuffer) { + if (copy_from_user(txb->data, + src, + copied)) { + return -EFAULT; + } + } else { + memcpy(txb->data, src, copied); + } + txb->len = len; + return copied; +} + +static inline struct tx_buffer * +tx_buffer_alloc(void) +{ + struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer), + GFP_KERNEL); + + if (txb) { + txb->len = 0; + txb->size = PAGE_SIZE; + txb->data = (unsigned char *)__get_free_page(GFP_KERNEL); + if (txb->data == NULL) { + kfree(txb); + txb = NULL; + } + } + return txb; +} + +struct tpm_private { + struct list_head list; + + tpmif_tx_interface_t *tx; + + spinlock_t tx_lock; + + unsigned int handle; + unsigned int evtchn; + unsigned int irq; + + /* What is the status of our connection to the remote backend? 
*/ +#define BEST_CLOSED 0 +#define BEST_DISCONNECTED 1 +#define BEST_CONNECTED 2 + unsigned int backend_state; + + struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE]; + + atomic_t tx_busy; + void *tx_remember; +#ifdef CONFIG_XEN_TPMDEV_GRANT + int shmem_ref; + domid_t domid; +#endif +}; + +static char *status_name[] = { + [TPMIF_INTERFACE_STATUS_CLOSED] = "closed", + [TPMIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected", + [TPMIF_INTERFACE_STATUS_CONNECTED] = "connected", + [TPMIF_INTERFACE_STATUS_CHANGED] = "changed", +}; + +static char *be_state_name[] = { + [BEST_CLOSED] = "closed", + [BEST_DISCONNECTED] = "disconnected", + [BEST_CONNECTED] = "connected", +}; + +#if DEBUG +#define DPRINTK(fmt, args...) \ + printk(KERN_ALERT "xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args) +#else +#define DPRINTK(fmt, args...) ((void)0) +#endif +#define IPRINTK(fmt, args...) \ + printk(KERN_INFO "xen_tpm_fr: " fmt, ##args) +#define WPRINTK(fmt, args...) \ + printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args) + +/* + * local function prototypes + */ +static int tpm_start_xmit(struct tpm_private *tp, + const u8 * buf, size_t count, int userbuffer, + void *remember); + +/************************************************************** + + The interface to let the tpm plugin register its callback + function and send data to another partition using this module + +**************************************************************/ + +static DECLARE_MUTEX(upperlayer_lock); +static struct tpmfe_device *upperlayer_tpmfe; + +/* + * Send data via this module by calling this function + */ +int +tpm_fe_send(const u8 * buf, size_t count, void *ptr) +{ + int sent = 0; + struct tpm_private *tp; + tp = list_entry(dev_list.next, struct tpm_private, list); + + if (tp) { + sent = tpm_start_xmit(tp, buf, count, 0, ptr); + } + return sent; +} + +EXPORT_SYMBOL(tpm_fe_send); + +/* + * Register a callback for receiving data from this module + */ +int +tpm_fe_register_receiver(struct tpmfe_device 
*tpmfe_dev) +{ + int rc = 0; + + down(&upperlayer_lock); + if (NULL == upperlayer_tpmfe) { + upperlayer_tpmfe = tpmfe_dev; + tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE; + } else { + rc = -EBUSY; + } + up(&upperlayer_lock); + return rc; +} + +EXPORT_SYMBOL(tpm_fe_register_receiver); + +/* + * Unregister the callback for receiving data from this module + */ +void +tpm_fe_unregister_receiver(void) +{ + down(&upperlayer_lock); + upperlayer_tpmfe = NULL; + up(&upperlayer_lock); +} + +EXPORT_SYMBOL(tpm_fe_unregister_receiver); + +/* + * Call this function to send data to the upper layer's + * registered receiver function. + */ +static int +tpm_fe_send_upperlayer(const u8 * buf, size_t count, const void *ptr) +{ + int rc; + + down(&upperlayer_lock); + + if (upperlayer_tpmfe && upperlayer_tpmfe->receive) { + rc = upperlayer_tpmfe->receive(buf, count, ptr); + } else { + rc = 0; + } + + up(&upperlayer_lock); + return rc; +} + +/* ============================================================ + * Lower-layer functions. + * ============================================================ + */ + +static struct tpm_private * +find_dev_by_handle(unsigned int handle) +{ + struct list_head *ent; + struct tpm_private *tp; + + list_for_each(ent, &dev_list) { + tp = list_entry(ent, struct tpm_private, list); + + if (tp->handle == handle) + return tp; + } + return NULL; +} + +/** TPM interface info. */ +struct tpmif_ctrl { + /** Number of interfaces. */ + int interface_n; + /** Number of connected interfaces. */ + int connected_n; + /** Error code. 
*/ + int err; + int up; +}; + +static struct tpmif_ctrl tpmctrl; + +/************************************************************** + +**************************************************************/ + +static void +tpmctrl_init(void) +{ + memset(&tpmctrl, 0, sizeof (tpmctrl)); + tpmctrl.up = TPMIF_DRIVER_STATUS_DOWN; +} + +static int +tpm_allocate_buffers(struct tpm_private *tp) +{ + unsigned int i; + + i = 0; + while (i < TPMIF_TX_RING_SIZE) { + tp->tx_buffers[i] = tx_buffer_alloc(); + i++; + } + + return 1; +} + +static void +tpmif_rx_action(unsigned long unused) +{ + struct tpm_private *tp; + + tp = list_entry(dev_list.next, struct tpm_private, list); + + if (tp) { + int i = 0; + unsigned int received; + unsigned int offset = 0; + u8 *buffer; + tpmif_tx_request_t *tx; + tx = &tp->tx->ring[i].req; + + received = tx->size; + + buffer = kmalloc(received, GFP_KERNEL); + if (NULL == buffer) { + goto exit; + } + + i = 0; + while (i < TPMIF_TX_RING_SIZE && + offset < received) { + struct tx_buffer *txb = tp->tx_buffers[i]; + tpmif_tx_request_t *tx; + unsigned int tocopy; + + tx = &tp->tx->ring[i].req; + + tocopy = tx->size; + if (tocopy > PAGE_SIZE) { + tocopy = PAGE_SIZE; + } + + memcpy(&buffer[offset], txb->data, tocopy); + +#ifdef CONFIG_XEN_TPMDEV_GRANT + gnttab_release_grant_reference(&gref_head, tx->ref); +#endif + + offset += tocopy; + i++; + } + + DPRINTK("Sending %d bytes to upper layer!\n", offset); + + tpm_fe_send_upperlayer(buffer, received, tp->tx_remember); + + kfree(buffer); + +exit: + atomic_set(&tp->tx_busy, 0); + } +} + +static irqreturn_t +tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs) +{ + struct tpm_private *tp = tpm_priv; + unsigned long flags; + + spin_lock_irqsave(&tp->tx_lock, flags); + tasklet_schedule(&tpmif_rx_tasklet); + spin_unlock_irqrestore(&tp->tx_lock, flags); + + return IRQ_HANDLED; +} + +static int +tpm_start_xmit(struct tpm_private *tp, + const u8 * buf, size_t count, int userbuffer, + void *remember) +{ + 
tpmif_tx_request_t *tx; + TPMIF_RING_IDX i; + unsigned int offset = 0; + + if (NULL == tp) { + DPRINTK("%s: ERROR: tp=NULL\n", __FUNCTION__); + return 0; + } + + DPRINTK("%s: using tpm_private at %p\n", __FUNCTION__, tp); + spin_lock_irq(&tp->tx_lock); + + if (unlikely(atomic_read(&tp->tx_busy))) { + printk("There's an outstanding request/response on the way!\n"); + spin_unlock_irq(&tp->tx_lock); + return -EBUSY; + } + + if (tp->backend_state != BEST_CONNECTED) { + printk("Not connected to backend!\n"); + spin_unlock_irq(&tp->tx_lock); + return -EIO; + } + + i = 0; + while (count > 0 && i < TPMIF_TX_RING_SIZE) { + struct tx_buffer *txb = tp->tx_buffers[i]; + int copied; + + if (NULL == txb) { + DPRINTK("txb (i=%d) is NULL. buffers initilized?\n", i); + DPRINTK("Not transmittin anything!\n"); + spin_unlock_irq(&tp->tx_lock); + return -EFAULT; + } + copied = tx_buffer_copy(txb, &buf[offset], count, userbuffer); + if (copied < 0) { + /* An error occurred */ + return copied; + } + count -= copied; + offset += copied; + + tx = &tp->tx->ring[i].req; + + tx->id = i; + tx->addr = virt_to_machine(txb->data); + tx->size = txb->len; + +#ifdef CONFIG_XEN_TPMDEV_GRANT + DPRINTK("First 4 characters sent by TPM-FE of domain %d are 0x%02x 0x%02x 0x%02x 0x%02x\n", + tp->domid,txb->data[0],txb->data[1],txb->data[2],txb->data[3]); + + /* get the granttable reference for this page */ + tx->ref = gnttab_claim_grant_reference( &gref_head, gref_terminal ); + + if(-ENOSPC == tx->ref ) { + DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__); + return -ENOSPC; + } + gnttab_grant_foreign_access_ref ( tx->ref , tp->domid, (tx->addr >> PAGE_SHIFT), 0 /*RW*/); +#endif + i++; + wmb(); + } + + atomic_set(&tp->tx_busy, 1); + tp->tx_remember = remember; + mb(); + + DPRINTK("Notifying backend via event channel %d\n", + tp->evtchn); + + notify_via_evtchn(tp->evtchn); + + spin_unlock_irq(&tp->tx_lock); + + return offset; +} + +static void 
+tpm_notify_upperlayer(struct tpm_private *tp) +{ + /* + * Notify upper layer about this + */ + down(&upperlayer_lock); + + if (upperlayer_tpmfe != NULL) { + switch (tp->backend_state) { + case BEST_CONNECTED: + upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED); + break; + + default: + upperlayer_tpmfe->status(0); + break; + } + } + up(&upperlayer_lock); +} + +static void +tpm_connect(struct tpm_private *tp, tpmif_fe_interface_status_t * status) +{ + spin_lock_irq(&tp->tx_lock); + tp->backend_state = BEST_CONNECTED; +#ifdef CONFIG_XEN_TPMDEV_GRANT + tp->domid = status->domid; +#endif + spin_unlock_irq(&tp->tx_lock); + + tpm_notify_upperlayer(tp); +} + +static void +tpmif_show(struct tpm_private *tp) +{ +#if DEBUG + if (tp) { + IPRINTK("\n", + tp->handle, + be_state_name[tp->backend_state], + tp->evtchn, tp->irq, tp->tx); + } else { + IPRINTK("\n"); + } +#endif +} + +/* ================================================================= + * Function for sending and receiving of control messages + * ================================================================= + */ + +/* Send a connect message to xend to tell it to bring up the interface. 
*/ +static void +send_interface_connect(struct tpm_private *tp) +{ + ctrl_msg_t cmsg = { + .type = CMSG_TPMIF_FE, + .subtype = CMSG_TPMIF_FE_INTERFACE_CONNECT, + .length = sizeof (tpmif_fe_interface_connect_t), + }; + tpmif_fe_interface_connect_t *msg = (void *)cmsg.msg; + + DPRINTK("Sending interface connect message!\n"); + + DPRINTK(">\n"); + tpmif_show(tp); + msg->handle = tp->handle; + msg->shmem_frame = (virt_to_machine(tp->tx) >> PAGE_SHIFT); + +#ifdef CONFIG_XEN_TPMDEV_GRANT + tp->shmem_ref = msg->shmem_ref = gnttab_claim_grant_reference( &gref_head, gref_terminal ); + ASSERT( tp->shmem_ref != -ENOSPC ); + gnttab_grant_foreign_access_ref ( msg->shmem_ref , tp->domid, msg->shmem_frame, 0 ); +#endif + + DPRINTK("message handle: %d\n", msg->handle); + DPRINTK("tx shared memory: virtual=%p machine=0x%08lx\n", + tp->tx, msg->shmem_frame); + + ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); + DPRINTK("<\n"); +} + +/* Send a driver status notification to the domain controller. */ +static int +send_driver_status(int ok) +{ + int err = 0; + ctrl_msg_t cmsg = { + .type = CMSG_TPMIF_FE, + .subtype = CMSG_TPMIF_FE_DRIVER_STATUS, + .length = sizeof (tpmif_fe_driver_status_t), + }; + tpmif_fe_driver_status_t *msg = (void *)cmsg.msg; + + DPRINTK("Sending driver status!\n"); + DPRINTK(" type=CMSG_TPMIF_FE (=%d)\n", cmsg.type); + + msg->status = (ok ? TPMIF_DRIVER_STATUS_UP : TPMIF_DRIVER_STATUS_DOWN); + DPRINTK(" status=%d\n", msg->status); + err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE); + return err; +} + +/* Stop virtual TPM device and free tx queues and irq. + */ +static void +vtpm_release(struct tpm_private *tp) +{ + /* Stop old i/f to prevent errors whilst we rebuild the state. */ + spin_lock_irq(&tp->tx_lock); + + tp->backend_state = BEST_DISCONNECTED; + spin_unlock_irq(&tp->tx_lock); + + tpm_notify_upperlayer(tp); + + /* Free resources. 
*/ + if (tp->tx != NULL) { + free_irq(tp->irq, tp); + unbind_evtchn_from_irq(tp->evtchn); + free_page((unsigned long)tp->tx); + tp->irq = 0; + tp->evtchn = 0; + tp->tx = NULL; + } +} + +/* Release vTPM interface resources and close it down completely. + */ +static void +tpmif_close(struct tpm_private *tp) +{ + DPRINTK(">\n"); + tpmif_show(tp); + WPRINTK("Unexpected tpmif-CLOSED message in state %s\n", + be_state_name[tp->backend_state]); + vtpm_release(tp); + tp->backend_state = BEST_CLOSED; + /* todo: take dev down and free. */ + + tpm_notify_upperlayer(tp); + + tpmif_show(tp); + DPRINTK("<\n"); +} + +/* Move the vTPM interface into disconnected state. + * Allocates tx/rx pages. + * Sends connect message to xend. + */ +static void +vtpm_disconnect(struct tpm_private *tp) +{ + DPRINTK(">\n"); + if (tp->tx) + free_page((unsigned long)tp->tx); + // Before this tp->tx and tp->rx had better be null. + tp->tx = (tpmif_tx_interface_t *) __get_free_page(GFP_KERNEL); + memset(tp->tx, 0, PAGE_SIZE); + tp->backend_state = BEST_DISCONNECTED; + + tpm_notify_upperlayer(tp); + + send_interface_connect(tp); + tpmif_show(tp); + DPRINTK("<\n"); +} + +/* Begin interface recovery. + * + */ +static void +vtpm_reset(struct tpm_private *np) +{ + DPRINTK(">\n"); + IPRINTK("Attempting to reconnect TPM interface: handle=%u\n", + np->handle); + vtpm_release(np); + vtpm_disconnect(np); + tpmif_show(np); + DPRINTK("<\n"); +} + +/* Move the vtpm into connected state. + * Sets the event channel from the message. + * Binds the irq to the event channel.
+ */ +static void +vtpm_connect(struct tpm_private *tp, tpmif_fe_interface_status_t * status) +{ + DPRINTK(">\n"); + DPRINTK("%s\n", __FUNCTION__); + tpm_connect(tp, status); + + tp->evtchn = status->evtchn; + tp->irq = bind_evtchn_to_irq(tp->evtchn); + DPRINTK("evtchn = %d\n", tp->evtchn); + DPRINTK("irq = %d\n", tp->irq); + (void)request_irq(tp->irq, tpmif_int, SA_SAMPLE_RANDOM, + "me" /*dev->name */ , tp); + + tpmif_show(tp); + DPRINTK("<\n"); +} + +static struct tpm_private * +create_tpm_private(int handle) +{ + struct tpm_private *tp = NULL; + tp = kmalloc(sizeof (struct tpm_private), GFP_KERNEL); + if (tp) { + memset(tp, 0x0, sizeof (*tp)); + tp->backend_state = BEST_CLOSED; + tp->handle = handle; + DPRINTK("Creating tpm_private structure: %p\n", tp); + DPRINTK(" with handle = %d\n", handle); + spin_lock_init(&tp->tx_lock); + + tp->tx = (tpmif_tx_interface_t *) __get_free_page(GFP_KERNEL); + + tpm_allocate_buffers(tp); + + list_add(&tp->list, &dev_list); + + tpmctrl.interface_n++; + } + return tp; +} + +/* Get the target interface for a status message. + * Creates the interface when it makes sense. + * The returned interface may be null when there is no error. + * + * @param status status message + * @param np return parameter for interface state + * @return 0 on success, error code otherwise + */ +static int +target_vtpm(tpmif_fe_interface_status_t * status, struct tpm_private **res) +{ + int err = 0; + struct tpm_private *tp = NULL; + + DPRINTK("> handle=%d\n", status->handle); + if (status->handle < 0) { + err = -EINVAL; + goto exit; + } + + if ((tp = find_dev_by_handle(status->handle)) != NULL) + goto exit; + + if (status->status == TPMIF_INTERFACE_STATUS_CLOSED) + goto exit; + if (status->status == TPMIF_INTERFACE_STATUS_CHANGED) + goto exit; + + /* It's a new interface in a good state - create it. 
*/ + DPRINTK("> create tpm_private...\n"); + + tp = create_tpm_private(status->handle); + if (NULL == tp) { + err = -ENOMEM; + } else { + tp->domid = status->domid; + } + +exit: + if (NULL != res) { + *res = tp; + } + DPRINTK("< err=%d\n", err); + return err; +} + +/* Handle an interface status message. */ +static void +tpmif_interface_status(tpmif_fe_interface_status_t * status) +{ + int err = 0; + struct tpm_private *tp = NULL; + + DPRINTK(">\n"); + DPRINTK("> status=%s handle=%d\n", + status_name[status->status], status->handle); + + if ((err = target_vtpm(status, &tp)) != 0) { + WPRINTK("Invalid tpmif: handle=%u\n", status->handle); + return; + } + + if (tp == NULL) { + DPRINTK("> no vif\n"); + return; + } + + DPRINTK(">\n"); + tpmif_show(tp); + + switch (status->status) { + case TPMIF_INTERFACE_STATUS_CLOSED: + DPRINTK("> Got status: TPMIF_INTERFACE_STATUS_CLOSED:\n"); + switch (tp->backend_state) { + case BEST_CLOSED: + DPRINTK("> BEST_CLOSED\n"); + tpmif_close(tp); + break; + case BEST_DISCONNECTED: + DPRINTK("> BEST_DISCONNECTED\n"); + tpmif_close(tp); + break; + case BEST_CONNECTED: + DPRINTK("> BEST_CONNECTED\n"); + tpmif_close(tp); + break; + } + break; + + case TPMIF_INTERFACE_STATUS_DISCONNECTED: + DPRINTK("> Got status: TPMIF_INTERFACE_STATUS_DISCONNECTED:\n"); + switch (tp->backend_state) { + case BEST_CLOSED: + DPRINTK("> BEST_CLOSED\n"); + vtpm_disconnect(tp); + break; + case BEST_DISCONNECTED: + DPRINTK("> BEST_DISCONNECTED\n"); + vtpm_reset(tp); + case BEST_CONNECTED: + DPRINTK("> BEST_CONNECTED\n"); + vtpm_reset(tp); + break; + } + break; + + case TPMIF_INTERFACE_STATUS_CONNECTED: + DPRINTK("> Got status: TPMIF_INTERFACE_STATUS_CONNECTED\n"); + switch (tp->backend_state) { + case BEST_CLOSED: + DPRINTK("> BEST_CLOSED\n"); + WPRINTK("Unexpected tpmif status %s in state %s\n", + status_name[status->status], + be_state_name[tp->backend_state]); + vtpm_disconnect(tp); + vtpm_connect(tp, status); + break; + case BEST_DISCONNECTED: + DPRINTK("> 
BEST_DISCONNECTED\n"); + vtpm_connect(tp, status); + break; + } + break; + + case TPMIF_INTERFACE_STATUS_CHANGED: + DPRINTK("> Got status: TPMIF_INTERFACE_STATUS_CHANGED!\n"); + /* + * The domain controller is notifying us that a device has been + * added or removed. + */ + break; + + default: + WPRINTK("Invalid tpmif status code %d\n", status->status); + break; + } + tpmif_show(tp); + DPRINTK("<\n"); +} + +/* + * Initialize the vTPM control interface. + */ +static void +tpmif_driver_status(tpmif_fe_driver_status_t * status) +{ + DPRINTK("> status=%d\n", status->status); + tpmctrl.up = status->status; +} + +/* + * Wake up the vTPM front-end interface if it's not already + * associated with a back-end. + */ +static void +tpmif_driver_wakeup(void) +{ + struct tpm_private *tp; + tp = list_entry(dev_list.next, struct tpm_private, list); + if (NULL == tp) { + printk("Sending driver status after wakeup call.\n"); + send_driver_status(1); + } +} + +/* Receive handler for control messages. */ +static void +tpmif_ctrlif_rx(ctrl_msg_t * msg, unsigned long id) +{ + + switch (msg->subtype) { + case CMSG_TPMIF_FE_INTERFACE_STATUS: + if (msg->length != sizeof (tpmif_fe_interface_status_t)) + goto error; + DPRINTK("Received a message: CMSG_TPMIF_FE_INTERFACE_STATUS\n"); + tpmif_interface_status((tpmif_fe_interface_status_t *) + & msg->msg[0]); + break; + + case CMSG_TPMIF_FE_DRIVER_STATUS: + if (msg->length != sizeof (tpmif_fe_driver_status_t)) + goto error; + DPRINTK("Received a message: CMSG_TPMIF_FE_DRIVER_STATUS\n"); + tpmif_driver_status((tpmif_fe_driver_status_t *) + & msg->msg[0]); + break; + + + case CMSG_TPMIF_FE_STARTUP: + DPRINTK("Received a message: CMSG_TPMIF_FE_STARTUP\n"); + tpmif_driver_wakeup(); + break; + +error: + default: + DPRINTK("Received an unknown message!\n"); + msg->length = 0; + break; + } + + DPRINTK("Sending control message response.\n"); + ctrl_if_send_response(msg); +} + + +static int __init +tpmif_init(void) +{ + int err = 0; + + 
IPRINTK("Initialising the vTPM driver.\n"); +#ifdef CONFIG_XEN_TPMDEV_GRANT + if ( 0 > gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE, + &gref_head, + &gref_terminal )) { + err = -EFAULT; + } + printk("TPM-FE has grant table support!\n"); +#else + printk("TPM-FE does NOT have grant table support!\n"); +#endif + INIT_LIST_HEAD(&dev_list); + tpmctrl_init(); + (void)ctrl_if_register_receiver(CMSG_TPMIF_FE, tpmif_ctrlif_rx, + CALLBACK_IN_BLOCKING_CONTEXT); + + /* + * Only don't send the driver status when we are in the + * INIT domain. + */ + if (!(xen_start_info.flags & SIF_INITDOMAIN)) { + DPRINTK("calling send_driver_status(1) now.\n"); + send_driver_status(1); + } + + DPRINTK("< err=%d\n", err); + return err; +} + +static void +vtpm_suspend(struct tpm_private *tp) +{ + // Avoid having tx/rx stuff happen until we're ready. + DPRINTK(">\n"); + DPRINTK("FREEING IRQ: %d\n", tp->irq); + free_irq(tp->irq, tp); + DPRINTK("UNBINDING FROM IRQ: %d\n", tp->evtchn); + unbind_evtchn_from_irq(tp->evtchn); + + gnttab_release_grant_reference(&gref_head, tp->shmem_ref); + + DPRINTK("<\n"); +} + +static void +vtpm_resume(struct tpm_private *tp) +{ + // Stop bad things from happening until we're back up. + DPRINTK(">\n"); + tp->backend_state = BEST_DISCONNECTED; + + send_interface_connect(tp); + DPRINTK("<\n"); +} + +void +tpmif_suspend(void) +{ + struct list_head *ent; + + DPRINTK(">\n"); + list_for_each(ent, &dev_list) { + struct tpm_private *tp; + tp = list_entry(ent, struct tpm_private, list); + + vtpm_suspend(tp); + } + DPRINTK("<\n"); +} + +void +tpmif_resume(void) +{ + struct list_head *ent; + + DPRINTK(">\n"); + list_for_each(ent, &dev_list) { + unsigned int i = 0; + struct tpm_private *tp; + + tp = list_entry(ent, struct tpm_private, list); + + /* + * Mark all request buffers as unmapped + * so the backend will start mapping them + * again. 
+ */ + while (i < TPMIF_TX_RING_SIZE) { + tpmif_tx_request_t *tx = &tp->tx->ring[i].req; + tx->mapped = 0; + i++; + } + vtpm_resume(tp); + } + DPRINTK("<\n"); +} + +__initcall(tpmif_init); diff -uprN xen-unstable.hg.orig/linux-2.6-xen-sparse/include/linux/tpmfe.h xen-unstable.hg/linux-2.6-xen-sparse/include/linux/tpmfe.h --- xen-unstable.hg.orig/linux-2.6-xen-sparse/include/linux/tpmfe.h 1969-12-31 19:00:00.000000000 -0500 +++ xen-unstable.hg/linux-2.6-xen-sparse/include/linux/tpmfe.h 2005-08-16 17:06:07.000000000 -0400 @@ -0,0 +1,33 @@ +#ifndef TPM_FE_H +#define TPM_FE_H + +struct tpmfe_device { + /* + * Let upper layer receive data from front-end + */ + int (*receive)(const u8 *buffer, size_t count, const void *ptr); + /* + * Indicate the status of the front-end to the upper + * layer. + */ + void (*status)(unsigned int flags); + + /* + * This field indicates the maximum size the driver can + * transfer in one chunk. It is filled out by the front-end + * driver and should be propagated to the generic tpm driver + * for allocation of buffers. + */ + unsigned int max_tx_size; +}; + +enum { + TPMFE_STATUS_DISCONNECTED = 0x0, + TPMFE_STATUS_CONNECTED = 0x1 +}; + +int tpm_fe_send(const u8 * buf, size_t count, void *ptr); +int tpm_fe_register_receiver(struct tpmfe_device *); +void tpm_fe_unregister_receiver(void); + +#endif