Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/agp/amd64-agp.c | 2
-rw-r--r--  drivers/char/decserial.c | 38
-rw-r--r--  drivers/char/drm/drm_sman.c | 1
-rw-r--r--  drivers/char/drm/drm_vm.c | 8
-rw-r--r--  drivers/char/hvc_console.c | 1
-rw-r--r--  drivers/char/hvcs.c | 426
-rw-r--r--  drivers/char/hw_random/Kconfig | 19
-rw-r--r--  drivers/char/hw_random/Makefile | 3
-rw-r--r--  drivers/char/ip2/i2cmd.h | 5
-rw-r--r--  drivers/char/ip2/i2lib.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 641
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 25
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 18
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 724
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 114
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 376
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 14
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 121
-rw-r--r--  drivers/char/istallion.c | 4
-rw-r--r--  drivers/char/misc.c | 2
-rw-r--r--  drivers/char/mmtimer.c | 23
-rw-r--r--  drivers/char/moxa.c | 5
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 26
-rw-r--r--  drivers/char/rio/rio_linux.c | 8
-rw-r--r--  drivers/char/riscom8.c | 5
-rw-r--r--  drivers/char/synclink.c | 26
-rw-r--r--  drivers/char/synclink_gt.c | 27
-rw-r--r--  drivers/char/synclinkmp.c | 26
-rw-r--r--  drivers/char/sysrq.c | 14
-rw-r--r--  drivers/char/toshiba.c | 1
-rw-r--r--  drivers/char/tpm/tpm.c | 1
-rw-r--r--  drivers/char/vt.c | 16
-rw-r--r--  drivers/char/watchdog/pcwd_usb.c | 2
33 files changed, 1791 insertions, 932 deletions
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 00b17ae3973..2f2c4efff8a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -459,7 +459,7 @@ static const struct aper_size_info_32 nforce3_sizes[5] =
/* Handle shadow device of the Nvidia NForce3 */
/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
-static int __devinit nforce3_agp_init(struct pci_dev *pdev)
+static int nforce3_agp_init(struct pci_dev *pdev)
{
u32 tmp, apbase, apbar, aplimit;
struct pci_dev *dev1;
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c
index 85f404e25c7..8ea2bea2b18 100644
--- a/drivers/char/decserial.c
+++ b/drivers/char/decserial.c
@@ -23,20 +23,12 @@
extern int zs_init(void);
#endif
-#ifdef CONFIG_DZ
-extern int dz_init(void);
-#endif
-
#ifdef CONFIG_SERIAL_CONSOLE
#ifdef CONFIG_ZS
extern void zs_serial_console_init(void);
#endif
-#ifdef CONFIG_DZ
-extern void dz_serial_console_init(void);
-#endif
-
#endif
/* rs_init - starts up the serial interface -
@@ -46,23 +38,11 @@ extern void dz_serial_console_init(void);
int __init rs_init(void)
{
-
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
if (IOASIC)
return zs_init();
- else
- return dz_init();
-#else
-
-#ifdef CONFIG_ZS
- return zs_init();
-#endif
-
-#ifdef CONFIG_DZ
- return dz_init();
-#endif
-
#endif
+ return -ENXIO;
}
__initcall(rs_init);
@@ -76,21 +56,9 @@ __initcall(rs_init);
*/
static int __init decserial_console_init(void)
{
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
if (IOASIC)
zs_serial_console_init();
- else
- dz_serial_console_init();
-#else
-
-#ifdef CONFIG_ZS
- zs_serial_console_init();
-#endif
-
-#ifdef CONFIG_DZ
- dz_serial_console_init();
-#endif
-
#endif
return 0;
}
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index 425c82336ee..19c81d2e13d 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -162,6 +162,7 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
return 0;
}
+EXPORT_SYMBOL(drm_sman_set_manager);
static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
unsigned long owner)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b40ae438f53..ae2691942dd 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -147,14 +147,14 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map)
- return NOPAGE_OOM; /* Nothing allocated */
+ return NOPAGE_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = (map->type == _DRM_CONSISTENT) ?
virt_to_page((void *)i) : vmalloc_to_page((void *)i);
if (!page)
- return NOPAGE_OOM;
+ return NOPAGE_SIGBUS;
get_page(page);
DRM_DEBUG("shm_nopage 0x%lx\n", address);
@@ -272,7 +272,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dma->pagelist)
- return NOPAGE_OOM; /* Nothing allocated */
+ return NOPAGE_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT;
@@ -310,7 +310,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry->pagelist)
- return NOPAGE_OOM; /* Nothing allocated */
+ return NOPAGE_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 9902ffad3b1..cc2cd46bedc 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -38,6 +38,7 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
+#include <linux/freezer.h>
#include <asm/uaccess.h>
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 8728255c946..d090622f1de 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -337,11 +337,6 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp);
static void hvcs_close(struct tty_struct *tty, struct file *filp);
static void hvcs_hangup(struct tty_struct * tty);
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd);
-static void hvcs_remove_device_attrs(struct vio_dev *vdev);
-static void hvcs_create_driver_attrs(void);
-static void hvcs_remove_driver_attrs(void);
-
static int __devinit hvcs_probe(struct vio_dev *dev,
const struct vio_device_id *id);
static int __devexit hvcs_remove(struct vio_dev *dev);
@@ -353,6 +348,172 @@ static void __exit hvcs_module_exit(void);
#define HVCS_TRY_WRITE 0x00000004
#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
+static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
+{
+ return viod->dev.driver_data;
+}
+/* The sysfs interface for the driver and devices */
+
+static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
+
+static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
+
+static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
+ size_t count)
+{
+ /*
+ * Don't need this feature at the present time because firmware doesn't
+ * yet support multiple partners.
+ */
+ printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
+ return -EPERM;
+}
+
+static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+
+static DEVICE_ATTR(current_vty,
+ S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
+
+static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+
+ /* writing a '0' to this sysfs entry will result in the disconnect. */
+ if (simple_strtol(buf, NULL, 0) != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+ if (hvcsd->open_count > 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. "
+ "The hvcs device node is still in use.\n");
+ return -EPERM;
+ }
+
+ if (hvcsd->connected == 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. The"
+ " vty-server is not connected to a vty.\n");
+ return -EPERM;
+ }
+
+ hvcs_partner_free(hvcsd);
+ printk(KERN_INFO "HVCS: Closed vty-server@%X and"
+ " partner vty@%X:%d connection.\n",
+ hvcsd->vdev->unit_address,
+ hvcsd->p_unit_address,
+ (uint32_t)hvcsd->p_partition_ID);
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return count;
+}
+
+static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%d\n", hvcsd->connected);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
+ hvcs_vterm_state_show, hvcs_vterm_state_store);
+
+static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct vio_dev *viod = to_vio_dev(dev);
+ struct hvcs_struct *hvcsd = from_vio_dev(viod);
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ retval = sprintf(buf, "%d\n", hvcsd->index);
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return retval;
+}
+
+static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
+
+static struct attribute *hvcs_attrs[] = {
+ &dev_attr_partner_vtys.attr,
+ &dev_attr_partner_clcs.attr,
+ &dev_attr_current_vty.attr,
+ &dev_attr_vterm_state.attr,
+ &dev_attr_index.attr,
+ NULL,
+};
+
+static struct attribute_group hvcs_attr_group = {
+ .attrs = hvcs_attrs,
+};
+
+static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
+{
+ /* A 1 means it is updating, a 0 means it is done updating */
+ return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
+}
+
+static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
+ size_t count)
+{
+ if ((simple_strtol(buf, NULL, 0) != 1)
+ && (hvcs_rescan_status != 0))
+ return -EINVAL;
+
+ hvcs_rescan_status = 1;
+ printk(KERN_INFO "HVCS: rescanning partner info for all"
+ " vty-servers.\n");
+ hvcs_rescan_devices_list();
+ hvcs_rescan_status = 0;
+ return count;
+}
+
+static DRIVER_ATTR(rescan,
+ S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
+
static void hvcs_kick(void)
{
hvcs_kicked = 1;
@@ -575,7 +736,7 @@ static void destroy_hvcs_struct(struct kobject *kobj)
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
- hvcs_remove_device_attrs(vdev);
+ sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
kfree(hvcsd);
}
@@ -608,6 +769,7 @@ static int __devinit hvcs_probe(
{
struct hvcs_struct *hvcsd;
int index;
+ int retval;
if (!dev || !id) {
printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
@@ -658,14 +820,16 @@ static int __devinit hvcs_probe(
* the hvcs_struct has been added to the devices list then the user app
* will get -ENODEV.
*/
-
spin_lock(&hvcs_structs_lock);
-
list_add_tail(&(hvcsd->next), &hvcs_structs);
-
spin_unlock(&hvcs_structs_lock);
- hvcs_create_device_attrs(hvcsd);
+ retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
+ if (retval) {
+ printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
+ hvcsd->vdev->unit_address);
+ return retval;
+ }
printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
@@ -1354,8 +1518,10 @@ static int __init hvcs_module_init(void)
if (!hvcs_tty_driver)
return -ENOMEM;
- if (hvcs_alloc_index_list(num_ttys_to_alloc))
- return -ENOMEM;
+ if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
+ rc = -ENOMEM;
+ goto index_fail;
+ }
hvcs_tty_driver->owner = THIS_MODULE;
@@ -1385,41 +1551,57 @@ static int __init hvcs_module_init(void)
* dynamically assigned major and minor numbers for our devices.
*/
if (tty_register_driver(hvcs_tty_driver)) {
- printk(KERN_ERR "HVCS: registration "
- " as a tty driver failed.\n");
- hvcs_free_index_list();
- put_tty_driver(hvcs_tty_driver);
- return -EIO;
+ printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+ rc = -EIO;
+ goto register_fail;
}
hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!hvcs_pi_buff) {
- tty_unregister_driver(hvcs_tty_driver);
- hvcs_free_index_list();
- put_tty_driver(hvcs_tty_driver);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto buff_alloc_fail;
}
hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
if (IS_ERR(hvcs_task)) {
printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n");
- kfree(hvcs_pi_buff);
- tty_unregister_driver(hvcs_tty_driver);
- hvcs_free_index_list();
- put_tty_driver(hvcs_tty_driver);
- return -EIO;
+ rc = -EIO;
+ goto kthread_fail;
}
rc = vio_register_driver(&hvcs_vio_driver);
+ if (rc) {
+ printk(KERN_ERR "HVCS: can't register vio driver\n");
+ goto vio_fail;
+ }
/*
* This needs to be done AFTER the vio_register_driver() call or else
* the kobjects won't be initialized properly.
*/
- hvcs_create_driver_attrs();
+ rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+ if (rc) {
+ printk(KERN_ERR "HVCS: sysfs attr create failed\n");
+ goto attr_fail;
+ }
printk(KERN_INFO "HVCS: driver module inserted.\n");
+ return 0;
+
+attr_fail:
+ vio_unregister_driver(&hvcs_vio_driver);
+vio_fail:
+ kthread_stop(hvcs_task);
+kthread_fail:
+ kfree(hvcs_pi_buff);
+buff_alloc_fail:
+ tty_unregister_driver(hvcs_tty_driver);
+register_fail:
+ hvcs_free_index_list();
+index_fail:
+ put_tty_driver(hvcs_tty_driver);
+ hvcs_tty_driver = NULL;
return rc;
}
@@ -1441,7 +1623,7 @@ static void __exit hvcs_module_exit(void)
hvcs_pi_buff = NULL;
spin_unlock(&hvcs_pi_lock);
- hvcs_remove_driver_attrs();
+ driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
vio_unregister_driver(&hvcs_vio_driver);
@@ -1456,191 +1638,3 @@ static void __exit hvcs_module_exit(void)
module_init(hvcs_module_init);
module_exit(hvcs_module_exit);
-
-static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
-{
- return viod->dev.driver_data;
-}
-/* The sysfs interface for the driver and devices */
-
-static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
-
-static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
-
-static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
- size_t count)
-{
- /*
- * Don't need this feature at the present time because firmware doesn't
- * yet support multiple partners.
- */
- printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
- return -EPERM;
-}
-
-static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static DEVICE_ATTR(current_vty,
- S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
-
-static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
-
- /* writing a '0' to this sysfs entry will result in the disconnect. */
- if (simple_strtol(buf, NULL, 0) != 0)
- return -EINVAL;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
- if (hvcsd->open_count > 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. "
- "The hvcs device node is still in use.\n");
- return -EPERM;
- }
-
- if (hvcsd->connected == 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. The"
- " vty-server is not connected to a vty.\n");
- return -EPERM;
- }
-
- hvcs_partner_free(hvcsd);
- printk(KERN_INFO "HVCS: Closed vty-server@%X and"
- " partner vty@%X:%d connection.\n",
- hvcsd->vdev->unit_address,
- hvcsd->p_unit_address,
- (uint32_t)hvcsd->p_partition_ID);
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return count;
-}
-
-static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%d\n", hvcsd->connected);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
- hvcs_vterm_state_show, hvcs_vterm_state_store);
-
-static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct vio_dev *viod = to_vio_dev(dev);
- struct hvcs_struct *hvcsd = from_vio_dev(viod);
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- retval = sprintf(buf, "%d\n", hvcsd->index);
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return retval;
-}
-
-static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
-
-static struct attribute *hvcs_attrs[] = {
- &dev_attr_partner_vtys.attr,
- &dev_attr_partner_clcs.attr,
- &dev_attr_current_vty.attr,
- &dev_attr_vterm_state.attr,
- &dev_attr_index.attr,
- NULL,
-};
-
-static struct attribute_group hvcs_attr_group = {
- .attrs = hvcs_attrs,
-};
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd)
-{
- struct vio_dev *vdev = hvcsd->vdev;
- sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static void hvcs_remove_device_attrs(struct vio_dev *vdev)
-{
- sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
-{
- /* A 1 means it is updating, a 0 means it is done updating */
- return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
-}
-
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
- size_t count)
-{
- if ((simple_strtol(buf, NULL, 0) != 1)
- && (hvcs_rescan_status != 0))
- return -EINVAL;
-
- hvcs_rescan_status = 1;
- printk(KERN_INFO "HVCS: rescanning partner info for all"
- " vty-servers.\n");
- hvcs_rescan_devices_list();
- hvcs_rescan_status = 0;
- return count;
-}
-static DRIVER_ATTR(rescan,
- S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
-
-static void hvcs_create_driver_attrs(void)
-{
- struct device_driver *driverfs = &(hvcs_vio_driver.driver);
- driver_create_file(driverfs, &driver_attr_rescan);
-}
-
-static void hvcs_remove_driver_attrs(void)
-{
- struct device_driver *driverfs = &(hvcs_vio_driver.driver);
- driver_remove_file(driverfs, &driver_attr_rescan);
-}
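
The hvcs_module_init() rework above replaces several copies of inline cleanup with a single goto-based unwind ladder. A minimal, hypothetical sketch of that idiom follows; the setup_*/teardown_* helpers are illustrative placeholders, not functions from the driver. Each failure label undoes only what was acquired before the jump, so the labels read in reverse acquisition order and every exit path releases exactly what it owns.

/* Trivial stubs so the sketch stands alone; real code would
 * acquire and release actual resources here. */
static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return 0; }
static void teardown_a(void) { }
static void teardown_b(void) { }

static int example_init(void)
{
	int rc;

	rc = setup_a();		/* first resource, e.g. an allocation */
	if (rc)
		return rc;	/* nothing to undo yet */

	rc = setup_b();		/* second resource, e.g. a registration */
	if (rc)
		goto b_fail;

	rc = setup_c();		/* third resource */
	if (rc)
		goto c_fail;

	return 0;		/* everything succeeded */

c_fail:
	teardown_b();		/* undo setup_b() only */
b_fail:
	teardown_a();		/* undo setup_a() only */
	return rc;
}
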
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9f7635f7517..5f3acd8e64b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -3,17 +3,20 @@
#
config HW_RANDOM
- bool "Hardware Random Number Generator Core support"
- default y
+ tristate "Hardware Random Number Generator Core support"
+ default m
---help---
Hardware Random Number Generator Core infrastructure.
+ To compile this driver as a module, choose M here: the
+ module will be called rng-core.
+
If unsure, say Y.
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
depends on HW_RANDOM && (X86 || IA64) && PCI
- default y
+ default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Intel i8xx-based motherboards.
@@ -26,7 +29,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
depends on HW_RANDOM && X86 && PCI
- default y
+ default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on AMD 76x-based motherboards.
@@ -39,7 +42,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on HW_RANDOM && X86 && PCI
- default y
+ default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on the AMD Geode LX.
@@ -52,7 +55,7 @@ config HW_RANDOM_GEODE
config HW_RANDOM_VIA
tristate "VIA HW Random Number Generator support"
depends on HW_RANDOM && X86_32
- default y
+ default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on VIA based motherboards.
@@ -65,7 +68,7 @@ config HW_RANDOM_VIA
config HW_RANDOM_IXP4XX
tristate "Intel IXP4xx NPU HW Random Number Generator support"
depends on HW_RANDOM && ARCH_IXP4XX
- default y
+ default HW_RANDOM
---help---
This driver provides kernel-side support for the Random
Number Generator hardware found on the Intel IXP4xx NPU.
@@ -78,7 +81,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
- default y
+ default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on OMAP16xx and OMAP24xx multimedia
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e263ae96f94..c41fa19454e 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -2,7 +2,8 @@
# Makefile for HW Random Number Generator (RNG) device drivers.
#
-obj-$(CONFIG_HW_RANDOM) += core.o
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
diff --git a/drivers/char/ip2/i2cmd.h b/drivers/char/ip2/i2cmd.h
index baa4e721b75..29277ec6b8e 100644
--- a/drivers/char/ip2/i2cmd.h
+++ b/drivers/char/ip2/i2cmd.h
@@ -367,11 +367,6 @@ static UCHAR cc02[];
#define CSE_NULL 3 // Replace with a null
#define CSE_MARK 4 // Replace with a 3-character sequence (as Unix)
-#define CMD_SET_REPLACEMENT(arg,ch) \
- (((cmdSyntaxPtr)(ct36a))->cmd[1] = (arg), \
- (((cmdSyntaxPtr)(ct36a))->cmd[2] = (ch), \
- (cmdSyntaxPtr)(ct36a))
-
#define CSE_REPLACE 0x8 // Replace the errored character with the
// replacement character defined here
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index c213fdbdb2b..78045767ec3 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -1016,7 +1016,6 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
unsigned short channel;
unsigned short stuffIndex;
unsigned long flags;
- int rc = 0;
int bailout = 10;
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 0030cd8e2e9..6c59baa887a 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -33,11 +33,13 @@
#include <linux/ipmi_msgdefs.h> /* for completion codes */
#include "ipmi_si_sm.h"
-static int bt_debug = 0x00; /* Production value 0, see following flags */
+#define BT_DEBUG_OFF 0 /* Used in production */
+#define BT_DEBUG_ENABLE 1 /* Generic messages */
+#define BT_DEBUG_MSG 2 /* Prints all request/response buffers */
+#define BT_DEBUG_STATES 4 /* Verbose look at state changes */
+
+static int bt_debug = BT_DEBUG_OFF;
-#define BT_DEBUG_ENABLE 1
-#define BT_DEBUG_MSG 2
-#define BT_DEBUG_STATES 4
module_param(bt_debug, int, 0644);
MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
@@ -47,38 +49,54 @@ MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
Since the Open IPMI architecture is single-message oriented at this
stage, the queue depth of BT is of no concern. */
-#define BT_NORMAL_TIMEOUT 5000000 /* seconds in microseconds */
-#define BT_RETRY_LIMIT 2
-#define BT_RESET_DELAY 6000000 /* 6 seconds after warm reset */
+#define BT_NORMAL_TIMEOUT 5 /* seconds */
+#define BT_NORMAL_RETRY_LIMIT 2
+#define BT_RESET_DELAY 6 /* seconds after warm reset */
+
+/* States are written in chronological order and usually cover
+ multiple rows of the state table discussion in the IPMI spec. */
enum bt_states {
- BT_STATE_IDLE,
+ BT_STATE_IDLE = 0, /* Order is critical in this list */
BT_STATE_XACTION_START,
BT_STATE_WRITE_BYTES,
- BT_STATE_WRITE_END,
BT_STATE_WRITE_CONSUME,
- BT_STATE_B2H_WAIT,
- BT_STATE_READ_END,
- BT_STATE_RESET1, /* These must come last */
+ BT_STATE_READ_WAIT,
+ BT_STATE_CLEAR_B2H,
+ BT_STATE_READ_BYTES,
+ BT_STATE_RESET1, /* These must come last */
BT_STATE_RESET2,
BT_STATE_RESET3,
BT_STATE_RESTART,
- BT_STATE_HOSED
+ BT_STATE_PRINTME,
+ BT_STATE_CAPABILITIES_BEGIN,
+ BT_STATE_CAPABILITIES_END,
+ BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
};
+/* Macros seen at the end of state "case" blocks. They help with legibility
+ and debugging. */
+
+#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
+
struct si_sm_data {
enum bt_states state;
- enum bt_states last_state; /* assist printing and resets */
unsigned char seq; /* BT sequence number */
struct si_sm_io *io;
- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
- int write_count;
- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
- int read_count;
- int truncated;
- long timeout;
- unsigned int error_retries; /* end of "common" fields */
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH];
+ int write_count;
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH];
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+ int error_retries; /* end of "common" fields */
int nonzero_status; /* hung BMCs stay all 0 */
+ enum bt_states complete; /* to divert the state machine */
+ int BT_CAP_outreqs;
+ long BT_CAP_req2rsp;
+ int BT_CAP_retries; /* Recommended retries */
};
#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */
@@ -111,86 +129,118 @@ struct si_sm_data {
static char *state2txt(unsigned char state)
{
switch (state) {
- case BT_STATE_IDLE: return("IDLE");
- case BT_STATE_XACTION_START: return("XACTION");
- case BT_STATE_WRITE_BYTES: return("WR_BYTES");
- case BT_STATE_WRITE_END: return("WR_END");
- case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
- case BT_STATE_B2H_WAIT: return("B2H_WAIT");
- case BT_STATE_READ_END: return("RD_END");
- case BT_STATE_RESET1: return("RESET1");
- case BT_STATE_RESET2: return("RESET2");
- case BT_STATE_RESET3: return("RESET3");
- case BT_STATE_RESTART: return("RESTART");
- case BT_STATE_HOSED: return("HOSED");
+ case BT_STATE_IDLE: return("IDLE");
+ case BT_STATE_XACTION_START: return("XACTION");
+ case BT_STATE_WRITE_BYTES: return("WR_BYTES");
+ case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
+ case BT_STATE_READ_WAIT: return("RD_WAIT");
+ case BT_STATE_CLEAR_B2H: return("CLEAR_B2H");
+ case BT_STATE_READ_BYTES: return("RD_BYTES");
+ case BT_STATE_RESET1: return("RESET1");
+ case BT_STATE_RESET2: return("RESET2");
+ case BT_STATE_RESET3: return("RESET3");
+ case BT_STATE_RESTART: return("RESTART");
+ case BT_STATE_LONG_BUSY: return("LONG_BUSY");
+ case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
+ case BT_STATE_CAPABILITIES_END: return("CAP_END");
}
return("BAD STATE");
}
#define STATE2TXT state2txt(bt->state)
-static char *status2txt(unsigned char status, char *buf)
+static char *status2txt(unsigned char status)
{
+ /*
+ * This cannot be called by two threads at the same time and
+ * the buffer is always consumed immediately, so the static is
+ * safe to use.
+ */
+ static char buf[40];
+
strcpy(buf, "[ ");
- if (status & BT_B_BUSY) strcat(buf, "B_BUSY ");
- if (status & BT_H_BUSY) strcat(buf, "H_BUSY ");
- if (status & BT_OEM0) strcat(buf, "OEM0 ");
- if (status & BT_SMS_ATN) strcat(buf, "SMS ");
- if (status & BT_B2H_ATN) strcat(buf, "B2H ");
- if (status & BT_H2B_ATN) strcat(buf, "H2B ");
+ if (status & BT_B_BUSY)
+ strcat(buf, "B_BUSY ");
+ if (status & BT_H_BUSY)
+ strcat(buf, "H_BUSY ");
+ if (status & BT_OEM0)
+ strcat(buf, "OEM0 ");
+ if (status & BT_SMS_ATN)
+ strcat(buf, "SMS ");
+ if (status & BT_B2H_ATN)
+ strcat(buf, "B2H ");
+ if (status & BT_H2B_ATN)
+ strcat(buf, "H2B ");
strcat(buf, "]");
return buf;
}
-#define STATUS2TXT(buf) status2txt(status, buf)
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
-/* This will be called from within this module on a hosed condition */
-#define FIRST_SEQ 0
static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
{
- bt->state = BT_STATE_IDLE;
- bt->last_state = BT_STATE_IDLE;
- bt->seq = FIRST_SEQ;
- bt->io = io;
- bt->write_count = 0;
- bt->read_count = 0;
- bt->error_retries = 0;
- bt->nonzero_status = 0;
- bt->truncated = 0;
- bt->timeout = BT_NORMAL_TIMEOUT;
+ memset(bt, 0, sizeof(struct si_sm_data));
+ if (bt->io != io) { /* external: one-time only things */
+ bt->io = io;
+ bt->seq = 0;
+ }
+ bt->state = BT_STATE_IDLE; /* start here */
+ bt->complete = BT_STATE_IDLE; /* end here */
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+ bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+ /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
return 3; /* We claim 3 bytes of space; ought to check SPMI table */
}
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+ bt->read_data[0] = 4; /* # following bytes */
+ bt->read_data[1] = bt->write_data[1] | 4; /* Odd NetFn/LUN */
+ bt->read_data[2] = bt->write_data[2]; /* seq (ignored) */
+ bt->read_data[3] = bt->write_data[3]; /* Command */
+ bt->read_data[4] = completion_code;
+ bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
static int bt_start_transaction(struct si_sm_data *bt,
unsigned char *data,
unsigned int size)
{
unsigned int i;
- if ((size < 2) || (size > (IPMI_MAX_MSG_LENGTH - 2)))
- return -1;
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > IPMI_MAX_MSG_LENGTH)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
- if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
- return -2;
+ if (bt->state == BT_STATE_LONG_BUSY)
+ return IPMI_NODE_BUSY_ERR;
+
+ if (bt->state != BT_STATE_IDLE)
+ return IPMI_NOT_IN_MY_STATE_ERR;
if (bt_debug & BT_DEBUG_MSG) {
- printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
- printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
+ printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+ printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
for (i = 0; i < size; i ++)
- printk (" %02x", data[i]);
+ printk (" %02x", data[i]);
printk("\n");
}
bt->write_data[0] = size + 1; /* all data plus seq byte */
bt->write_data[1] = *data; /* NetFn/LUN */
- bt->write_data[2] = bt->seq;
+ bt->write_data[2] = bt->seq++;
memcpy(bt->write_data + 3, data + 1, size - 1);
bt->write_count = size + 2;
-
bt->error_retries = 0;
bt->nonzero_status = 0;
- bt->read_count = 0;
bt->truncated = 0;
bt->state = BT_STATE_XACTION_START;
- bt->last_state = BT_STATE_IDLE;
- bt->timeout = BT_NORMAL_TIMEOUT;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
return 0;
}
@@ -198,38 +248,30 @@ static int bt_start_transaction(struct si_sm_data *bt,
it calls this. Strip out the length and seq bytes. */
static int bt_get_result(struct si_sm_data *bt,
- unsigned char *data,
- unsigned int length)
+ unsigned char *data,
+ unsigned int length)
{
int i, msg_len;
msg_len = bt->read_count - 2; /* account for length & seq */
- /* Always NetFn, Cmd, cCode */
if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
- printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len);
- data[0] = bt->write_data[1] | 0x4; /* Kludge a response */
- data[1] = bt->write_data[3];
- data[2] = IPMI_ERR_UNSPECIFIED;
+ force_result(bt, IPMI_ERR_UNSPECIFIED);
msg_len = 3;
- } else {
- data[0] = bt->read_data[1];
- data[1] = bt->read_data[3];
- if (length < msg_len)
- bt->truncated = 1;
- if (bt->truncated) { /* can be set in read_all_bytes() */
- data[2] = IPMI_ERR_MSG_TRUNCATED;
- msg_len = 3;
- } else
- memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+ }
+ data[0] = bt->read_data[1];
+ data[1] = bt->read_data[3];
+ if (length < msg_len || bt->truncated) {
+ data[2] = IPMI_ERR_MSG_TRUNCATED;
+ msg_len = 3;
+ } else
+ memcpy(data + 2, bt->read_data + 4, msg_len - 2);
- if (bt_debug & BT_DEBUG_MSG) {
- printk (KERN_WARNING "BT: res (raw)");
- for (i = 0; i < msg_len; i++)
- printk(" %02x", data[i]);
- printk ("\n");
- }
+ if (bt_debug & BT_DEBUG_MSG) {
+ printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+ for (i = 0; i < msg_len; i++)
+ printk(" %02x", data[i]);
+ printk ("\n");
}
- bt->read_count = 0; /* paranoia */
return msg_len;
}
@@ -238,22 +280,40 @@ static int bt_get_result(struct si_sm_data *bt,
static void reset_flags(struct si_sm_data *bt)
{
+ if (bt_debug)
+ printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+ status2txt(BT_STATUS));
if (BT_STATUS & BT_H_BUSY)
- BT_CONTROL(BT_H_BUSY);
- if (BT_STATUS & BT_B_BUSY)
- BT_CONTROL(BT_B_BUSY);
- BT_CONTROL(BT_CLR_WR_PTR);
- BT_CONTROL(BT_SMS_ATN);
-
- if (BT_STATUS & BT_B2H_ATN) {
- int i;
- BT_CONTROL(BT_H_BUSY);
- BT_CONTROL(BT_B2H_ATN);
- BT_CONTROL(BT_CLR_RD_PTR);
- for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++)
- BMC2HOST;
- BT_CONTROL(BT_H_BUSY);
- }
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_CONTROL(BT_CLR_WR_PTR); /* always reset */
+ BT_CONTROL(BT_SMS_ATN); /* always clear */
+ BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/* Get rid of an unwanted/stale response. This should only be needed for
+ BMCs that support multiple outstanding requests. */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+ int i, size;
+
+ if (!(BT_STATUS & BT_B2H_ATN)) /* Not signalling a response */
+ return;
+
+ BT_CONTROL(BT_H_BUSY); /* now set */
+ BT_CONTROL(BT_B2H_ATN); /* always clear */
+ BT_STATUS; /* pause */
+ BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */
+ BT_CONTROL(BT_CLR_RD_PTR); /* always reset */
+ if (bt_debug)
+ printk(KERN_WARNING "IPMI BT: stale response %s; ",
+ status2txt(BT_STATUS));
+ size = BMC2HOST;
+ for (i = 0; i < size ; i++)
+ BMC2HOST;
+ BT_CONTROL(BT_H_BUSY); /* now clear */
+ if (bt_debug)
+ printk("drained %d bytes\n", size + 1);
}
static inline void write_all_bytes(struct si_sm_data *bt)
@@ -261,201 +321,256 @@ static inline void write_all_bytes(struct si_sm_data *bt)
int i;
if (bt_debug & BT_DEBUG_MSG) {
- printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+ printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
bt->write_count, bt->seq);
for (i = 0; i < bt->write_count; i++)
printk (" %02x", bt->write_data[i]);
printk ("\n");
}
for (i = 0; i < bt->write_count; i++)
- HOST2BMC(bt->write_data[i]);
+ HOST2BMC(bt->write_data[i]);
}
static inline int read_all_bytes(struct si_sm_data *bt)
{
unsigned char i;
+ /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+ Keep layout of first four bytes aligned with write_data[] */
+
bt->read_data[0] = BMC2HOST;
bt->read_count = bt->read_data[0];
- if (bt_debug & BT_DEBUG_MSG)
- printk(KERN_WARNING "BT: read %d bytes:", bt->read_count);
- /* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more
- following the length byte. */
if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
if (bt_debug & BT_DEBUG_MSG)
- printk("bad length %d\n", bt->read_count);
+ printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+ bt->read_count);
bt->truncated = 1;
return 1; /* let next XACTION START clean it up */
}
for (i = 1; i <= bt->read_count; i++)
- bt->read_data[i] = BMC2HOST;
- bt->read_count++; /* account for the length byte */
+ bt->read_data[i] = BMC2HOST;
+ bt->read_count++; /* Account internally for length byte */
if (bt_debug & BT_DEBUG_MSG) {
- for (i = 0; i < bt->read_count; i++)
+ int max = bt->read_count;
+
+ printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+ max, bt->read_data[2]);
+ if (max > 16)
+ max = 16;
+ for (i = 0; i < max; i++)
printk (" %02x", bt->read_data[i]);
- printk ("\n");
+ printk ("%s\n", bt->read_count == max ? "" : " ...");
}
- if (bt->seq != bt->write_data[2]) /* idiot check */
- printk(KERN_DEBUG "BT: internal error: sequence mismatch\n");
- /* per the spec, the (NetFn, Seq, Cmd) tuples should match */
- if ((bt->read_data[3] == bt->write_data[3]) && /* Cmd */
- (bt->read_data[2] == bt->write_data[2]) && /* Sequence */
- ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+ /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+ if ((bt->read_data[3] == bt->write_data[3]) &&
+ (bt->read_data[2] == bt->write_data[2]) &&
+ ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
return 1;
if (bt_debug & BT_DEBUG_MSG)
- printk(KERN_WARNING "BT: bad packet: "
+ printk(KERN_WARNING "IPMI BT: bad packet: "
"want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
- bt->write_data[1], bt->write_data[2], bt->write_data[3],
+ bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
bt->read_data[1], bt->read_data[2], bt->read_data[3]);
return 0;
}
-/* Modifies bt->state appropriately, need to get into the bt_event() switch */
+/* Restart if retries are left, or return an error completion code */
-static void error_recovery(struct si_sm_data *bt, char *reason)
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+ unsigned char status,
+ unsigned char cCode)
{
- unsigned char status;
- char buf[40]; /* For getting status */
+ char *reason;
- bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */
+ bt->timeout = bt->BT_CAP_req2rsp;
- status = BT_STATUS;
- printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT,
- STATUS2TXT(buf));
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ reason = "timeout";
+ break;
+ default:
+ reason = "internal error";
+ break;
+ }
+
+ printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */
+ reason, STATE2TXT, STATUS2TXT);
+ /* Per the IPMI spec, retries are based on the sequence number
+ known only to this module, so manage a restart here. */
(bt->error_retries)++;
- if (bt->error_retries > BT_RETRY_LIMIT) {
- printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
- bt->state = BT_STATE_HOSED;
- if (!bt->nonzero_status)
- printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
- else if (bt->error_retries <= BT_RETRY_LIMIT + 1) {
- printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n");
- bt->state = BT_STATE_RESET1;
- }
- return;
+ if (bt->error_retries < bt->BT_CAP_retries) {
+ printk("%d retries left\n",
+ bt->BT_CAP_retries - bt->error_retries);
+ bt->state = BT_STATE_RESTART;
+ return SI_SM_CALL_WITHOUT_DELAY;
}
- /* Sometimes the BMC queues get in an "off-by-one" state...*/
- if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
- printk(KERN_DEBUG "retry B2H_WAIT\n");
- return;
+ printk("failed %d retries, sending error response\n",
+ bt->BT_CAP_retries);
+ if (!bt->nonzero_status)
+ printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+
+ /* this is most likely during insmod */
+ else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+ printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+ bt->state = BT_STATE_RESET1;
+ return SI_SM_CALL_WITHOUT_DELAY;
}
- printk(KERN_DEBUG "restart command\n");
- bt->state = BT_STATE_RESTART;
+ /* Concoct a useful error message, set up the next state, and
+ be done with this sequence. */
+
+ bt->state = BT_STATE_IDLE;
+ switch (cCode) {
+ case IPMI_TIMEOUT_ERR:
+ if (status & BT_B_BUSY) {
+ cCode = IPMI_NODE_BUSY_ERR;
+ bt->state = BT_STATE_LONG_BUSY;
+ }
+ break;
+ default:
+ break;
+ }
+ force_result(bt, cCode);
+ return SI_SM_TRANSACTION_COMPLETE;
}
-/* Check the status and (possibly) advance the BT state machine. The
- default return is SI_SM_CALL_WITH_DELAY. */
+/* Check status and (usually) take action and change this state machine. */
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
- unsigned char status;
- char buf[40]; /* For getting status */
+ unsigned char status, BT_CAP[8];
+ static enum bt_states last_printed = BT_STATE_PRINTME;
int i;
status = BT_STATUS;
bt->nonzero_status |= status;
-
- if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
+ if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
STATE2TXT,
- STATUS2TXT(buf),
+ STATUS2TXT,
bt->timeout,
time);
- bt->last_state = bt->state;
+ last_printed = bt->state;
+ }
- if (bt->state == BT_STATE_HOSED)
- return SI_SM_HOSED;
+ /* Commands that time out may still (eventually) provide a response.
+ This stale response will get in the way of a new response so remove
+ it if possible (hopefully during IDLE). Even if it comes up later
+ it will be rejected by its (now-forgotten) seq number. */
+
+ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+ drain_BMC2HOST(bt);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
- if (bt->state != BT_STATE_IDLE) { /* do timeout test */
+ if ((bt->state != BT_STATE_IDLE) &&
+ (bt->state < BT_STATE_PRINTME)) { /* check timeout */
bt->timeout -= time;
- if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
- error_recovery(bt, "timed out");
- return SI_SM_CALL_WITHOUT_DELAY;
- }
+ if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+ return error_recovery(bt,
+ status,
+ IPMI_TIMEOUT_ERR);
}
switch (bt->state) {
- case BT_STATE_IDLE: /* check for asynchronous messages */
+ /* Idle state first checks for asynchronous messages from another
+ channel, then does some opportunistic housekeeping. */
+
+ case BT_STATE_IDLE:
if (status & BT_SMS_ATN) {
BT_CONTROL(BT_SMS_ATN); /* clear it */
return SI_SM_ATTN;
}
- return SI_SM_IDLE;
- case BT_STATE_XACTION_START:
- if (status & BT_H_BUSY) {
+ if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
BT_CONTROL(BT_H_BUSY);
- break;
- }
- if (status & BT_B2H_ATN)
- break;
- bt->state = BT_STATE_WRITE_BYTES;
- return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
- case BT_STATE_WRITE_BYTES:
+ /* Read BT capabilities if it hasn't been done yet */
+ if (!bt->BT_CAP_outreqs)
+ BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+ SI_SM_CALL_WITHOUT_DELAY);
+ bt->timeout = bt->BT_CAP_req2rsp;
+ BT_SI_SM_RETURN(SI_SM_IDLE);
+
+ case BT_STATE_XACTION_START:
if (status & (BT_B_BUSY | BT_H2B_ATN))
- break;
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ if (BT_STATUS & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* force clear */
+ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_WRITE_BYTES:
+ if (status & BT_H_BUSY)
+ BT_CONTROL(BT_H_BUSY); /* clear */
BT_CONTROL(BT_CLR_WR_PTR);
write_all_bytes(bt);
- BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */
- bt->state = BT_STATE_WRITE_CONSUME;
- return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
-
- case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
- if (status & (BT_H2B_ATN | BT_B_BUSY))
- break;
- bt->state = BT_STATE_B2H_WAIT;
- /* fall through with status */
-
- /* Stay in BT_STATE_B2H_WAIT until a packet matches. However, spinning
- hard here, constantly reading status, seems to hold off the
- generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
-
- case BT_STATE_B2H_WAIT:
- if (!(status & BT_B2H_ATN))
- break;
-
- /* Assume ordered, uncached writes: no need to wait */
- if (!(status & BT_H_BUSY))
- BT_CONTROL(BT_H_BUSY); /* set */
- BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */
- BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */
- i = read_all_bytes(bt);
- BT_CONTROL(BT_H_BUSY); /* clear */
- if (!i) /* Try this state again */
- break;
- bt->state = BT_STATE_READ_END;
- return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
-
- case BT_STATE_READ_END:
-
- /* I could wait on BT_H_BUSY to go clear for a truly clean
- exit. However, this is already done in XACTION_START
- and the (possible) extra loop/status/possible wait affects
- performance. So, as long as it works, just ignore H_BUSY */
-
-#ifdef MAKE_THIS_TRUE_IF_NECESSARY
+ BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */
+ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+ SI_SM_CALL_WITHOUT_DELAY);
- if (status & BT_H_BUSY)
- break;
-#endif
- bt->seq++;
- bt->state = BT_STATE_IDLE;
- return SI_SM_TRANSACTION_COMPLETE;
+ case BT_STATE_WRITE_CONSUME:
+ if (status & (BT_B_BUSY | BT_H2B_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ /* Spinning hard can suppress B2H_ATN and force a timeout */
+
+ case BT_STATE_READ_WAIT:
+ if (!(status & BT_B2H_ATN))
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ BT_CONTROL(BT_H_BUSY); /* set */
+
+ /* Uncached, ordered writes should just proceed serially but
+ some BMCs don't clear B2H_ATN with one hit. Fast-path a
+ workaround without too much penalty to the general case. */
+
+ BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */
+ BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_CLEAR_B2H:
+ if (status & BT_B2H_ATN) { /* keep hitting it */
+ BT_CONTROL(BT_B2H_ATN);
+ BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+ }
+ BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+ SI_SM_CALL_WITHOUT_DELAY);
+
+ case BT_STATE_READ_BYTES:
+ if (!(status & BT_H_BUSY)) /* check in case of retry */
+ BT_CONTROL(BT_H_BUSY);
+ BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */
+ i = read_all_bytes(bt); /* true == packet seq match */
+ BT_CONTROL(BT_H_BUSY); /* NOW clear */
+ if (!i) /* Not my message */
+ BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+ SI_SM_CALL_WITHOUT_DELAY);
+ bt->state = bt->complete;
+ return bt->state == BT_STATE_IDLE ? /* where to next? */
+ SI_SM_TRANSACTION_COMPLETE : /* normal */
+ SI_SM_CALL_WITHOUT_DELAY; /* Startup magic */
+
+ case BT_STATE_LONG_BUSY: /* For example: after FW update */
+ if (!(status & BT_B_BUSY)) {
+ reset_flags(bt); /* next state is now IDLE */
+ bt_init_data(bt, bt->io);
+ }
+ return SI_SM_CALL_WITH_DELAY; /* No repeat printing */
case BT_STATE_RESET1:
- reset_flags(bt);
- bt->timeout = BT_RESET_DELAY;
- bt->state = BT_STATE_RESET2;
- break;
+ reset_flags(bt);
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESET2,
+ SI_SM_CALL_WITH_DELAY);
case BT_STATE_RESET2: /* Send a soft reset */
BT_CONTROL(BT_CLR_WR_PTR);
@@ -464,29 +579,59 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
HOST2BMC(42); /* Sequence number */
HOST2BMC(3); /* Cmd == Soft reset */
BT_CONTROL(BT_H2B_ATN);
- bt->state = BT_STATE_RESET3;
- break;
+ bt->timeout = BT_RESET_DELAY * 1000000;
+ BT_STATE_CHANGE(BT_STATE_RESET3,
+ SI_SM_CALL_WITH_DELAY);
- case BT_STATE_RESET3:
+ case BT_STATE_RESET3: /* Hold off everything for a bit */
if (bt->timeout > 0)
- return SI_SM_CALL_WITH_DELAY;
- bt->state = BT_STATE_RESTART; /* printk in debug modes */
- break;
+ return SI_SM_CALL_WITH_DELAY;
+ drain_BMC2HOST(bt);
+ BT_STATE_CHANGE(BT_STATE_RESTART,
+ SI_SM_CALL_WITH_DELAY);
- case BT_STATE_RESTART: /* don't reset retries! */
- reset_flags(bt);
- bt->write_data[2] = ++bt->seq;
+ case BT_STATE_RESTART: /* don't reset retries or seq! */
bt->read_count = 0;
bt->nonzero_status = 0;
- bt->timeout = BT_NORMAL_TIMEOUT;
- bt->state = BT_STATE_XACTION_START;
- break;
-
- default: /* HOSED is supposed to be caught much earlier */
- error_recovery(bt, "internal logic error");
- break;
- }
- return SI_SM_CALL_WITH_DELAY;
+ bt->timeout = bt->BT_CAP_req2rsp;
+ BT_STATE_CHANGE(BT_STATE_XACTION_START,
+ SI_SM_CALL_WITH_DELAY);
+
+ /* Get BT Capabilities, using timing of upper level state machine.
+ Set outreqs to prevent infinite loop on timeout. */
+ case BT_STATE_CAPABILITIES_BEGIN:
+ bt->BT_CAP_outreqs = 1;
+ {
+ unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+ bt->state = BT_STATE_IDLE;
+ bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+ }
+ bt->complete = BT_STATE_CAPABILITIES_END;
+ BT_STATE_CHANGE(BT_STATE_XACTION_START,
+ SI_SM_CALL_WITH_DELAY);
+
+ case BT_STATE_CAPABILITIES_END:
+ i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+ bt_init_data(bt, bt->io);
+ if ((i == 8) && !BT_CAP[2]) {
+ bt->BT_CAP_outreqs = BT_CAP[3];
+ bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+ bt->BT_CAP_retries = BT_CAP[7];
+ } else
+ printk(KERN_WARNING "IPMI BT: using default values\n");
+ if (!bt->BT_CAP_outreqs)
+ bt->BT_CAP_outreqs = 1;
+ printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
+ bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+ bt->timeout = bt->BT_CAP_req2rsp;
+ return SI_SM_CALL_WITHOUT_DELAY;
+
+ default: /* should never occur */
+ return error_recovery(bt,
+ status,
+ IPMI_ERR_UNSPECIFIED);
+ }
+ return SI_SM_CALL_WITH_DELAY;
}
static int bt_detect(struct si_sm_data *bt)
@@ -497,7 +642,7 @@ static int bt_detect(struct si_sm_data *bt)
test that first. The calling routine uses negative logic. */
if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
- return 1;
+ return 1;
reset_flags(bt);
return 0;
}
@@ -513,11 +658,11 @@ static int bt_size(void)
struct si_sm_handlers bt_smi_handlers =
{
- .init_data = bt_init_data,
- .start_transaction = bt_start_transaction,
- .get_result = bt_get_result,
- .event = bt_event,
- .detect = bt_detect,
- .cleanup = bt_cleanup,
- .size = bt_size,
+ .init_data = bt_init_data,
+ .start_transaction = bt_start_transaction,
+ .get_result = bt_get_result,
+ .event = bt_event,
+ .detect = bt_detect,
+ .cleanup = bt_cleanup,
+ .size = bt_size,
};
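
The handler table above is the contract between this state machine and its caller. As a hedged illustration of how a struct si_sm_handlers table such as bt_smi_handlers is driven (the real consumer, ipmi_si_intf.c, does this asynchronously and also handles the SI_SM_IDLE and SI_SM_ATTN results, which this sketch ignores for brevity), a simplified synchronous polling loop might look like the following; example_one_transaction() and the 100-microsecond tick are assumptions for the sketch, not driver code.

/* Hedged sketch only: push one request/response through a
 * struct si_sm_handlers table.  Assumes "ipmi_si_sm.h" for the
 * types and <linux/delay.h> for udelay(). */
static int example_one_transaction(struct si_sm_data *sm,
				   struct si_sm_handlers *h,
				   unsigned char *req, unsigned int req_len,
				   unsigned char *rsp, unsigned int rsp_max)
{
	enum si_sm_result res;
	int rv;

	rv = h->start_transaction(sm, req, req_len);
	if (rv)
		return rv;		/* e.g. IPMI_NOT_IN_MY_STATE_ERR */

	do {
		res = h->event(sm, 100);	/* tell the SM 100 us passed */
		if (res == SI_SM_CALL_WITH_DELAY)
			udelay(100);		/* give the BMC some time */
	} while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED);

	if (res == SI_SM_HOSED)
		return -EIO;		/* interface is unusable */

	return h->get_result(sm, rsp, rsp_max);	/* response length */
}
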
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 81fcf0ce21d..375d3378eec 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -596,6 +596,31 @@ static int ipmi_ioctl(struct inode *inode,
rv = 0;
break;
}
+
+ case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ mode = ipmi_get_maintenance_mode(priv->user);
+ if (copy_to_user(arg, &mode, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = 0;
+ break;
+ }
+
+ case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+ {
+ int mode;
+
+ if (copy_from_user(&mode, arg, sizeof(mode))) {
+ rv = -EFAULT;
+ break;
+ }
+ rv = ipmi_set_maintenance_mode(priv->user, mode);
+ break;
+ }
}
return rv;
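
The two new ioctls above move an int in and out of the kernel by pointer. A hedged user-space sketch of calling them, assuming an accessible /dev/ipmi0 node and that <linux/ipmi.h> exports the IPMICTL_*_MAINTENANCE_MODE_CMD numbers as the rest of this patch series implies:

/* Hedged user-space sketch of the two maintenance-mode ioctls. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int main(void)
{
	int fd, mode = 0;

	fd = open("/dev/ipmi0", O_RDWR);
	if (fd < 0)
		return 1;

	if (ioctl(fd, IPMICTL_GET_MAINTENANCE_MODE_CMD, &mode) == 0)
		printf("current maintenance mode: %d\n", mode);

	/* Write the same value back; the valid mode constants are
	 * whatever <linux/ipmi.h> defines, which this sketch does
	 * not assume. */
	if (ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode) != 0)
		perror("IPMICTL_SET_MAINTENANCE_MODE_CMD");

	close(fd);
	return 0;
}
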
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 2062675f9e9..c1b8228cb7b 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -93,8 +93,8 @@ enum kcs_states {
state machine. */
};
-#define MAX_KCS_READ_SIZE 80
-#define MAX_KCS_WRITE_SIZE 80
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
/* Timeouts in microseconds. */
#define IBF_RETRY_TIMEOUT 1000000
@@ -261,12 +261,14 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
{
unsigned int i;
- if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
- return -1;
- }
- if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
- return -2;
- }
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_KCS_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+ return IPMI_NOT_IN_MY_STATE_ERR;
+
if (kcs_debug & KCS_DEBUG_MSG) {
printk(KERN_DEBUG "start_kcs_transaction -");
for (i = 0; i < size; i ++) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c47add8e47d..5703ee28e1c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
#define PFX "IPMI message handler: "
-#define IPMI_DRIVER_VERSION "39.0"
+#define IPMI_DRIVER_VERSION "39.1"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
@@ -59,6 +59,9 @@ static int initialized = 0;
static struct proc_dir_entry *proc_ipmi_root = NULL;
#endif /* CONFIG_PROC_FS */
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
#define MAX_EVENTS_IN_QUEUE 25
/* Don't let a message sit in a queue forever, always time it with at least
@@ -193,17 +196,28 @@ struct ipmi_smi
struct kref refcount;
+ /* Used for a list of interfaces. */
+ struct list_head link;
+
/* The list of upper layers that are using me. seq_lock
* protects this. */
struct list_head users;
+ /* Information to supply to users. */
+ unsigned char ipmi_version_major;
+ unsigned char ipmi_version_minor;
+
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
struct bmc_device *bmc;
char *my_dev_name;
+ char *sysfs_name;
- /* This is the lower-layer's sender routine. */
+ /* This is the lower-layer's sender routine. Note that you
+ * must either be holding the ipmi_interfaces_mutex or be in
+ * an umpreemptible region to use this. You must fetch the
+ * value into a local variable and make sure it is not NULL. */
struct ipmi_smi_handlers *handlers;
void *send_info;
@@ -242,6 +256,7 @@ struct ipmi_smi
spinlock_t events_lock; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
+ int delivering_events;
/* The event receiver for my BMC, only really used at panic
shutdown as a place to store this. */
@@ -250,6 +265,12 @@ struct ipmi_smi
unsigned char local_sel_device;
unsigned char local_event_generator;
+ /* For handling of maintenance mode. */
+ int maintenance_mode;
+ int maintenance_mode_enable;
+ int auto_maintenance_timeout;
+ spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
/* A cheap hack, if this is non-null and a message to an
interface comes in with a NULL user, call this routine with
it. Note that the message will still be freed by the
@@ -338,13 +359,6 @@ struct ipmi_smi
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
-/* Used to mark an interface entry that cannot be used but is not a
- * free entry, either, primarily used at creation and deletion time so
- * a slot doesn't get reused too quickly. */
-#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
-#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
- || (i == IPMI_INVALID_INTERFACE_ENTRY))
-
/**
* The driver model view of the IPMI messaging driver.
*/
@@ -354,16 +368,13 @@ static struct device_driver ipmidriver = {
};
static DEFINE_MUTEX(ipmidriver_mutex);
-#define MAX_IPMI_INTERFACES 4
-static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
-
-/* Directly protects the ipmi_interfaces data structure. */
-static DEFINE_SPINLOCK(interfaces_lock);
+static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
/* List of watchers that want to know when smi's are added and
deleted. */
static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
-static DECLARE_RWSEM(smi_watchers_sem);
+static DEFINE_MUTEX(smi_watchers_mutex);
static void free_recv_msg_list(struct list_head *q)
@@ -423,48 +434,84 @@ static void intf_free(struct kref *ref)
kfree(intf);
}
+struct watcher_entry {
+ int intf_num;
+ ipmi_smi_t intf;
+ struct list_head link;
+};
+
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
- int i;
- unsigned long flags;
+ ipmi_smi_t intf;
+ struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
+ struct watcher_entry *e, *e2;
+
+ mutex_lock(&smi_watchers_mutex);
+
+ mutex_lock(&ipmi_interfaces_mutex);
- down_write(&smi_watchers_sem);
- list_add(&(watcher->link), &smi_watchers);
- up_write(&smi_watchers_sem);
- spin_lock_irqsave(&interfaces_lock, flags);
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- ipmi_smi_t intf = ipmi_interfaces[i];
- if (IPMI_INVALID_INTERFACE(intf))
+ /* Build a list of things to deliver. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == -1)
continue;
- spin_unlock_irqrestore(&interfaces_lock, flags);
- watcher->new_smi(i, intf->si_dev);
- spin_lock_irqsave(&interfaces_lock, flags);
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto out_err;
+ kref_get(&intf->refcount);
+ e->intf = intf;
+ e->intf_num = intf->intf_num;
+ list_add_tail(&e->link, &to_deliver);
}
- spin_unlock_irqrestore(&interfaces_lock, flags);
+
+ /* We will succeed, so add it to the list. */
+ list_add(&watcher->link, &smi_watchers);
+
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ list_for_each_entry_safe(e, e2, &to_deliver, link) {
+ list_del(&e->link);
+ watcher->new_smi(e->intf_num, e->intf->si_dev);
+ kref_put(&e->intf->refcount, intf_free);
+ kfree(e);
+ }
+
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
+
+ out_err:
+ mutex_unlock(&ipmi_interfaces_mutex);
+ mutex_unlock(&smi_watchers_mutex);
+ list_for_each_entry_safe(e, e2, &to_deliver, link) {
+ list_del(&e->link);
+ kref_put(&e->intf->refcount, intf_free);
+ kfree(e);
+ }
+ return -ENOMEM;
}
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
- down_write(&smi_watchers_sem);
+ mutex_lock(&smi_watchers_mutex);
list_del(&(watcher->link));
- up_write(&smi_watchers_sem);
+ mutex_unlock(&smi_watchers_mutex);
return 0;
}
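For context, a watcher is just a small callback structure; the sketch below is a hypothetical client of this API (the my_* names are made up), using only the fields this file touches (owner, new_smi, smi_gone, link):

	static void my_new_smi(int if_num, struct device *dev)
	{
		/* Interface if_num has just been registered. */
	}

	static void my_smi_gone(int if_num)
	{
		/* Interface if_num is going away; release anything tied to it. */
	}

	static struct ipmi_smi_watcher my_watcher = {
		.owner    = THIS_MODULE,
		.new_smi  = my_new_smi,
		.smi_gone = my_smi_gone,
	};

	/* ipmi_smi_watcher_register(&my_watcher); later: ipmi_smi_watcher_unregister(&my_watcher); */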
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- down_read(&smi_watchers_sem);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- up_read(&smi_watchers_sem);
}
static int
@@ -590,6 +637,17 @@ static void deliver_response(struct ipmi_recv_msg *msg)
}
}
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ msg->msg_data[0] = err;
+ msg->msg.netfn |= 1; /* Convert to a response. */
+ msg->msg.data_len = 1;
+ msg->msg.data = msg->msg_data;
+ deliver_response(msg);
+}
+
/* Find the next sequence number not being used and add the given
message with the given timeout to the sequence table. This must be
called with the interface's seq_lock held. */
@@ -727,14 +785,8 @@ static int intf_err_seq(ipmi_smi_t intf,
}
spin_unlock_irqrestore(&(intf->seq_lock), flags);
- if (msg) {
- msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
- msg->msg_data[0] = err;
- msg->msg.netfn |= 1; /* Convert to a response. */
- msg->msg.data_len = 1;
- msg->msg.data = msg->msg_data;
- deliver_response(msg);
- }
+ if (msg)
+ deliver_err_response(msg, err);
return rv;
}
@@ -776,17 +828,18 @@ int ipmi_create_user(unsigned int if_num,
if (!new_user)
return -ENOMEM;
- spin_lock_irqsave(&interfaces_lock, flags);
- intf = ipmi_interfaces[if_num];
- if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
- spin_unlock_irqrestore(&interfaces_lock, flags);
- rv = -EINVAL;
- goto out_kfree;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
}
+ /* Not found, return an error */
+ rv = -EINVAL;
+ goto out_kfree;
+ found:
/* Note that each existing user holds a refcount to the interface. */
kref_get(&intf->refcount);
- spin_unlock_irqrestore(&interfaces_lock, flags);
kref_init(&new_user->refcount);
new_user->handler = handler;
@@ -807,6 +860,10 @@ int ipmi_create_user(unsigned int if_num,
}
}
+ /* Hold the lock so intf->handlers is guaranteed to be good
+ * until now */
+ mutex_unlock(&ipmi_interfaces_mutex);
+
new_user->valid = 1;
spin_lock_irqsave(&intf->seq_lock, flags);
list_add_rcu(&new_user->link, &intf->users);
@@ -817,6 +874,7 @@ int ipmi_create_user(unsigned int if_num,
out_kref:
kref_put(&intf->refcount, intf_free);
out_kfree:
+ mutex_unlock(&ipmi_interfaces_mutex);
kfree(new_user);
return rv;
}
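A minimal sketch of an in-kernel caller of ipmi_create_user(), assuming the usual struct ipmi_user_hndl receive callback from include/linux/ipmi.h (the my_* names are illustrative):

	static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
	{
		/* The user owns delivered messages and must free them. */
		ipmi_free_recv_msg(msg);
	}

	static struct ipmi_user_hndl my_hndl = {
		.ipmi_recv_hndl = my_recv,
	};
	static ipmi_user_t my_user;

	rv = ipmi_create_user(0, &my_hndl, NULL, &my_user);
	if (rv)
		return rv;	/* -EINVAL if interface 0 does not exist */
	/* ... send requests with my_user ... */
	ipmi_destroy_user(my_user);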
@@ -846,6 +904,7 @@ int ipmi_destroy_user(ipmi_user_t user)
&& (intf->seq_table[i].recv_msg->user == user))
{
intf->seq_table[i].inuse = 0;
+ ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
spin_unlock_irqrestore(&intf->seq_lock, flags);
@@ -872,9 +931,13 @@ int ipmi_destroy_user(ipmi_user_t user)
kfree(rcvr);
}
- module_put(intf->handlers->owner);
- if (intf->handlers->dec_usecount)
- intf->handlers->dec_usecount(intf->send_info);
+ mutex_lock(&ipmi_interfaces_mutex);
+ if (intf->handlers) {
+ module_put(intf->handlers->owner);
+ if (intf->handlers->dec_usecount)
+ intf->handlers->dec_usecount(intf->send_info);
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
kref_put(&intf->refcount, intf_free);
@@ -887,8 +950,8 @@ void ipmi_get_version(ipmi_user_t user,
unsigned char *major,
unsigned char *minor)
{
- *major = ipmi_version_major(&user->intf->bmc->id);
- *minor = ipmi_version_minor(&user->intf->bmc->id);
+ *major = user->intf->ipmi_version_major;
+ *minor = user->intf->ipmi_version_minor;
}
int ipmi_set_my_address(ipmi_user_t user,
@@ -931,6 +994,65 @@ int ipmi_get_my_LUN(ipmi_user_t user,
return 0;
}
+int ipmi_get_maintenance_mode(ipmi_user_t user)
+{
+ int mode;
+ unsigned long flags;
+
+ spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+ mode = user->intf->maintenance_mode;
+ spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+
+ return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(ipmi_smi_t intf)
+{
+ if (intf->handlers->set_maintenance_mode)
+ intf->handlers->set_maintenance_mode(
+ intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
+{
+ int rv = 0;
+ unsigned long flags;
+ ipmi_smi_t intf = user->intf;
+
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->maintenance_mode != mode) {
+ switch (mode) {
+ case IPMI_MAINTENANCE_MODE_AUTO:
+ intf->maintenance_mode = mode;
+ intf->maintenance_mode_enable
+ = (intf->auto_maintenance_timeout > 0);
+ break;
+
+ case IPMI_MAINTENANCE_MODE_OFF:
+ intf->maintenance_mode = mode;
+ intf->maintenance_mode_enable = 0;
+ break;
+
+ case IPMI_MAINTENANCE_MODE_ON:
+ intf->maintenance_mode = mode;
+ intf->maintenance_mode_enable = 1;
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto out_unlock;
+ }
+
+ maintenance_mode_update(intf);
+ }
+ out_unlock:
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
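A usage sketch for the new maintenance-mode calls, assuming a valid ipmi_user_t obtained from ipmi_create_user(); the mode constants come from the ipmi headers, and the -EINVAL case is the switch default above:

	int old_mode = ipmi_get_maintenance_mode(user);

	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
	if (rv)
		return rv;	/* unknown mode */
	/* ... issue cold/warm reset or firmware-transfer commands ... */
	ipmi_set_maintenance_mode(user, old_mode);

In IPMI_MAINTENANCE_MODE_AUTO the interface enables maintenance mode by itself when it sees a cold/warm reset or a firmware-netfn request, and drops it again after the auto_maintenance_timeout handled in the timer code below.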
int ipmi_set_gets_events(ipmi_user_t user, int val)
{
unsigned long flags;
@@ -943,20 +1065,33 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
spin_lock_irqsave(&intf->events_lock, flags);
user->gets_events = val;
- if (val) {
- /* Deliver any queued events. */
+ if (intf->delivering_events)
+ /*
+ * Another thread is delivering events for this, so
+ * let it handle any new events.
+ */
+ goto out;
+
+ /* Deliver any queued events. */
+ while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
- }
- /* Hold the events lock while doing this to preserve order. */
- list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
- deliver_response(msg);
+ intf->delivering_events = 1;
+ spin_unlock_irqrestore(&intf->events_lock, flags);
+
+ list_for_each_entry_safe(msg, msg2, &msgs, link) {
+ msg->user = user;
+ kref_get(&user->refcount);
+ deliver_response(msg);
+ }
+
+ spin_lock_irqsave(&intf->events_lock, flags);
+ intf->delivering_events = 0;
}
+ out:
spin_unlock_irqrestore(&intf->events_lock, flags);
return 0;
@@ -1067,7 +1202,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
{
ipmi_smi_t intf = user->intf;
- intf->handlers->set_run_to_completion(intf->send_info, val);
+ if (intf->handlers)
+ intf->handlers->set_run_to_completion(intf->send_info, val);
}
static unsigned char
@@ -1178,10 +1314,11 @@ static int i_ipmi_request(ipmi_user_t user,
int retries,
unsigned int retry_time_ms)
{
- int rv = 0;
- struct ipmi_smi_msg *smi_msg;
- struct ipmi_recv_msg *recv_msg;
- unsigned long flags;
+ int rv = 0;
+ struct ipmi_smi_msg *smi_msg;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned long flags;
+ struct ipmi_smi_handlers *handlers;
if (supplied_recv) {
@@ -1204,6 +1341,13 @@ static int i_ipmi_request(ipmi_user_t user,
}
}
+ rcu_read_lock();
+ handlers = intf->handlers;
+ if (!handlers) {
+ rv = -ENODEV;
+ goto out_err;
+ }
+
recv_msg->user = user;
if (user)
kref_get(&user->refcount);
@@ -1246,6 +1390,24 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
+ if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+ && ((msg->cmd == IPMI_COLD_RESET_CMD)
+ || (msg->cmd == IPMI_WARM_RESET_CMD)))
+ || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
+ {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ intf->auto_maintenance_timeout
+ = IPMI_MAINTENANCE_MODE_TIMEOUT;
+ if (!intf->maintenance_mode
+ && !intf->maintenance_mode_enable)
+ {
+ intf->maintenance_mode_enable = 1;
+ maintenance_mode_update(intf);
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
+
if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
spin_lock_irqsave(&intf->counter_lock, flags);
intf->sent_invalid_commands++;
@@ -1520,11 +1682,14 @@ static int i_ipmi_request(ipmi_user_t user,
printk("\n");
}
#endif
- intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+ handlers->sender(intf->send_info, smi_msg, priority);
+ rcu_read_unlock();
return 0;
out_err:
+ rcu_read_unlock();
ipmi_free_smi_msg(smi_msg);
ipmi_free_recv_msg(recv_msg);
return rv;
@@ -1604,6 +1769,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
-1, 0);
}
+#ifdef CONFIG_PROC_FS
static int ipmb_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
@@ -1692,6 +1858,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
return (out - ((char *) page));
}
+#endif /* CONFIG_PROC_FS */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
read_proc_t *read_proc, write_proc_t *write_proc,
@@ -1817,13 +1984,12 @@ static int __find_bmc_prod_dev_id(struct device *dev, void *data)
struct bmc_device *bmc = dev_get_drvdata(dev);
return (bmc->id.product_id == id->product_id
- && bmc->id.product_id == id->product_id
&& bmc->id.device_id == id->device_id);
}
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
struct device_driver *drv,
- unsigned char product_id, unsigned char device_id)
+ unsigned int product_id, unsigned char device_id)
{
struct prod_dev_id id = {
.product_id = product_id,
@@ -1940,6 +2106,9 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
static void remove_files(struct bmc_device *bmc)
{
+ if (!bmc->dev)
+ return;
+
device_remove_file(&bmc->dev->dev,
&bmc->device_id_attr);
device_remove_file(&bmc->dev->dev,
@@ -1973,7 +2142,8 @@ cleanup_bmc_device(struct kref *ref)
bmc = container_of(ref, struct bmc_device, refcount);
remove_files(bmc);
- platform_device_unregister(bmc->dev);
+ if (bmc->dev)
+ platform_device_unregister(bmc->dev);
kfree(bmc);
}
@@ -1981,7 +2151,11 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
struct bmc_device *bmc = intf->bmc;
- sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+ if (intf->sysfs_name) {
+ sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
+ }
if (intf->my_dev_name) {
sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
kfree(intf->my_dev_name);
@@ -1990,6 +2164,7 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
mutex_lock(&ipmidriver_mutex);
kref_put(&bmc->refcount, cleanup_bmc_device);
+ intf->bmc = NULL;
mutex_unlock(&ipmidriver_mutex);
}
@@ -1997,6 +2172,56 @@ static int create_files(struct bmc_device *bmc)
{
int err;
+ bmc->device_id_attr.attr.name = "device_id";
+ bmc->device_id_attr.attr.owner = THIS_MODULE;
+ bmc->device_id_attr.attr.mode = S_IRUGO;
+ bmc->device_id_attr.show = device_id_show;
+
+ bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
+ bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
+ bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
+ bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+
+ bmc->revision_attr.attr.name = "revision";
+ bmc->revision_attr.attr.owner = THIS_MODULE;
+ bmc->revision_attr.attr.mode = S_IRUGO;
+ bmc->revision_attr.show = revision_show;
+
+ bmc->firmware_rev_attr.attr.name = "firmware_revision";
+ bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
+ bmc->firmware_rev_attr.attr.mode = S_IRUGO;
+ bmc->firmware_rev_attr.show = firmware_rev_show;
+
+ bmc->version_attr.attr.name = "ipmi_version";
+ bmc->version_attr.attr.owner = THIS_MODULE;
+ bmc->version_attr.attr.mode = S_IRUGO;
+ bmc->version_attr.show = ipmi_version_show;
+
+ bmc->add_dev_support_attr.attr.name = "additional_device_support";
+ bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
+ bmc->add_dev_support_attr.attr.mode = S_IRUGO;
+ bmc->add_dev_support_attr.show = add_dev_support_show;
+
+ bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
+ bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
+ bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
+ bmc->manufacturer_id_attr.show = manufacturer_id_show;
+
+ bmc->product_id_attr.attr.name = "product_id";
+ bmc->product_id_attr.attr.owner = THIS_MODULE;
+ bmc->product_id_attr.attr.mode = S_IRUGO;
+ bmc->product_id_attr.show = product_id_show;
+
+ bmc->guid_attr.attr.name = "guid";
+ bmc->guid_attr.attr.owner = THIS_MODULE;
+ bmc->guid_attr.attr.mode = S_IRUGO;
+ bmc->guid_attr.show = guid_show;
+
+ bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+ bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
+ bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
+ bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+
err = device_create_file(&bmc->dev->dev,
&bmc->device_id_attr);
if (err) goto out;
@@ -2066,7 +2291,8 @@ out:
return err;
}
-static int ipmi_bmc_register(ipmi_smi_t intf)
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
+ const char *sysfs_name)
{
int rv;
struct bmc_device *bmc = intf->bmc;
@@ -2106,9 +2332,39 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
bmc->id.product_id,
bmc->id.device_id);
} else {
- bmc->dev = platform_device_alloc("ipmi_bmc",
- bmc->id.device_id);
+ char name[14];
+ unsigned char orig_dev_id = bmc->id.device_id;
+ int warn_printed = 0;
+
+ snprintf(name, sizeof(name),
+ "ipmi_bmc.%4.4x", bmc->id.product_id);
+
+ while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
+ bmc->id.product_id,
+ bmc->id.device_id))
+ {
+ if (!warn_printed) {
+ printk(KERN_WARNING PFX
+ "This machine has two different BMCs"
+ " with the same product id and device"
+ " id.  This is an error in the"
+ " firmware; incrementing the device"
+ " id to work around the problem."
+ " Prod ID = 0x%x, Dev ID = 0x%x\n",
+ bmc->id.product_id, bmc->id.device_id);
+ warn_printed = 1;
+ }
+ bmc->id.device_id++; /* Wraps at 255 */
+ if (bmc->id.device_id == orig_dev_id) {
+ printk(KERN_ERR PFX
+ "Out of device ids!\n");
+ break;
+ }
+ }
+
+ bmc->dev = platform_device_alloc(name, bmc->id.device_id);
if (!bmc->dev) {
+ mutex_unlock(&ipmidriver_mutex);
printk(KERN_ERR
"ipmi_msghandler:"
" Unable to allocate platform device\n");
@@ -2121,6 +2377,8 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
rv = platform_device_add(bmc->dev);
mutex_unlock(&ipmidriver_mutex);
if (rv) {
+ platform_device_put(bmc->dev);
+ bmc->dev = NULL;
printk(KERN_ERR
"ipmi_msghandler:"
" Unable to register bmc device: %d\n",
@@ -2130,57 +2388,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
return rv;
}
- bmc->device_id_attr.attr.name = "device_id";
- bmc->device_id_attr.attr.owner = THIS_MODULE;
- bmc->device_id_attr.attr.mode = S_IRUGO;
- bmc->device_id_attr.show = device_id_show;
-
- bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
- bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
- bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
- bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
-
- bmc->revision_attr.attr.name = "revision";
- bmc->revision_attr.attr.owner = THIS_MODULE;
- bmc->revision_attr.attr.mode = S_IRUGO;
- bmc->revision_attr.show = revision_show;
-
- bmc->firmware_rev_attr.attr.name = "firmware_revision";
- bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
- bmc->firmware_rev_attr.attr.mode = S_IRUGO;
- bmc->firmware_rev_attr.show = firmware_rev_show;
-
- bmc->version_attr.attr.name = "ipmi_version";
- bmc->version_attr.attr.owner = THIS_MODULE;
- bmc->version_attr.attr.mode = S_IRUGO;
- bmc->version_attr.show = ipmi_version_show;
-
- bmc->add_dev_support_attr.attr.name
- = "additional_device_support";
- bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
- bmc->add_dev_support_attr.attr.mode = S_IRUGO;
- bmc->add_dev_support_attr.show = add_dev_support_show;
-
- bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
- bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
- bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
- bmc->manufacturer_id_attr.show = manufacturer_id_show;
-
- bmc->product_id_attr.attr.name = "product_id";
- bmc->product_id_attr.attr.owner = THIS_MODULE;
- bmc->product_id_attr.attr.mode = S_IRUGO;
- bmc->product_id_attr.show = product_id_show;
-
- bmc->guid_attr.attr.name = "guid";
- bmc->guid_attr.attr.owner = THIS_MODULE;
- bmc->guid_attr.attr.mode = S_IRUGO;
- bmc->guid_attr.show = guid_show;
-
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
- bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
- bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
- bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
-
rv = create_files(bmc);
if (rv) {
mutex_lock(&ipmidriver_mutex);
@@ -2202,29 +2409,44 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
* create symlink from system interface device to bmc device
* and back.
*/
+ intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
+ if (!intf->sysfs_name) {
+ rv = -ENOMEM;
+ printk(KERN_ERR
+ "ipmi_msghandler: Unable to allocate link to BMC: %d\n",
+ rv);
+ goto out_err;
+ }
+
rv = sysfs_create_link(&intf->si_dev->kobj,
- &bmc->dev->dev.kobj, "bmc");
+ &bmc->dev->dev.kobj, intf->sysfs_name);
if (rv) {
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
printk(KERN_ERR
"ipmi_msghandler: Unable to create bmc symlink: %d\n",
rv);
goto out_err;
}
- size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
+ size = snprintf(dummy, 0, "ipmi%d", ifnum);
intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
if (!intf->my_dev_name) {
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
rv = -ENOMEM;
printk(KERN_ERR
"ipmi_msghandler: Unable to allocate link from BMC: %d\n",
rv);
goto out_err;
}
- snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
+ snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
intf->my_dev_name);
if (rv) {
+ kfree(intf->sysfs_name);
+ intf->sysfs_name = NULL;
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
printk(KERN_ERR
@@ -2409,17 +2631,14 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *si_dev,
+ const char *sysfs_name,
unsigned char slave_addr)
{
int i, j;
int rv;
ipmi_smi_t intf;
- unsigned long flags;
- int version_major;
- int version_minor;
-
- version_major = ipmi_version_major(device_id);
- version_minor = ipmi_version_minor(device_id);
+ ipmi_smi_t tintf;
+ struct list_head *link;
/* Make sure the driver is actually initialized, this handles
problems with initialization order. */
@@ -2437,12 +2656,16 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (!intf)
return -ENOMEM;
memset(intf, 0, sizeof(*intf));
+
+ intf->ipmi_version_major = ipmi_version_major(device_id);
+ intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
if (!intf->bmc) {
kfree(intf);
return -ENOMEM;
}
- intf->intf_num = -1;
+ intf->intf_num = -1; /* Mark it invalid for now. */
kref_init(&intf->refcount);
intf->bmc->id = *device_id;
intf->si_dev = si_dev;
@@ -2470,26 +2693,30 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
mutex_init(&intf->cmd_rcvrs_mutex);
+ spin_lock_init(&intf->maintenance_mode_lock);
INIT_LIST_HEAD(&intf->cmd_rcvrs);
init_waitqueue_head(&intf->waitq);
spin_lock_init(&intf->counter_lock);
intf->proc_dir = NULL;
- rv = -ENOMEM;
- spin_lock_irqsave(&interfaces_lock, flags);
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- if (ipmi_interfaces[i] == NULL) {
- intf->intf_num = i;
- /* Reserve the entry till we are done. */
- ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
- rv = 0;
+ mutex_lock(&smi_watchers_mutex);
+ mutex_lock(&ipmi_interfaces_mutex);
+ /* Look for a hole in the numbers. */
+ i = 0;
+ link = &ipmi_interfaces;
+ list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+ if (tintf->intf_num != i) {
+ link = &tintf->link;
break;
}
+ i++;
}
- spin_unlock_irqrestore(&interfaces_lock, flags);
- if (rv)
- goto out;
+ /* Add the new interface in numeric order. */
+ if (i == 0)
+ list_add_rcu(&intf->link, &ipmi_interfaces);
+ else
+ list_add_tail_rcu(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -2497,8 +2724,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
get_guid(intf);
- if ((version_major > 1)
- || ((version_major == 1) && (version_minor >= 5)))
+ if ((intf->ipmi_version_major > 1)
+ || ((intf->ipmi_version_major == 1)
+ && (intf->ipmi_version_minor >= 5)))
{
/* Start scanning the channels to see what is
available. */
@@ -2521,64 +2749,67 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf);
+ rv = ipmi_bmc_register(intf, i, sysfs_name);
out:
if (rv) {
if (intf->proc_dir)
remove_proc_entries(intf);
+ intf->handlers = NULL;
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ mutex_unlock(&smi_watchers_mutex);
+ synchronize_rcu();
kref_put(&intf->refcount, intf_free);
- if (i < MAX_IPMI_INTERFACES) {
- spin_lock_irqsave(&interfaces_lock, flags);
- ipmi_interfaces[i] = NULL;
- spin_unlock_irqrestore(&interfaces_lock, flags);
- }
} else {
- spin_lock_irqsave(&interfaces_lock, flags);
- ipmi_interfaces[i] = intf;
- spin_unlock_irqrestore(&interfaces_lock, flags);
+ /* After this point the interface is legal to use. */
+ intf->intf_num = i;
+ mutex_unlock(&ipmi_interfaces_mutex);
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
}
return rv;
}
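With the added sysfs_name argument, a low-level driver registration now looks roughly like the sketch below (the local names are illustrative; ipmi_si passes the literal "bmc", as seen later in this patch):

	rv = ipmi_register_smi(&my_handlers,	/* struct ipmi_smi_handlers */
			       my_send_info,
			       &my_device_id,	/* struct ipmi_device_id */
			       my_dev,		/* struct device for the interface */
			       "bmc",		/* name of the sysfs link to the BMC */
			       my_slave_addr);
	if (rv)
		printk(KERN_ERR "my_driver: unable to register SMI: %d\n", rv);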
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+ int i;
+ struct seq_table *ent;
+
+ /* No need for locks, the interface is down. */
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+ ent = &(intf->seq_table[i]);
+ if (!ent->inuse)
+ continue;
+ deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+ }
+}
+
int ipmi_unregister_smi(ipmi_smi_t intf)
{
- int i;
struct ipmi_smi_watcher *w;
- unsigned long flags;
+ int intf_num = intf->intf_num;
ipmi_bmc_unregister(intf);
- spin_lock_irqsave(&interfaces_lock, flags);
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- if (ipmi_interfaces[i] == intf) {
- /* Set the interface number reserved until we
- * are done. */
- ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
- intf->intf_num = -1;
- break;
- }
- }
- spin_unlock_irqrestore(&interfaces_lock,flags);
+ mutex_lock(&smi_watchers_mutex);
+ mutex_lock(&ipmi_interfaces_mutex);
+ intf->intf_num = -1;
+ intf->handlers = NULL;
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_rcu();
- if (i == MAX_IPMI_INTERFACES)
- return -ENODEV;
+ cleanup_smi_msgs(intf);
remove_proc_entries(intf);
/* Call all the watcher interfaces to tell them that
an interface is gone. */
- down_read(&smi_watchers_sem);
list_for_each_entry(w, &smi_watchers, link)
- w->smi_gone(i);
- up_read(&smi_watchers_sem);
-
- /* Allow the entry to be reused now. */
- spin_lock_irqsave(&interfaces_lock, flags);
- ipmi_interfaces[i] = NULL;
- spin_unlock_irqrestore(&interfaces_lock,flags);
+ w->smi_gone(intf_num);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return 0;
@@ -2660,6 +2891,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
struct ipmi_ipmb_addr *ipmb_addr;
struct ipmi_recv_msg *recv_msg;
unsigned long flags;
+ struct ipmi_smi_handlers *handlers;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -2716,10 +2948,16 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
printk("\n");
}
#endif
- intf->handlers->sender(intf->send_info, msg, 0);
-
- rv = -1; /* We used the message, so return the value that
- causes it to not be freed or queued. */
+ rcu_read_lock();
+ handlers = intf->handlers;
+ if (handlers) {
+ handlers->sender(intf->send_info, msg, 0);
+ /* We used the message, so return the value
+ that causes it to not be freed or
+ queued. */
+ rv = -1;
+ }
+ rcu_read_unlock();
} else {
/* Deliver the message to the user. */
spin_lock_irqsave(&intf->counter_lock, flags);
@@ -3309,16 +3547,6 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
rcu_read_unlock();
}
-static void
-handle_msg_timeout(struct ipmi_recv_msg *msg)
-{
- msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
- msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
- msg->msg.netfn |= 1; /* Convert to a response. */
- msg->msg.data_len = 1;
- msg->msg.data = msg->msg_data;
- deliver_response(msg);
-}
static struct ipmi_smi_msg *
smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3350,7 +3578,11 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct list_head *timeouts, long timeout_period,
int slot, unsigned long *flags)
{
- struct ipmi_recv_msg *msg;
+ struct ipmi_recv_msg *msg;
+ struct ipmi_smi_handlers *handlers;
+
+ if (intf->intf_num == -1)
+ return;
if (!ent->inuse)
return;
@@ -3393,13 +3625,19 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
return;
spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
/* Send the new message. We send with a zero
* priority. It timed out, I doubt time is
* that critical now, and high priority
* messages are really only for messages to the
* local MC, which don't get resent. */
- intf->handlers->sender(intf->send_info,
- smi_msg, 0);
+ handlers = intf->handlers;
+ if (handlers)
+ handlers->sender(intf->send_info,
+ smi_msg, 0);
+ else
+ ipmi_free_smi_msg(smi_msg);
+
spin_lock_irqsave(&intf->seq_lock, *flags);
}
}
@@ -3411,18 +3649,12 @@ static void ipmi_timeout_handler(long timeout_period)
struct ipmi_recv_msg *msg, *msg2;
struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
- int i, j;
+ int i;
INIT_LIST_HEAD(&timeouts);
- spin_lock(&interfaces_lock);
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- intf = ipmi_interfaces[i];
- if (IPMI_INVALID_INTERFACE(intf))
- continue;
- kref_get(&intf->refcount);
- spin_unlock(&interfaces_lock);
-
+ rcu_read_lock();
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
/* See if any waiting messages need to be processed. */
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
list_for_each_entry_safe(smi_msg, smi_msg2,
@@ -3442,35 +3674,60 @@ static void ipmi_timeout_handler(long timeout_period)
have timed out, putting them in the timeouts
list. */
spin_lock_irqsave(&intf->seq_lock, flags);
- for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
- check_msg_timeout(intf, &(intf->seq_table[j]),
- &timeouts, timeout_period, j,
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &(intf->seq_table[i]),
+ &timeouts, timeout_period, i,
&flags);
spin_unlock_irqrestore(&intf->seq_lock, flags);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
- handle_msg_timeout(msg);
-
- kref_put(&intf->refcount, intf_free);
- spin_lock(&interfaces_lock);
+ deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+ /*
+ * Maintenance mode handling. Check the timeout
+ * optimistically before we claim the lock. It may
+ * mean a timeout gets missed occasionally, but that
+ * only means the timeout gets extended by one period
+ * in that case. No big deal, and it avoids the lock
+ * most of the time.
+ */
+ if (intf->auto_maintenance_timeout > 0) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+ if (intf->auto_maintenance_timeout > 0) {
+ intf->auto_maintenance_timeout
+ -= timeout_period;
+ if (!intf->maintenance_mode
+ && (intf->auto_maintenance_timeout <= 0))
+ {
+ intf->maintenance_mode_enable = 0;
+ maintenance_mode_update(intf);
+ }
+ }
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
+ }
}
- spin_unlock(&interfaces_lock);
+ rcu_read_unlock();
}
static void ipmi_request_event(void)
{
- ipmi_smi_t intf;
- int i;
+ ipmi_smi_t intf;
+ struct ipmi_smi_handlers *handlers;
- spin_lock(&interfaces_lock);
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- intf = ipmi_interfaces[i];
- if (IPMI_INVALID_INTERFACE(intf))
+ rcu_read_lock();
+ /* Called from the timer; interfaces on the list may still be
+ * shutting down, so check handlers before using it. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ /* No event requests when in maintenance mode. */
+ if (intf->maintenance_mode_enable)
continue;
- intf->handlers->request_events(intf->send_info);
+ handlers = intf->handlers;
+ if (handlers)
+ handlers->request_events(intf->send_info);
}
- spin_unlock(&interfaces_lock);
+ rcu_read_unlock();
}
static struct timer_list ipmi_timer;
@@ -3599,7 +3856,6 @@ static void send_panic_events(char *str)
struct kernel_ipmi_msg msg;
ipmi_smi_t intf;
unsigned char data[16];
- int i;
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
struct ipmi_smi_msg smi_msg;
@@ -3633,9 +3889,9 @@ static void send_panic_events(char *str)
recv_msg.done = dummy_recv_done_handler;
/* For every registered interface, send the event. */
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- intf = ipmi_interfaces[i];
- if (IPMI_INVALID_INTERFACE(intf))
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (!intf->handlers)
+ /* Interface is not ready. */
continue;
/* Send the event announcing the panic. */
@@ -3660,13 +3916,14 @@ static void send_panic_events(char *str)
if (!str)
return;
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
+ /* For every registered interface, send the event. */
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
char *p = str;
struct ipmi_ipmb_addr *ipmb;
int j;
- intf = ipmi_interfaces[i];
- if (IPMI_INVALID_INTERFACE(intf))
+ if (intf->intf_num == -1)
+ /* Interface was not ready yet. */
continue;
/* First job here is to figure out where to send the
@@ -3792,7 +4049,6 @@ static int panic_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
- int i;
ipmi_smi_t intf;
if (has_panicked)
@@ -3800,9 +4056,9 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
- intf = ipmi_interfaces[i];
- if (IPMI_INVALID_INTERFACE(intf))
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (!intf->handlers)
+ /* Interface is not ready. */
continue;
intf->handlers->set_run_to_completion(intf->send_info, 1);
@@ -3823,7 +4079,6 @@ static struct notifier_block panic_block = {
static int ipmi_init_msghandler(void)
{
- int i;
int rv;
if (initialized)
@@ -3838,9 +4093,6 @@ static int ipmi_init_msghandler(void)
printk(KERN_INFO "ipmi message handler version "
IPMI_DRIVER_VERSION "\n");
- for (i = 0; i < MAX_IPMI_INTERFACES; i++)
- ipmi_interfaces[i] = NULL;
-
#ifdef CONFIG_PROC_FS
proc_ipmi_root = proc_mkdir("ipmi", NULL);
if (!proc_ipmi_root) {
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 8d941db8345..597eb4f88b8 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -43,6 +43,9 @@
#define PFX "IPMI poweroff: "
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
/* Definitions for controlling power off (if the system supports it). It
* conveniently matches the IPMI chassis control values. */
#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */
@@ -51,6 +54,37 @@
/* the IPMI data command */
static int poweroff_powercycle;
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready = 0;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+ return 0;
+
+ ipmi_po_smi_gone(ipmi_ifnum);
+ ipmi_po_new_smi(ifnum_to_use, NULL);
+ return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+ &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the poweroff "
+ "operation.  Setting to -1 defaults to the first registered "
+ "interface");
+
/* parameter definition to allow user to flag power cycle */
module_param(poweroff_powercycle, int, 0644);
MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
@@ -142,6 +176,42 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user,
#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
#define IPMI_PICMG_ID 0
+#define IPMI_NETFN_OEM 0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11
+#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
+
+static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL;
+
+static void pps_poweroff_atca (ipmi_user_t user)
+{
+ struct ipmi_system_interface_addr smi_addr;
+ struct kernel_ipmi_msg send_msg;
+ int rv;
+ /*
+ * Configure IPMI address for local access
+ */
+ smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr.channel = IPMI_BMC_CHANNEL;
+ smi_addr.lun = 0;
+
+ printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+ send_msg.netfn = IPMI_NETFN_OEM;
+ send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+ send_msg.data = IPMI_ATCA_PPS_IANA;
+ send_msg.data_len = 3;
+ rv = ipmi_request_in_rc_mode(user,
+ (struct ipmi_addr *) &smi_addr,
+ &send_msg);
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+ printk(KERN_ERR PFX "Unable to send ATCA PPS powerdown message,"
+ " IPMI error 0x%x\n", rv);
+ }
+ return;
+}
+
static int ipmi_atca_detect (ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
@@ -167,6 +237,13 @@ static int ipmi_atca_detect (ipmi_user_t user)
rv = ipmi_request_wait_for_response(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
+
+ printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
+ if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+ && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+ printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n");
+ atca_oem_poweroff_hook = pps_poweroff_atca;
+ }
return !rv;
}
@@ -200,12 +277,19 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
- if (rv) {
+ /* At this point, the system may be shutting down, and most
+ * serial drivers (if used) will have interrupts turned off;
+ * it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
+ * return code here.
+ */
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
" IPMI error 0x%x\n", rv);
goto out;
}
+ if (atca_oem_poweroff_hook)
+ return atca_oem_poweroff_hook(user);
out:
return;
}
@@ -440,15 +524,6 @@ static struct poweroff_function poweroff_functions[] = {
/ sizeof(struct poweroff_function))
-/* Our local state. */
-static int ready = 0;
-static ipmi_user_t ipmi_user;
-static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
-
-/* Holds the old poweroff function so we can restore it on removal. */
-static void (*old_poweroff_func)(void);
-
-
/* Called on a powerdown request. */
static void ipmi_poweroff_function (void)
{
@@ -473,6 +548,9 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
if (ready)
return;
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+ return;
+
rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
&ipmi_user);
if (rv) {
@@ -481,6 +559,8 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
return;
}
+ ipmi_ifnum = if_num;
+
/*
* Do a get device id and store some results, since this is
* used by several functions.
@@ -541,9 +621,15 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
static void ipmi_po_smi_gone(int if_num)
{
- /* This can never be called, because once poweroff driver is
- registered, the interface can't go away until the power
- driver is unregistered. */
+ if (!ready)
+ return;
+
+ if (ipmi_ifnum != if_num)
+ return;
+
+ ready = 0;
+ ipmi_destroy_user(ipmi_user);
+ pm_power_off = old_poweroff_func;
}
static struct ipmi_smi_watcher smi_watcher =
@@ -616,9 +702,9 @@ static int ipmi_poweroff_init (void)
printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
goto out_err;
}
-#endif
out_err:
+#endif
return rv;
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb1fac104fd..81a0c89598e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,6 +61,10 @@
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#define PFX "ipmi_si: "
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
@@ -92,7 +96,7 @@ enum si_intf_state {
enum si_type {
SI_KCS, SI_SMIC, SI_BT
};
-static char *si_to_str[] = { "KCS", "SMIC", "BT" };
+static char *si_to_str[] = { "kcs", "smic", "bt" };
#define DEVICE_NAME "ipmi_si"
@@ -222,7 +226,10 @@ struct smi_info
static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;
+static int unload_when_empty = 1;
+
static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *to_clean);
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block * nb)
@@ -240,14 +247,18 @@ static void deliver_recv_msg(struct smi_info *smi_info,
spin_lock(&(smi_info->si_lock));
}
-static void return_hosed_msg(struct smi_info *smi_info)
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
+ if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+ cCode = IPMI_ERR_UNSPECIFIED;
+ /* else use it as is */
+
+ /* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
- msg->rsp[2] = 0xFF; /* Unknown error. */
+ msg->rsp[2] = cCode;
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
@@ -298,7 +309,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
if (err) {
- return_hosed_msg(smi_info);
+ return_hosed_msg(smi_info, err);
}
rv = SI_SM_CALL_WITHOUT_DELAY;
@@ -640,7 +651,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
/* If we were handling a user message, format
a response to send to the upper layer to
tell it about the error. */
- return_hosed_msg(smi_info);
+ return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
}
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
}
@@ -684,22 +695,24 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
{
/* We are idle and the upper layer requested that I fetch
events, so do so. */
- unsigned char msg[2];
+ atomic_set(&smi_info->req_events, 0);
- spin_lock(&smi_info->count_lock);
- smi_info->flag_fetches++;
- spin_unlock(&smi_info->count_lock);
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg)
+ goto out;
- atomic_set(&smi_info->req_events, 0);
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
smi_info->handlers->start_transaction(
- smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
goto restart;
}
-
+ out:
return si_sm_result;
}
@@ -714,6 +727,15 @@ static void sender(void *send_info,
struct timeval t;
#endif
+ if (atomic_read(&smi_info->stop_operation)) {
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ deliver_recv_msg(smi_info, msg);
+ return;
+ }
+
spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
@@ -805,13 +827,21 @@ static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
- smi_event_handler(smi_info, 0);
+ /*
+ * Make sure there is some delay in the poll loop so we can
+ * drive time forward and timeout things.
+ */
+ udelay(10);
+ smi_event_handler(smi_info, 10);
}
static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
+ if (atomic_read(&smi_info->stop_operation))
+ return;
+
atomic_set(&smi_info->req_events, 1);
}
@@ -949,12 +979,21 @@ static int smi_start_processing(void *send_info,
return 0;
}
+static void set_maintenance_mode(void *send_info, int enable)
+{
+ struct smi_info *smi_info = send_info;
+
+ if (!enable)
+ atomic_set(&smi_info->req_events, 0);
+}
+
static struct ipmi_smi_handlers handlers =
{
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
.sender = sender,
.request_events = request_events,
+ .set_maintenance_mode = set_maintenance_mode,
.set_run_to_completion = set_run_to_completion,
.poll = poll,
};
@@ -987,6 +1026,16 @@ static int num_regshifts = 0;
static int slave_addrs[SI_MAX_PARMS];
static int num_slave_addrs = 0;
+#define IPMI_IO_ADDR_SPACE 0
+#define IPMI_MEM_ADDR_SPACE 1
+static char *addr_space_to_str[] = { "I/O", "mem" };
+
+static int hotmod_handler(const char *val, struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
+ " Documentation/IPMI.txt in the kernel sources for the"
+ " gory details.");
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
@@ -1038,12 +1087,12 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
" disabled(0). Normally the IPMI driver auto-detects"
" this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, int, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+ " specified or found, default is 1. Setting to 0"
+ " is useful for hot add of devices using hotmod.");
-#define IPMI_IO_ADDR_SPACE 0
-#define IPMI_MEM_ADDR_SPACE 1
-static char *addr_space_to_str[] = { "I/O", "memory" };
-
static void std_irq_cleanup(struct smi_info *info)
{
if (info->si_type == SI_BT)
@@ -1317,6 +1366,234 @@ static int mem_setup(struct smi_info *info)
return 0;
}
+/*
+ * Parms come in as <op1>[:op2[:op3...]]. ops are:
+ * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ * rsp=<regspacing>
+ * rsi=<regsize>
+ * rsh=<regshift>
+ * irq=<irq>
+ * ipmb=<ipmb addr>
+ */
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+ char *name;
+ int val;
+};
+static struct hotmod_vals hotmod_ops[] = {
+ { "add", HM_ADD },
+ { "remove", HM_REMOVE },
+ { NULL }
+};
+static struct hotmod_vals hotmod_si[] = {
+ { "kcs", SI_KCS },
+ { "smic", SI_SMIC },
+ { "bt", SI_BT },
+ { NULL }
+};
+static struct hotmod_vals hotmod_as[] = {
+ { "mem", IPMI_MEM_ADDR_SPACE },
+ { "i/o", IPMI_IO_ADDR_SPACE },
+ { NULL }
+};
+static int ipmi_strcasecmp(const char *s1, const char *s2)
+{
+ while (*s1 || *s2) {
+ if (!*s1)
+ return -1;
+ if (!*s2)
+ return 1;
+ if (tolower(*s1) != tolower(*s2))
+ return tolower(*s1) - tolower(*s2);
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
+{
+ char *s;
+ int i;
+
+ s = strchr(*curr, ',');
+ if (!s) {
+ printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+ return -EINVAL;
+ }
+ *s = '\0';
+ s++;
+ for (i = 0; v[i].name; i++) {
+ if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
+ *val = v[i].val;
+ *curr = s;
+ return 0;
+ }
+ }
+
+ printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ return -EINVAL;
+}
+
+static int hotmod_handler(const char *val, struct kernel_param *kp)
+{
+ char *str = kstrdup(val, GFP_KERNEL);
+ int rv = -EINVAL;
+ char *next, *curr, *s, *n, *o;
+ enum hotmod_op op;
+ enum si_type si_type;
+ int addr_space;
+ unsigned long addr;
+ int regspacing;
+ int regsize;
+ int regshift;
+ int irq;
+ int ipmb;
+ int ival;
+ struct smi_info *info;
+
+ if (!str)
+ return -ENOMEM;
+
+ /* Kill any trailing spaces, as we can get a "\n" from echo. */
+ ival = strlen(str) - 1;
+ while ((ival >= 0) && isspace(str[ival])) {
+ str[ival] = '\0';
+ ival--;
+ }
+
+ for (curr = str; curr; curr = next) {
+ regspacing = 1;
+ regsize = 1;
+ regshift = 0;
+ irq = 0;
+ ipmb = 0x20;
+
+ next = strchr(curr, ':');
+ if (next) {
+ *next = '\0';
+ next++;
+ }
+
+ rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+ if (rv)
+ break;
+ op = ival;
+
+ rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+ if (rv)
+ break;
+ si_type = ival;
+
+ rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+ if (rv)
+ break;
+
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ addr = simple_strtoul(curr, &n, 0);
+ if ((*n != '\0') || (*curr == '\0')) {
+ printk(KERN_WARNING PFX "Invalid hotmod address"
+ " '%s'\n", curr);
+ break;
+ }
+
+ while (s) {
+ curr = s;
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ o = strchr(curr, '=');
+ if (o) {
+ *o = '\0';
+ o++;
+ }
+#define HOTMOD_INT_OPT(name, val) \
+ if (ipmi_strcasecmp(curr, name) == 0) { \
+ if (!o) { \
+ printk(KERN_WARNING PFX \
+ "No option given for '%s'\n", \
+ curr); \
+ goto out; \
+ } \
+ val = simple_strtoul(o, &n, 0); \
+ if ((*n != '\0') || (*o == '\0')) { \
+ printk(KERN_WARNING PFX \
+ "Bad option given for '%s'\n", \
+ curr); \
+ goto out; \
+ } \
+ }
+
+ HOTMOD_INT_OPT("rsp", regspacing)
+ else HOTMOD_INT_OPT("rsi", regsize)
+ else HOTMOD_INT_OPT("rsh", regshift)
+ else HOTMOD_INT_OPT("irq", irq)
+ else HOTMOD_INT_OPT("ipmb", ipmb)
+ else {
+ printk(KERN_WARNING PFX
+ "Invalid hotmod option '%s'\n",
+ curr);
+ goto out;
+ }
+#undef HOTMOD_INT_OPT
+ }
+
+ if (op == HM_ADD) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ info->addr_source = "hotmod";
+ info->si_type = si_type;
+ info->io.addr_data = addr;
+ info->io.addr_type = addr_space;
+ if (addr_space == IPMI_MEM_ADDR_SPACE)
+ info->io_setup = mem_setup;
+ else
+ info->io_setup = port_setup;
+
+ info->io.addr = NULL;
+ info->io.regspacing = regspacing;
+ if (!info->io.regspacing)
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = regsize;
+ if (!info->io.regsize)
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = regshift;
+ info->irq = irq;
+ if (info->irq)
+ info->irq_setup = std_irq_setup;
+ info->slave_addr = ipmb;
+
+ try_smi_init(info);
+ } else {
+ /* remove */
+ struct smi_info *e, *tmp_e;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+ if (e->io.addr_type != addr_space)
+ continue;
+ if (e->si_type != si_type)
+ continue;
+ if (e->io.addr_data == addr)
+ cleanup_one_si(e);
+ }
+ mutex_unlock(&smi_infos_lock);
+ }
+ }
+ out:
+ kfree(str);
+ return rv;
+}
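Since hotmod is registered with module_param_call() and mode 0200, it can be written at run time through sysfs. As a hypothetical example following the grammar in the comment above, a KCS interface at I/O port 0xca2 could be added with

	echo "add,kcs,i/o,0xca2,irq=5" > /sys/module/ipmi_si/parameters/hotmod

and removed again with

	echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod

(the address and irq values here are only examples; see Documentation/IPMI.txt for the full option list).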
static __devinit void hardcode_find_bmc(void)
{
@@ -1333,11 +1610,11 @@ static __devinit void hardcode_find_bmc(void)
info->addr_source = "hardcoded";
- if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+ if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
- } else if (strcmp(si_type[i], "smic") == 0) {
+ } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
info->si_type = SI_SMIC;
- } else if (strcmp(si_type[i], "bt") == 0) {
+ } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
printk(KERN_WARNING
@@ -1952,19 +2229,9 @@ static int try_get_dev_id(struct smi_info *smi_info)
static int type_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- char *out = (char *) page;
struct smi_info *smi = data;
- switch (smi->si_type) {
- case SI_KCS:
- return sprintf(out, "kcs\n");
- case SI_SMIC:
- return sprintf(out, "smic\n");
- case SI_BT:
- return sprintf(out, "bt\n");
- default:
- return 0;
- }
+ return sprintf(page, "%s\n", si_to_str[smi->si_type]);
}
static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -2000,7 +2267,24 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
out += sprintf(out, "incoming_messages: %ld\n",
smi->incoming_messages);
- return (out - ((char *) page));
+ return out - page;
+}
+
+static int param_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct smi_info *smi = data;
+
+ return sprintf(page,
+ "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+ si_to_str[smi->si_type],
+ addr_space_to_str[smi->io.addr_type],
+ smi->io.addr_data,
+ smi->io.regspacing,
+ smi->io.regsize,
+ smi->io.regshift,
+ smi->irq,
+ smi->slave_addr);
}
/*
@@ -2362,6 +2646,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi,
&new_smi->device_id,
new_smi->dev,
+ "bmc",
new_smi->slave_addr);
if (rv) {
printk(KERN_ERR
@@ -2390,6 +2675,16 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err_stop_timer;
}
+ rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
+ param_read_proc, NULL,
+ new_smi, THIS_MODULE);
+ if (rv) {
+ printk(KERN_ERR
+ "ipmi_si: Unable to create proc entry: %d\n",
+ rv);
+ goto out_err_stop_timer;
+ }
+
list_add_tail(&new_smi->link, &smi_infos);
mutex_unlock(&smi_infos_lock);
@@ -2483,7 +2778,12 @@ static __devinit int init_ipmi_si(void)
#endif
#ifdef CONFIG_PCI
- pci_module_init(&ipmi_pci_driver);
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv) {
+ printk(KERN_ERR
+ "init_ipmi_si: Unable to register PCI driver: %d\n",
+ rv);
+ }
#endif
if (si_trydefaults) {
@@ -2498,7 +2798,7 @@ static __devinit int init_ipmi_si(void)
}
mutex_lock(&smi_infos_lock);
- if (list_empty(&smi_infos)) {
+ if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
@@ -2513,7 +2813,7 @@ static __devinit int init_ipmi_si(void)
}
module_init(init_ipmi_si);
-static void __devexit cleanup_one_si(struct smi_info *to_clean)
+static void cleanup_one_si(struct smi_info *to_clean)
{
int rv;
unsigned long flags;
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index 39d7e5ef1a2..e64ea7d25d2 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -141,12 +141,14 @@ static int start_smic_transaction(struct si_sm_data *smic,
{
unsigned int i;
- if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) {
- return -1;
- }
- if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
- return -2;
- }
+ if (size < 2)
+ return IPMI_REQ_LEN_INVALID_ERR;
+ if (size > MAX_SMIC_WRITE_SIZE)
+ return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+ if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+ return IPMI_NOT_IN_MY_STATE_ERR;
+
if (smic_debug & SMIC_DEBUG_MSG) {
printk(KERN_INFO "start_smic_transaction -");
for (i = 0; i < size; i ++) {
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 73f759eaa5a..90fb2a54191 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -135,6 +135,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static ipmi_user_t watchdog_user = NULL;
+static int watchdog_ifnum;
/* Default the timeout to 10 seconds. */
static int timeout = 10;
@@ -161,6 +162,8 @@ static struct fasync_struct *fasync_q = NULL;
static char pretimeout_since_last_heartbeat = 0;
static char expect_close;
+static int ifnum_to_use = -1;
+
static DECLARE_RWSEM(register_sem);
/* Parameters to ipmi_set_timeout */
@@ -169,6 +172,8 @@ static DECLARE_RWSEM(register_sem);
#define IPMI_SET_TIMEOUT_FORCE_HB 2
static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
/* If true, the driver will start running as soon as it is configured
and ready. */
@@ -245,6 +250,26 @@ static int get_param_str(char *buffer, struct kernel_param *kp)
return strlen(buffer);
}
+
+static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+{
+ int rv = param_set_int(val, kp);
+ if (rv)
+ return rv;
+ if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+ return 0;
+
+ ipmi_unregister_watchdog(watchdog_ifnum);
+ ipmi_register_watchdog(ifnum_to_use);
+ return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
+ &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+ "timer. Setting to -1 defaults to the first registered "
+ "interface");
+
module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
@@ -263,12 +288,13 @@ module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
"preop_none, preop_panic, preop_give_data.");
-module_param(start_now, int, 0);
+module_param(start_now, int, 0444);
MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
"soon as the driver is loaded.");
module_param(nowayout, int, 0644);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=CONFIG_WATCHDOG_NOWAYOUT)");
/* Default state of the timer. */
static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
@@ -872,6 +898,11 @@ static void ipmi_register_watchdog(int ipmi_intf)
if (watchdog_user)
goto out;
+ if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+ goto out;
+
+ watchdog_ifnum = ipmi_intf;
+
rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
if (rv < 0) {
printk(KERN_CRIT PFX "Unable to register with ipmi\n");
@@ -901,6 +932,39 @@ static void ipmi_register_watchdog(int ipmi_intf)
}
}
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+ int rv;
+
+ down_write(&register_sem);
+
+ if (!watchdog_user)
+ goto out;
+
+ if (watchdog_ifnum != ipmi_intf)
+ goto out;
+
+ /* Make sure no one can call us any more. */
+ misc_deregister(&ipmi_wdog_miscdev);
+
+ /* Wait to make sure the message makes it out. The lower layer has
+ pointers to our buffers, we want to make sure they are done before
+ we release our memory. */
+ while (atomic_read(&set_timeout_tofree))
+ schedule_timeout_uninterruptible(1);
+
+ /* Disconnect from IPMI. */
+ rv = ipmi_destroy_user(watchdog_user);
+ if (rv) {
+ printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+ rv);
+ }
+ watchdog_user = NULL;
+
+ out:
+ up_write(&register_sem);
+}
+
#ifdef HAVE_NMI_HANDLER
static int
ipmi_nmi(void *dev_id, int cpu, int handled)
@@ -1004,9 +1068,7 @@ static void ipmi_new_smi(int if_num, struct device *device)
static void ipmi_smi_gone(int if_num)
{
- /* This can never be called, because once the watchdog is
- registered, the interface can't go away until the watchdog
- is unregistered. */
+ ipmi_unregister_watchdog(if_num);
}
static struct ipmi_smi_watcher smi_watcher =
@@ -1148,30 +1210,32 @@ static int __init ipmi_wdog_init(void)
check_parms();
+ register_reboot_notifier(&wdog_reboot_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &wdog_panic_notifier);
+
rv = ipmi_smi_watcher_register(&smi_watcher);
if (rv) {
#ifdef HAVE_NMI_HANDLER
if (preaction_val == WDOG_PRETIMEOUT_NMI)
release_nmi(&ipmi_nmi_handler);
#endif
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &wdog_panic_notifier);
+ unregister_reboot_notifier(&wdog_reboot_notifier);
printk(KERN_WARNING PFX "can't register smi watcher\n");
return rv;
}
- register_reboot_notifier(&wdog_reboot_notifier);
- atomic_notifier_chain_register(&panic_notifier_list,
- &wdog_panic_notifier);
-
printk(KERN_INFO PFX "driver initialized\n");
return 0;
}
-static __exit void ipmi_unregister_watchdog(void)
+static void __exit ipmi_wdog_exit(void)
{
- int rv;
-
- down_write(&register_sem);
+ ipmi_smi_watcher_unregister(&smi_watcher);
+ ipmi_unregister_watchdog(watchdog_ifnum);
#ifdef HAVE_NMI_HANDLER
if (nmi_handler_registered)
@@ -1179,37 +1243,8 @@ static __exit void ipmi_unregister_watchdog(void)
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
- &wdog_panic_notifier);
+ &wdog_panic_notifier);
unregister_reboot_notifier(&wdog_reboot_notifier);
-
- if (! watchdog_user)
- goto out;
-
- /* Make sure no one can call us any more. */
- misc_deregister(&ipmi_wdog_miscdev);
-
- /* Wait to make sure the message makes it out. The lower layer has
- pointers to our buffers, we want to make sure they are done before
- we release our memory. */
- while (atomic_read(&set_timeout_tofree))
- schedule_timeout_uninterruptible(1);
-
- /* Disconnect from IPMI. */
- rv = ipmi_destroy_user(watchdog_user);
- if (rv) {
- printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
- rv);
- }
- watchdog_user = NULL;
-
- out:
- up_write(&register_sem);
-}
-
-static void __exit ipmi_wdog_exit(void)
-{
- ipmi_smi_watcher_unregister(&smi_watcher);
- ipmi_unregister_watchdog();
}
module_exit(ipmi_wdog_exit);
module_init(ipmi_wdog_init);
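
The ipmi_watchdog hunks above do two things: ipmi_register_watchdog() now honours the new ifnum_to_use parameter, and ipmi_wdog_init() registers the reboot/panic notifiers before the SMI watcher, unwinding them in reverse order if the watcher cannot be registered. Below is a minimal, hypothetical sketch of that ordering using the same notifier and watcher calls as the patch; every demo_* name is a placeholder, not part of the driver.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/ipmi.h>

static int demo_shutdown_event(struct notifier_block *nb,
			       unsigned long code, void *unused)
{
	/* a real driver would disarm its hardware timer here */
	return NOTIFY_OK;
}

static struct notifier_block demo_reboot_nb = { .notifier_call = demo_shutdown_event };
static struct notifier_block demo_panic_nb  = { .notifier_call = demo_shutdown_event };

static void demo_new_smi(int if_num, struct device *device) { /* create user */ }
static void demo_smi_gone(int if_num) { /* destroy user */ }

static struct ipmi_smi_watcher demo_watcher = {
	.owner    = THIS_MODULE,
	.new_smi  = demo_new_smi,
	.smi_gone = demo_smi_gone,
};

static int __init demo_init(void)
{
	int rv;

	/* notifiers first, so an interface found by the watcher is covered at once */
	register_reboot_notifier(&demo_reboot_nb);
	atomic_notifier_chain_register(&panic_notifier_list, &demo_panic_nb);

	rv = ipmi_smi_watcher_register(&demo_watcher);
	if (rv) {
		/* unwind in reverse order on failure */
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &demo_panic_nb);
		unregister_reboot_notifier(&demo_reboot_nb);
	}
	return rv;
}
module_init(demo_init);
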
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index bd9195e1795..8f591945ebd 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -3476,6 +3476,8 @@ static int stli_initecp(stlibrd_t *brdp)
if (sig.magic != cpu_to_le32(ECP_MAGIC))
{
release_region(brdp->iobase, brdp->iosize);
+ iounmap(brdp->membase);
+ brdp->membase = NULL;
return -ENODEV;
}
@@ -3632,6 +3634,8 @@ static int stli_initonb(stlibrd_t *brdp)
sig.magic3 != cpu_to_le16(ONB_MAGIC3))
{
release_region(brdp->iobase, brdp->iosize);
+ iounmap(brdp->membase);
+ brdp->membase = NULL;
return -ENODEV;
}
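
Both istallion hunks fix the same leak: the signature-check failure path released the I/O region but left the board's memory window mapped. The sketch below shows the general shape of that error path, not the driver's actual probe code; the demo_* names and the signature check are placeholders.

#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/io.h>

struct demo_board {
	unsigned long	iobase, iosize;
	unsigned long	memaddr, memsize;
	void __iomem	*membase;
};

static int demo_signature_ok(void __iomem *membase)
{
	return 1;			/* placeholder for the real magic-number check */
}

static int demo_init_board(struct demo_board *brdp)
{
	if (!request_region(brdp->iobase, brdp->iosize, "demo"))
		return -EIO;

	brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
	if (brdp->membase == NULL) {
		release_region(brdp->iobase, brdp->iosize);
		return -ENOMEM;
	}

	if (!demo_signature_ok(brdp->membase)) {
		release_region(brdp->iobase, brdp->iosize);
		iounmap(brdp->membase);		/* undo the mapping too */
		brdp->membase = NULL;		/* so later cleanup cannot unmap it twice */
		return -ENODEV;
	}
	return 0;
}

Clearing the pointer after iounmap() also keeps any conditional cleanup path from unmapping the same region twice; the rio_linux hunks further down apply the same idea to Caddr.
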
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 7a484fc7cb9..7e975f60692 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -199,6 +199,8 @@ int misc_register(struct miscdevice * misc)
dev_t dev;
int err = 0;
+ INIT_LIST_HEAD(&misc->list);
+
down(&misc_sem);
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
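
The misc_register() change initialises the miscdevice's list head before anything can fail. A node that has been initialised but never linked points at itself, so a later list_del() (which is what misc_deregister() uses to unlink) leaves the global list untouched instead of following stale pointers. A small sketch of the idea, with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/errno.h>

struct demo_dev {
	int			minor;
	struct list_head	list;
};

static LIST_HEAD(demo_list);

static int demo_register(struct demo_dev *d)
{
	struct demo_dev *c;

	INIT_LIST_HEAD(&d->list);	/* do this before any early return */

	list_for_each_entry(c, &demo_list, list)
		if (c->minor == d->minor)
			return -EBUSY;	/* d->list still points at itself */

	list_add(&d->list, &demo_list);
	return 0;
}

static void demo_deregister(struct demo_dev *d)
{
	list_del(&d->list);		/* harmless even if registration never linked it */
}
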
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 22b9905c1e5..c09160383a5 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -680,7 +680,7 @@ static int __init mmtimer_init(void)
if (sn_rtc_cycles_per_second < 100000) {
printk(KERN_ERR "%s: unable to determine clock frequency\n",
MMTIMER_NAME);
- return -1;
+ goto out1;
}
mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
@@ -689,13 +689,13 @@ static int __init mmtimer_init(void)
if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
printk(KERN_WARNING "%s: unable to allocate interrupt.",
MMTIMER_NAME);
- return -1;
+ goto out1;
}
if (misc_register(&mmtimer_miscdev)) {
printk(KERN_ERR "%s: failed to register device\n",
MMTIMER_NAME);
- return -1;
+ goto out2;
}
/* Get max numbered node, calculate slots needed */
@@ -709,16 +709,18 @@ static int __init mmtimer_init(void)
if (timers == NULL) {
printk(KERN_ERR "%s: failed to allocate memory for device\n",
MMTIMER_NAME);
- return -1;
+ goto out3;
}
+ memset(timers,0,(sizeof(mmtimer_t *)*maxn));
+
/* Allocate mmtimer_t's for each online node */
for_each_online_node(node) {
timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
if (timers[node] == NULL) {
printk(KERN_ERR "%s: failed to allocate memory for device\n",
MMTIMER_NAME);
- return -1;
+ goto out4;
}
for (i=0; i< NUM_COMPARATORS; i++) {
mmtimer_t * base = timers[node] + i;
@@ -739,6 +741,17 @@ static int __init mmtimer_init(void)
sn_rtc_cycles_per_second/(unsigned long)1E6);
return 0;
+
+out4:
+ for_each_online_node(node) {
+ kfree(timers[node]);
+ }
+out3:
+ misc_deregister(&mmtimer_miscdev);
+out2:
+ free_irq(SGI_MMTIMER_VECTOR, NULL);
+out1:
+ return -1;
}
module_init(mmtimer_init);
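
The mmtimer_init() rework replaces the bare "return -1" exits with a goto ladder, so a failure at any step releases everything acquired before it, in reverse order. A minimal sketch of the same pattern follows; the interrupt vector, device and all demo_* names are placeholders.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define DEMO_IRQ	16		/* placeholder interrupt vector */

static struct miscdevice demo_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "demo",
};
static void *demo_table;

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	if (request_irq(DEMO_IRQ, demo_interrupt, 0, "demo", NULL))
		goto out1;

	if (misc_register(&demo_miscdev))
		goto out2;

	demo_table = kzalloc(1024, GFP_KERNEL);
	if (demo_table == NULL)
		goto out3;

	return 0;

out3:					/* undo in the reverse order of setup */
	misc_deregister(&demo_miscdev);
out2:
	free_irq(DEMO_IRQ, NULL);
out1:
	return -ENODEV;
}
module_init(demo_init);
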
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 2d025a9fd14..8b316953173 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -498,9 +498,12 @@ static void __exit moxa_exit(void)
printk("Couldn't unregister MOXA Intellio family serial driver\n");
put_tty_driver(moxaDriver);
- for (i = 0; i < MAX_BOARDS; i++)
+ for (i = 0; i < MAX_BOARDS; i++) {
+ if (moxaBaseAddr[i])
+ iounmap(moxaBaseAddr[i]);
if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI)
pci_dev_put(moxa_boards[i].pciInfo.pdev);
+ }
if (verbose)
printk("Done\n");
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 1bd12296dca..74d21c1c104 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -75,8 +75,10 @@
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
#endif
#define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -235,7 +237,7 @@ typedef struct _mgslpc_info {
int dosyncppp;
spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
struct net_device *netdev;
#endif
@@ -392,7 +394,7 @@ static void tx_timeout(unsigned long context);
static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(MGSLPC_INFO *info);
static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
@@ -1053,7 +1055,7 @@ static void tx_done(MGSLPC_INFO *info)
info->drop_rts_on_tx_done = 0;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
@@ -1164,7 +1166,7 @@ static void dcd_change(MGSLPC_INFO *info)
}
else
info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (info->serial_signals & SerialSignal_DCD)
netif_carrier_on(info->netdev);
@@ -2953,7 +2955,7 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
info->device_name, info->io_base, info->irq_level);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_init(info);
#endif
}
@@ -2969,7 +2971,7 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
last->next_device = info->next_device;
else
mgslpc_device_list = info->next_device;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_exit(info);
#endif
release_resources(info);
@@ -3901,7 +3903,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
return_frame = 1;
}
framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
{
struct net_device_stats *stats = hdlc_stats(info->netdev);
stats->rx_errors++;
@@ -3935,7 +3937,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
++framesize;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_rx(info, buf->data, framesize);
else
@@ -4091,7 +4093,7 @@ static void tx_timeout(unsigned long context)
spin_unlock_irqrestore(&info->lock,flags);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
@@ -4099,7 +4101,7 @@ static void tx_timeout(unsigned long context)
bh_transmit(info);
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
/**
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
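
The old trick of defining CONFIG_HDLC whenever CONFIG_HDLC_MODULE is set breaks when the HDLC core is a module but this driver is built in: the built-in driver would then reference symbols it cannot link against. The replacement macro folds both valid combinations into a constant that is always 0 or 1, so it can be tested with #if. The same pattern in generic terms, using hypothetical CONFIG_FOO/CONFIG_BAR options:

/*
 * Driver FOO may call into subsystem BAR only when BAR is built in,
 * or when BAR and FOO are both modules.
 */
#if defined(CONFIG_BAR) || (defined(CONFIG_BAR_MODULE) && defined(CONFIG_FOO_MODULE))
#define FOO_USES_BAR 1
#else
#define FOO_USES_BAR 0
#endif

#if FOO_USES_BAR
/* code that references BAR's exported symbols goes here */
#endif
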
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 7ac68cb3bed..e79b2ede851 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -1026,6 +1026,7 @@ static int __init rio_init(void)
found++;
} else {
iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+ p->RIOHosts[p->RIONumHosts].Caddr = NULL;
}
}
@@ -1078,6 +1079,7 @@ static int __init rio_init(void)
found++;
} else {
iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+ p->RIOHosts[p->RIONumHosts].Caddr = NULL;
}
#else
printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n");
@@ -1117,8 +1119,10 @@ static int __init rio_init(void)
}
}
- if (!okboard)
+ if (!okboard) {
iounmap(hp->Caddr);
+ hp->Caddr = NULL;
+ }
}
}
@@ -1188,6 +1192,8 @@ static void __exit rio_exit(void)
}
/* It is safe/allowed to del_timer a non-active timer */
del_timer(&hp->timer);
+ if (hp->Caddr)
+ iounmap(hp->Caddr);
if (hp->Type == RIO_PCI)
pci_dev_put(hp->pdev);
}
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 722dd3e7418..0a77bfcd5b5 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -82,11 +82,6 @@
static struct riscom_board * IRQ_to_board[16];
static struct tty_driver *riscom_driver;
-static unsigned long baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 76800, 0,
-};
-
static struct riscom_board rc_board[RC_NBOARD] = {
{
.base = RC_IOBASE1,
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 147c30da81e..645187b9141 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -101,8 +101,10 @@
#include <linux/hdlc.h>
#include <linux/dma-mapping.h>
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
#endif
#define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -320,7 +322,7 @@ struct mgsl_struct {
int dosyncppp;
spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
struct net_device *netdev;
#endif
};
@@ -728,7 +730,7 @@ static void usc_loopmode_send_done( struct mgsl_struct * info );
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
@@ -1277,7 +1279,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
info->drop_rts_on_tx_done = 0;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
@@ -1342,7 +1344,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
info->input_signal_events.dcd_up++;
} else
info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (status & MISCSTATUS_DCD)
netif_carrier_on(info->netdev);
@@ -4313,7 +4315,7 @@ static void mgsl_add_device( struct mgsl_struct *info )
info->max_frame_size );
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_init(info);
#endif
@@ -4471,7 +4473,7 @@ static void synclink_cleanup(void)
info = mgsl_device_list;
while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_exit(info);
#endif
mgsl_release_resources(info);
@@ -6645,7 +6647,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
return_frame = 1;
}
framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
{
struct net_device_stats *stats = hdlc_stats(info->netdev);
stats->rx_errors++;
@@ -6721,7 +6723,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
*ptmp);
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
else
@@ -7625,7 +7627,7 @@ static void mgsl_tx_timeout(unsigned long context)
spin_unlock_irqrestore(&info->irq_spinlock,flags);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
@@ -7701,7 +7703,7 @@ static int usc_loopmode_active( struct mgsl_struct * info)
return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
/**
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 07f34d43dc7..e4730a7312b 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -83,8 +83,10 @@
#include "linux/synclink.h"
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
#endif
/*
@@ -171,7 +173,7 @@ static void set_break(struct tty_struct *tty, int break_state);
/*
* generic HDLC support and callbacks
*/
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct slgt_info *info);
static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
@@ -359,7 +361,7 @@ struct slgt_info {
int netcount;
int dosyncppp;
spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
struct net_device *netdev;
#endif
@@ -1354,7 +1356,7 @@ static void set_break(struct tty_struct *tty, int break_state)
spin_unlock_irqrestore(&info->lock,flags);
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
/**
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -2002,7 +2004,7 @@ static void dcd_change(struct slgt_info *info)
} else {
info->input_signal_events.dcd_down++;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (info->signals & SerialSignal_DCD)
netif_carrier_on(info->netdev);
@@ -2180,7 +2182,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
set_signals(info);
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
@@ -3306,7 +3308,7 @@ static void add_device(struct slgt_info *info)
devstr, info->device_name, info->phys_reg_addr,
info->irq_level, info->max_frame_size);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_init(info);
#endif
}
@@ -3488,7 +3490,7 @@ static void slgt_cleanup(void)
/* release devices */
info = slgt_device_list;
while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_exit(info);
#endif
free_dma_bufs(info);
@@ -3522,6 +3524,7 @@ static int __init slgt_init(void)
if (!slgt_device_list) {
printk("%s no devices found\n",driver_name);
+ pci_unregister_driver(&pci_driver);
return -ENODEV;
}
@@ -4433,7 +4436,7 @@ check_again:
framesize = 0;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (framesize == 0) {
struct net_device_stats *stats = hdlc_stats(info->netdev);
stats->rx_errors++;
@@ -4476,7 +4479,7 @@ check_again:
framesize++;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_rx(info,info->tmp_rbuf, framesize);
else
@@ -4779,7 +4782,7 @@ static void tx_timeout(unsigned long context)
info->tx_count = 0;
spin_unlock_irqrestore(&info->lock,flags);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
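
Besides the HDLC macro conversion, slgt_init() gains a missing pci_unregister_driver() on the path where PCI registration succeeded but no adapter was found; without it, the failed module load would leave a stale driver registered with the PCI core. A sketch of the shape of that fix, with demo_pci_driver and demo_device_list standing in for the driver's real state:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/errno.h>

static struct pci_driver demo_pci_driver = {
	.name = "demo",			/* .id_table/.probe omitted in this sketch */
};
static void *demo_device_list;		/* would be filled in by the probe routine */

static int __init demo_init(void)
{
	int rc;

	rc = pci_register_driver(&demo_pci_driver);
	if (rc < 0)
		return rc;

	if (demo_device_list == NULL) {
		/* registered but nothing probed: undo before failing the load */
		pci_unregister_driver(&demo_pci_driver);
		return -ENODEV;
	}
	return 0;
}
module_init(demo_init);
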
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 13a57245cf2..20a96ef250b 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -67,8 +67,10 @@
#include <linux/workqueue.h>
#include <linux/hdlc.h>
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
#endif
#define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -280,7 +282,7 @@ typedef struct _synclinkmp_info {
int dosyncppp;
spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
struct net_device *netdev;
#endif
@@ -536,7 +538,7 @@ static void throttle(struct tty_struct * tty);
static void unthrottle(struct tty_struct * tty);
static void set_break(struct tty_struct *tty, int break_state);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(SLMP_INFO *info);
static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size);
@@ -1607,7 +1609,7 @@ static void set_break(struct tty_struct *tty, int break_state)
spin_unlock_irqrestore(&info->lock,flags);
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
/**
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -2339,7 +2341,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
set_signals(info);
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
@@ -2523,7 +2525,7 @@ void isr_io_pin( SLMP_INFO *info, u16 status )
info->input_signal_events.dcd_up++;
} else
info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (status & SerialSignal_DCD)
netif_carrier_on(info->netdev);
@@ -3783,7 +3785,7 @@ void add_device(SLMP_INFO *info)
info->irq_level,
info->max_frame_size );
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_init(info);
#endif
}
@@ -3977,7 +3979,7 @@ static void synclinkmp_cleanup(void)
/* release devices */
info = synclinkmp_device_list;
while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
hdlcdev_exit(info);
#endif
free_dma_bufs(info);
@@ -4979,7 +4981,7 @@ CheckAgain:
info->icount.rxcrc++;
framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
{
struct net_device_stats *stats = hdlc_stats(info->netdev);
stats->rx_errors++;
@@ -5020,7 +5022,7 @@ CheckAgain:
index = 0;
}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_rx(info,info->tmp_rx_buf,framesize);
else
@@ -5531,7 +5533,7 @@ void tx_timeout(unsigned long context)
spin_unlock_irqrestore(&info->lock,flags);
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index c64f5bcff94..05810c8d20b 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -182,6 +182,18 @@ static struct sysrq_key_op sysrq_showstate_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
+static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+{
+ show_state_filter(TASK_UNINTERRUPTIBLE);
+}
+static struct sysrq_key_op sysrq_showstate_blocked_op = {
+ .handler = sysrq_handle_showstate_blocked,
+ .help_msg = "showBlockedTasks",
+ .action_msg = "Show Blocked State",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+
static void sysrq_handle_showmem(int key, struct tty_struct *tty)
{
show_mem();
@@ -304,7 +316,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
/* May be assigned at init time by SMP VOYAGER */
NULL, /* v */
NULL, /* w */
- NULL, /* x */
+ &sysrq_showstate_blocked_op, /* x */
NULL, /* y */
NULL /* z */
};
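
The new SysRq-x handler simply calls show_state_filter(TASK_UNINTERRUPTIBLE), so only tasks blocked in uninterruptible sleep are dumped. The patch claims a fixed slot in sysrq_key_table[]; code outside that table (a module, for instance) would register the same kind of handler with register_sysrq_key(). A sketch of that variant, with the key choice and demo_* names as placeholders:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/sched.h>

static void demo_handle_showblocked(int key, struct tty_struct *tty)
{
	show_state_filter(TASK_UNINTERRUPTIBLE);	/* dump only D-state tasks */
}

static struct sysrq_key_op demo_showblocked_op = {
	.handler	= demo_handle_showblocked,
	.help_msg	= "showBlockedTasks",
	.action_msg	= "Show Blocked State",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};

static int __init demo_sysrq_init(void)
{
	return register_sysrq_key('z', &demo_showblocked_op);
}
module_init(demo_sysrq_init);
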
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index dd36fd04a84..07067c31c4e 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -249,6 +249,7 @@ int tosh_smm(SMMRegisters *regs)
return eax;
}
+EXPORT_SYMBOL(tosh_smm);
static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 774fa861169..33e1f66e39c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1155,6 +1155,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
list_del(&chip->list);
+ misc_deregister(&chip->vendor.miscdev);
put_device(dev);
clear_bit(chip->dev_num, dev_mask);
kfree(chip);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 75ff0286e1a..a8239dac994 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -152,7 +152,7 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y);
static void save_cur(struct vc_data *vc);
static void reset_terminal(struct vc_data *vc, int do_clear);
static void con_flush_chars(struct tty_struct *tty);
-static void set_vesa_blanking(char __user *p);
+static int set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
@@ -2369,7 +2369,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
ret = __put_user(data, p);
break;
case TIOCL_SETVESABLANK:
- set_vesa_blanking(p);
+ ret = set_vesa_blanking(p);
break;
case TIOCL_GETKMSGREDIRECT:
data = kmsg_redirect;
@@ -3313,11 +3313,15 @@ postcore_initcall(vtconsole_class_init);
* Screen blanking
*/
-static void set_vesa_blanking(char __user *p)
+static int set_vesa_blanking(char __user *p)
{
- unsigned int mode;
- get_user(mode, p + 1);
- vesa_blank_mode = (mode < 4) ? mode : 0;
+ unsigned int mode;
+
+ if (get_user(mode, p + 1))
+ return -EFAULT;
+
+ vesa_blank_mode = (mode < 4) ? mode : 0;
+ return 0;
}
void do_blank_screen(int entering_gfx)
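
set_vesa_blanking() previously ignored the result of get_user(), so a faulting copy left mode uninitialised and the TIOCL_SETVESABLANK ioctl still reported success. Returning -EFAULT and plumbing the result back through tioclinux() fixes both. The same guard in isolation, as a hedged sketch (demo_* names are placeholders):

#include <linux/uaccess.h>
#include <linux/errno.h>

static unsigned int demo_blank_mode;

static int demo_set_blanking(char __user *p)
{
	unsigned int mode;

	if (get_user(mode, p + 1))		/* non-zero return means the copy faulted */
		return -EFAULT;

	demo_blank_mode = (mode < 4) ? mode : 0;	/* clamp out-of-range values */
	return 0;
}
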
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index e275dd4a705..61138726b50 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -634,7 +634,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8);
/* set up the memory buffer's */
- if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, SLAB_ATOMIC, &usb_pcwd->intr_dma))) {
+ if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) {
printk(KERN_ERR PFX "Out of memory\n");
goto error;
}
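
The pcwd_usb change swaps the defunct SLAB_ATOMIC alias for GFP_ATOMIC: usb_buffer_alloc() takes ordinary gfp_t allocation flags. A minimal sketch mirroring the call from the hunk above; the demo_* names are placeholders.

#include <linux/usb.h>
#include <linux/errno.h>

static void *demo_intr_buffer;
static dma_addr_t demo_intr_dma;

static int demo_alloc_intr_buffer(struct usb_device *udev, size_t size)
{
	/* DMA-consistent buffer for the interrupt URB, allocated with a GFP_* flag */
	demo_intr_buffer = usb_buffer_alloc(udev, size, GFP_ATOMIC, &demo_intr_dma);
	if (demo_intr_buffer == NULL)
		return -ENOMEM;
	return 0;
}
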