Diffstat (limited to 'drivers/char')
-rw-r--r-- | drivers/char/agp/amd64-agp.c | 1
-rw-r--r-- | drivers/char/consolemap.c | 12
-rw-r--r-- | drivers/char/drm/ffb_context.c | 6
-rw-r--r-- | drivers/char/drm/ffb_drv.c | 6
-rw-r--r-- | drivers/char/ip2/i2ellis.c | 4
-rw-r--r-- | drivers/char/ipmi/ipmi_bt_sm.c | 38
-rw-r--r-- | drivers/char/ipmi/ipmi_kcs_sm.c | 48
-rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 952
-rw-r--r-- | drivers/char/ipmi/ipmi_poweroff.c | 4
-rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c | 183
-rw-r--r-- | drivers/char/ipmi/ipmi_si_sm.h | 1
-rw-r--r-- | drivers/char/ipmi/ipmi_smic_sm.c | 15
-rw-r--r-- | drivers/char/ipmi/ipmi_watchdog.c | 259
-rw-r--r-- | drivers/char/istallion.c | 7
-rw-r--r-- | drivers/char/mxser.c | 47
-rw-r--r-- | drivers/char/n_hdlc.c | 3
-rw-r--r-- | drivers/char/pcmcia/synclink_cs.c | 3
-rw-r--r-- | drivers/char/rocket.c | 6
-rw-r--r-- | drivers/char/selection.c | 3
-rw-r--r-- | drivers/char/stallion.c | 6
-rw-r--r-- | drivers/char/synclink.c | 10
-rw-r--r-- | drivers/char/synclinkmp.c | 9
-rw-r--r-- | drivers/char/tpm/tpm_nsc.c | 48
-rw-r--r-- | drivers/char/tty_io.c | 9
-rw-r--r-- | drivers/char/vt_ioctl.c | 5
25 files changed, 992 insertions(+), 693 deletions(-)
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 0e6c3a31d34..78ce98a69f3 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -13,6 +13,7 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> +#include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include "agp.h" diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c index 406dea91463..c85a4fa60da 100644 --- a/drivers/char/consolemap.c +++ b/drivers/char/consolemap.c @@ -345,17 +345,15 @@ static void con_release_unimap(struct uni_pagedir *p) for (i = 0; i < 32; i++) { if ((p1 = p->uni_pgdir[i]) != NULL) { for (j = 0; j < 32; j++) - if (p1[j]) - kfree(p1[j]); + kfree(p1[j]); kfree(p1); } p->uni_pgdir[i] = NULL; } - for (i = 0; i < 4; i++) - if (p->inverse_translations[i]) { - kfree(p->inverse_translations[i]); - p->inverse_translations[i] = NULL; - } + for (i = 0; i < 4; i++) { + kfree(p->inverse_translations[i]); + p->inverse_translations[i] = NULL; + } } void con_free_unimap(struct vc_data *vc) diff --git a/drivers/char/drm/ffb_context.c b/drivers/char/drm/ffb_context.c index 8a6cc2751bc..1383727b443 100644 --- a/drivers/char/drm/ffb_context.c +++ b/drivers/char/drm/ffb_context.c @@ -526,10 +526,8 @@ int ffb_driver_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, if (idx < 0 || idx >= FFB_MAX_CTXS) return -EINVAL; - if (fpriv->hw_state[idx] != NULL) { - kfree(fpriv->hw_state[idx]); - fpriv->hw_state[idx] = NULL; - } + kfree(fpriv->hw_state[idx]); + fpriv->hw_state[idx] = NULL; return 0; } diff --git a/drivers/char/drm/ffb_drv.c b/drivers/char/drm/ffb_drv.c index 5c121d6df9f..c13f9abb41e 100644 --- a/drivers/char/drm/ffb_drv.c +++ b/drivers/char/drm/ffb_drv.c @@ -245,14 +245,12 @@ static void ffb_driver_release(drm_device_t * dev, struct file *filp) static void ffb_driver_pretakedown(drm_device_t * dev) { - if (dev->dev_private) - kfree(dev->dev_private); + kfree(dev->dev_private); } static int ffb_driver_postcleanup(drm_device_t * dev) { - if (ffb_position != NULL) - kfree(ffb_position); + kfree(ffb_position); return 0; } diff --git a/drivers/char/ip2/i2ellis.c b/drivers/char/ip2/i2ellis.c index f834d05ccc9..dd761a1e4f0 100644 --- a/drivers/char/ip2/i2ellis.c +++ b/drivers/char/ip2/i2ellis.c @@ -106,9 +106,7 @@ iiEllisInit(void) static void iiEllisCleanup(void) { - if ( pDelayTimer != NULL ) { - kfree ( pDelayTimer ); - } + kfree(pDelayTimer); } //****************************************************************************** diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index 33862670e28..58dcdee1cd7 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -28,6 +28,8 @@ #include <linux/kernel.h> /* For printk. */ #include <linux/string.h> +#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" @@ -36,6 +38,8 @@ static int bt_debug = 0x00; /* Production value 0, see following flags */ #define BT_DEBUG_ENABLE 1 #define BT_DEBUG_MSG 2 #define BT_DEBUG_STATES 4 +module_param(bt_debug, int, 0644); +MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); /* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, and 64 byte buffers. 
However, one HP implementation wants 255 bytes of @@ -43,7 +47,7 @@ static int bt_debug = 0x00; /* Production value 0, see following flags */ Since the Open IPMI architecture is single-message oriented at this stage, the queue depth of BT is of no concern. */ -#define BT_NORMAL_TIMEOUT 2000000 /* seconds in microseconds */ +#define BT_NORMAL_TIMEOUT 5000000 /* seconds in microseconds */ #define BT_RETRY_LIMIT 2 #define BT_RESET_DELAY 6000000 /* 6 seconds after warm reset */ @@ -202,7 +206,7 @@ static int bt_get_result(struct si_sm_data *bt, msg_len = bt->read_count - 2; /* account for length & seq */ /* Always NetFn, Cmd, cCode */ if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { - printk(KERN_WARNING "BT results: bad msg_len = %d\n", msg_len); + printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len); data[0] = bt->write_data[1] | 0x4; /* Kludge a response */ data[1] = bt->write_data[3]; data[2] = IPMI_ERR_UNSPECIFIED; @@ -240,7 +244,7 @@ static void reset_flags(struct si_sm_data *bt) BT_CONTROL(BT_B_BUSY); BT_CONTROL(BT_CLR_WR_PTR); BT_CONTROL(BT_SMS_ATN); -#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION + if (BT_STATUS & BT_B2H_ATN) { int i; BT_CONTROL(BT_H_BUSY); @@ -250,7 +254,6 @@ static void reset_flags(struct si_sm_data *bt) BMC2HOST; BT_CONTROL(BT_H_BUSY); } -#endif } static inline void write_all_bytes(struct si_sm_data *bt) @@ -295,7 +298,7 @@ static inline int read_all_bytes(struct si_sm_data *bt) printk ("\n"); } if (bt->seq != bt->write_data[2]) /* idiot check */ - printk(KERN_WARNING "BT: internal error: sequence mismatch\n"); + printk(KERN_DEBUG "BT: internal error: sequence mismatch\n"); /* per the spec, the (NetFn, Seq, Cmd) tuples should match */ if ((bt->read_data[3] == bt->write_data[3]) && /* Cmd */ @@ -321,18 +324,17 @@ static void error_recovery(struct si_sm_data *bt, char *reason) bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */ status = BT_STATUS; - printk(KERN_WARNING "BT: %s in %s %s ", reason, STATE2TXT, + printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT, STATUS2TXT(buf)); (bt->error_retries)++; if (bt->error_retries > BT_RETRY_LIMIT) { - printk("retry limit (%d) exceeded\n", BT_RETRY_LIMIT); + printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT); bt->state = BT_STATE_HOSED; if (!bt->nonzero_status) printk(KERN_ERR "IPMI: BT stuck, try power cycle\n"); - else if (bt->seq == FIRST_SEQ + BT_RETRY_LIMIT) { - /* most likely during insmod */ - printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); + else if (bt->error_retries <= BT_RETRY_LIMIT + 1) { + printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n"); bt->state = BT_STATE_RESET1; } return; @@ -340,11 +342,11 @@ static void error_recovery(struct si_sm_data *bt, char *reason) /* Sometimes the BMC queues get in an "off-by-one" state...*/ if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) { - printk("retry B2H_WAIT\n"); + printk(KERN_DEBUG "retry B2H_WAIT\n"); return; } - printk("restart command\n"); + printk(KERN_DEBUG "restart command\n"); bt->state = BT_STATE_RESTART; } @@ -372,17 +374,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) return SI_SM_HOSED; if (bt->state != BT_STATE_IDLE) { /* do timeout test */ - - /* Certain states, on error conditions, can lock up a CPU - because they are effectively in an infinite loop with - CALL_WITHOUT_DELAY (right back here with time == 0). - Prevent infinite lockup by ALWAYS decrementing timeout. 
*/ - - /* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT - (noticed in ipmi_smic_sm.c January 2004) */ - - if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT)) - time = 100; bt->timeout -= time; if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) { error_recovery(bt, "timed out"); @@ -483,6 +474,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) break; case BT_STATE_RESTART: /* don't reset retries! */ + reset_flags(bt); bt->write_data[2] = ++bt->seq; bt->read_count = 0; bt->nonzero_status = 0; diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index d21853a594a..da1554194d3 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -38,16 +38,25 @@ */ #include <linux/kernel.h> /* For printk. */ +#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/string.h> +#include <linux/jiffies.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" -/* Set this if you want a printout of why the state machine was hosed - when it gets hosed. */ -#define DEBUG_HOSED_REASON +/* kcs_debug is a bit-field + * KCS_DEBUG_ENABLE - turned on for now + * KCS_DEBUG_MSG - commands and their responses + * KCS_DEBUG_STATES - state machine + */ +#define KCS_DEBUG_STATES 4 +#define KCS_DEBUG_MSG 2 +#define KCS_DEBUG_ENABLE 1 -/* Print the state machine state on entry every time. */ -#undef DEBUG_STATE +static int kcs_debug; +module_param(kcs_debug, int, 0644); +MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); /* The states the KCS driver may be in. */ enum kcs_states { @@ -91,6 +100,7 @@ enum kcs_states { #define IBF_RETRY_TIMEOUT 1000000 #define OBF_RETRY_TIMEOUT 1000000 #define MAX_ERROR_RETRIES 10 +#define ERROR0_OBF_WAIT_JIFFIES (2*HZ) struct si_sm_data { @@ -107,6 +117,7 @@ struct si_sm_data unsigned int error_retries; long ibf_timeout; long obf_timeout; + unsigned long error0_timeout; }; static unsigned int init_kcs_data(struct si_sm_data *kcs, @@ -175,11 +186,11 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) { (kcs->error_retries)++; if (kcs->error_retries > MAX_ERROR_RETRIES) { -#ifdef DEBUG_HOSED_REASON - printk("ipmi_kcs_sm: kcs hosed: %s\n", reason); -#endif + if (kcs_debug & KCS_DEBUG_ENABLE) + printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason); kcs->state = KCS_HOSED; } else { + kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; kcs->state = KCS_ERROR0; } } @@ -248,14 +259,21 @@ static void restart_kcs_transaction(struct si_sm_data *kcs) static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, unsigned int size) { + unsigned int i; + if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) { return -1; } - if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { return -2; } - + if (kcs_debug & KCS_DEBUG_MSG) { + printk(KERN_DEBUG "start_kcs_transaction -"); + for (i = 0; i < size; i ++) { + printk(" %02x", (unsigned char) (data [i])); + } + printk ("\n"); + } kcs->error_retries = 0; memcpy(kcs->write_data, data, size); kcs->write_count = size; @@ -305,9 +323,9 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) status = read_status(kcs); -#ifdef DEBUG_STATE - printk(" State = %d, %x\n", kcs->state, status); -#endif + if (kcs_debug & KCS_DEBUG_STATES) + printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status); + /* All states wait for ibf, so just do it here. 
*/ if (!check_ibf(kcs, status, time)) return SI_SM_CALL_WITH_DELAY; @@ -409,6 +427,10 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) case KCS_ERROR0: clear_obf(kcs, status); + status = read_status(kcs); + if (GET_STATUS_OBF(status)) /* controller isn't responding */ + if (time_before(jiffies, kcs->error0_timeout)) + return SI_SM_CALL_WITH_TICK_DELAY; write_cmd(kcs, KCS_GET_STATUS_ABORT); kcs->state = KCS_ERROR1; break; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 32fa82c78c7..c1d06ba449b 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -38,13 +38,13 @@ #include <linux/sched.h> #include <linux/poll.h> #include <linux/spinlock.h> -#include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> #include <linux/notifier.h> #include <linux/init.h> #include <linux/proc_fs.h> +#include <linux/rcupdate.h> #define PFX "IPMI message handler: " @@ -65,10 +65,19 @@ struct proc_dir_entry *proc_ipmi_root = NULL; the max message timer. This is in milliseconds. */ #define MAX_MSG_TIMEOUT 60000 + +/* + * The main "user" data structure. + */ struct ipmi_user { struct list_head link; + /* Set to "0" when the user is destroyed. */ + int valid; + + struct kref refcount; + /* The upper layer that handles receive messages. */ struct ipmi_user_hndl *handler; void *handler_data; @@ -87,6 +96,15 @@ struct cmd_rcvr ipmi_user_t user; unsigned char netfn; unsigned char cmd; + + /* + * This is used to form a linked lised during mass deletion. + * Since this is in an RCU list, we cannot use the link above + * or change any data until the RCU period completes. So we + * use this next variable during mass deletion so we can have + * a list and don't have to wait and restart the search on + * every individual deletion of a command. */ + struct cmd_rcvr *next; }; struct seq_table @@ -150,13 +168,11 @@ struct ipmi_smi /* What interface number are we? */ int intf_num; - /* The list of upper layers that are using me. We read-lock - this when delivering messages to the upper layer to keep - the user from going away while we are processing the - message. This means that you cannot add or delete a user - from the receive callback. */ - rwlock_t users_lock; - struct list_head users; + struct kref refcount; + + /* The list of upper layers that are using me. seq_lock + * protects this. */ + struct list_head users; /* Used for wake ups at startup. */ wait_queue_head_t waitq; @@ -193,7 +209,7 @@ struct ipmi_smi /* The list of command receivers that are registered for commands on this interface. */ - rwlock_t cmd_rcvr_lock; + struct semaphore cmd_rcvrs_lock; struct list_head cmd_rcvrs; /* Events that were queues because no one was there to receive @@ -296,16 +312,17 @@ struct ipmi_smi unsigned int events; }; +/* Used to mark an interface entry that cannot be used but is not a + * free entry, either, primarily used at creation and deletion time so + * a slot doesn't get reused too quickly. */ +#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1)) +#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ + || (i == IPMI_INVALID_INTERFACE_ENTRY)) + #define MAX_IPMI_INTERFACES 4 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; -/* Used to keep interfaces from going away while operations are - operating on interfaces. Grab read if you are not modifying the - interfaces, write if you are. 
*/ -static DECLARE_RWSEM(interfaces_sem); - -/* Directly protects the ipmi_interfaces data structure. This is - claimed in the timer interrupt. */ +/* Directly protects the ipmi_interfaces data structure. */ static DEFINE_SPINLOCK(interfaces_lock); /* List of watchers that want to know when smi's are added and @@ -313,20 +330,72 @@ static DEFINE_SPINLOCK(interfaces_lock); static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); static DECLARE_RWSEM(smi_watchers_sem); + +static void free_recv_msg_list(struct list_head *q) +{ + struct ipmi_recv_msg *msg, *msg2; + + list_for_each_entry_safe(msg, msg2, q, link) { + list_del(&msg->link); + ipmi_free_recv_msg(msg); + } +} + +static void clean_up_interface_data(ipmi_smi_t intf) +{ + int i; + struct cmd_rcvr *rcvr, *rcvr2; + struct list_head list; + + free_recv_msg_list(&intf->waiting_msgs); + free_recv_msg_list(&intf->waiting_events); + + /* Wholesale remove all the entries from the list in the + * interface and wait for RCU to know that none are in use. */ + down(&intf->cmd_rcvrs_lock); + list_add_rcu(&list, &intf->cmd_rcvrs); + list_del_rcu(&intf->cmd_rcvrs); + up(&intf->cmd_rcvrs_lock); + synchronize_rcu(); + + list_for_each_entry_safe(rcvr, rcvr2, &list, link) + kfree(rcvr); + + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { + if ((intf->seq_table[i].inuse) + && (intf->seq_table[i].recv_msg)) + { + ipmi_free_recv_msg(intf->seq_table[i].recv_msg); + } + } +} + +static void intf_free(struct kref *ref) +{ + ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount); + + clean_up_interface_data(intf); + kfree(intf); +} + int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) { - int i; + int i; + unsigned long flags; - down_read(&interfaces_sem); down_write(&smi_watchers_sem); list_add(&(watcher->link), &smi_watchers); + up_write(&smi_watchers_sem); + spin_lock_irqsave(&interfaces_lock, flags); for (i = 0; i < MAX_IPMI_INTERFACES; i++) { - if (ipmi_interfaces[i] != NULL) { - watcher->new_smi(i); - } + ipmi_smi_t intf = ipmi_interfaces[i]; + if (IPMI_INVALID_INTERFACE(intf)) + continue; + spin_unlock_irqrestore(&interfaces_lock, flags); + watcher->new_smi(i); + spin_lock_irqsave(&interfaces_lock, flags); } - up_write(&smi_watchers_sem); - up_read(&interfaces_sem); + spin_unlock_irqrestore(&interfaces_lock, flags); return 0; } @@ -471,8 +540,8 @@ static void deliver_response(struct ipmi_recv_msg *msg) } ipmi_free_recv_msg(msg); } else { - msg->user->handler->ipmi_recv_hndl(msg, - msg->user->handler_data); + ipmi_user_t user = msg->user; + user->handler->ipmi_recv_hndl(msg, user->handler_data); } } @@ -662,15 +731,18 @@ int ipmi_create_user(unsigned int if_num, if (! new_user) return -ENOMEM; - down_read(&interfaces_sem); - if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL) - { - rv = -EINVAL; - goto out_unlock; + spin_lock_irqsave(&interfaces_lock, flags); + intf = ipmi_interfaces[if_num]; + if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) { + spin_unlock_irqrestore(&interfaces_lock, flags); + return -EINVAL; } - intf = ipmi_interfaces[if_num]; + /* Note that each existing user holds a refcount to the interface. 
*/ + kref_get(&intf->refcount); + spin_unlock_irqrestore(&interfaces_lock, flags); + kref_init(&new_user->refcount); new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; @@ -678,98 +750,92 @@ int ipmi_create_user(unsigned int if_num, if (!try_module_get(intf->handlers->owner)) { rv = -ENODEV; - goto out_unlock; + goto out_err; } if (intf->handlers->inc_usecount) { rv = intf->handlers->inc_usecount(intf->send_info); if (rv) { module_put(intf->handlers->owner); - goto out_unlock; + goto out_err; } } - write_lock_irqsave(&intf->users_lock, flags); - list_add_tail(&new_user->link, &intf->users); - write_unlock_irqrestore(&intf->users_lock, flags); - - out_unlock: - if (rv) { - kfree(new_user); - } else { - *user = new_user; - } + new_user->valid = 1; + spin_lock_irqsave(&intf->seq_lock, flags); + list_add_rcu(&new_user->link, &intf->users); + spin_unlock_irqrestore(&intf->seq_lock, flags); + *user = new_user; + return 0; - up_read(&interfaces_sem); + out_err: + kfree(new_user); + kref_put(&intf->refcount, intf_free); return rv; } -static int ipmi_destroy_user_nolock(ipmi_user_t user) +static void free_user(struct kref *ref) +{ + ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); + kfree(user); +} + +int ipmi_destroy_user(ipmi_user_t user) { int rv = -ENODEV; - ipmi_user_t t_user; - struct cmd_rcvr *rcvr, *rcvr2; + ipmi_smi_t intf = user->intf; int i; unsigned long flags; + struct cmd_rcvr *rcvr; + struct list_head *entry1, *entry2; + struct cmd_rcvr *rcvrs = NULL; - /* Find the user and delete them from the list. */ - list_for_each_entry(t_user, &(user->intf->users), link) { - if (t_user == user) { - list_del(&t_user->link); - rv = 0; - break; - } - } + user->valid = 1; - if (rv) { - goto out_unlock; - } + /* Remove the user from the interface's sequence table. */ + spin_lock_irqsave(&intf->seq_lock, flags); + list_del_rcu(&user->link); - /* Remove the user from the interfaces sequence table. */ - spin_lock_irqsave(&(user->intf->seq_lock), flags); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { - if (user->intf->seq_table[i].inuse - && (user->intf->seq_table[i].recv_msg->user == user)) + if (intf->seq_table[i].inuse + && (intf->seq_table[i].recv_msg->user == user)) { - user->intf->seq_table[i].inuse = 0; + intf->seq_table[i].inuse = 0; } } - spin_unlock_irqrestore(&(user->intf->seq_lock), flags); - - /* Remove the user from the command receiver's table. */ - write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags); - list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) { + spin_unlock_irqrestore(&intf->seq_lock, flags); + + /* + * Remove the user from the command receiver's table. First + * we build a list of everything (not using the standard link, + * since other things may be using it till we do + * synchronize_rcu()) then free everything in that list. 
+ */ + down(&intf->cmd_rcvrs_lock); + list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) { + rcvr = list_entry(entry1, struct cmd_rcvr, link); if (rcvr->user == user) { - list_del(&rcvr->link); - kfree(rcvr); + list_del_rcu(&rcvr->link); + rcvr->next = rcvrs; + rcvrs = rcvr; } } - write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); + up(&intf->cmd_rcvrs_lock); + synchronize_rcu(); + while (rcvrs) { + rcvr = rcvrs; + rcvrs = rcvr->next; + kfree(rcvr); + } - kfree(user); + module_put(intf->handlers->owner); + if (intf->handlers->dec_usecount) + intf->handlers->dec_usecount(intf->send_info); - out_unlock: + kref_put(&intf->refcount, intf_free); - return rv; -} - -int ipmi_destroy_user(ipmi_user_t user) -{ - int rv; - ipmi_smi_t intf = user->intf; - unsigned long flags; + kref_put(&user->refcount, free_user); - down_read(&interfaces_sem); - write_lock_irqsave(&intf->users_lock, flags); - rv = ipmi_destroy_user_nolock(user); - if (!rv) { - module_put(intf->handlers->owner); - if (intf->handlers->dec_usecount) - intf->handlers->dec_usecount(intf->send_info); - } - - write_unlock_irqrestore(&intf->users_lock, flags); - up_read(&interfaces_sem); return rv; } @@ -823,62 +889,78 @@ int ipmi_get_my_LUN(ipmi_user_t user, int ipmi_set_gets_events(ipmi_user_t user, int val) { - unsigned long flags; - struct ipmi_recv_msg *msg, *msg2; + unsigned long flags; + ipmi_smi_t intf = user->intf; + struct ipmi_recv_msg *msg, *msg2; + struct list_head msgs; - read_lock(&(user->intf->users_lock)); - spin_lock_irqsave(&(user->intf->events_lock), flags); + INIT_LIST_HEAD(&msgs); + + spin_lock_irqsave(&intf->events_lock, flags); user->gets_events = val; if (val) { /* Deliver any queued events. */ - list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) { + list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) { list_del(&msg->link); - msg->user = user; - deliver_response(msg); + list_add_tail(&msg->link, &msgs); } } - - spin_unlock_irqrestore(&(user->intf->events_lock), flags); - read_unlock(&(user->intf->users_lock)); + + /* Hold the events lock while doing this to preserve order. */ + list_for_each_entry_safe(msg, msg2, &msgs, link) { + msg->user = user; + kref_get(&user->refcount); + deliver_response(msg); + } + + spin_unlock_irqrestore(&intf->events_lock, flags); return 0; } +static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, + unsigned char netfn, + unsigned char cmd) +{ + struct cmd_rcvr *rcvr; + + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) + return rcvr; + } + return NULL; +} + int ipmi_register_for_cmd(ipmi_user_t user, unsigned char netfn, unsigned char cmd) { - struct cmd_rcvr *cmp; - unsigned long flags; - struct cmd_rcvr *rcvr; - int rv = 0; + ipmi_smi_t intf = user->intf; + struct cmd_rcvr *rcvr; + struct cmd_rcvr *entry; + int rv = 0; rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); if (! rcvr) return -ENOMEM; + rcvr->cmd = cmd; + rcvr->netfn = netfn; + rcvr->user = user; - read_lock(&(user->intf->users_lock)); - write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags); + down(&intf->cmd_rcvrs_lock); /* Make sure the command/netfn is not already registered. */ - list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) { - if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) { - rv = -EBUSY; - break; - } - } - - if (! 
rv) { - rcvr->cmd = cmd; - rcvr->netfn = netfn; - rcvr->user = user; - list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs)); + entry = find_cmd_rcvr(intf, netfn, cmd); + if (entry) { + rv = -EBUSY; + goto out_unlock; } - write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); - read_unlock(&(user->intf->users_lock)); + list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); + out_unlock: + up(&intf->cmd_rcvrs_lock); if (rv) kfree(rcvr); @@ -889,31 +971,28 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, unsigned char netfn, unsigned char cmd) { - unsigned long flags; - struct cmd_rcvr *rcvr; - int rv = -ENOENT; + ipmi_smi_t intf = user->intf; + struct cmd_rcvr *rcvr; - read_lock(&(user->intf->users_lock)); - write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags); + down(&intf->cmd_rcvrs_lock); /* Make sure the command/netfn is not already registered. */ - list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) { - if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) { - rv = 0; - list_del(&rcvr->link); - kfree(rcvr); - break; - } + rcvr = find_cmd_rcvr(intf, netfn, cmd); + if ((rcvr) && (rcvr->user == user)) { + list_del_rcu(&rcvr->link); + up(&intf->cmd_rcvrs_lock); + synchronize_rcu(); + kfree(rcvr); + return 0; + } else { + up(&intf->cmd_rcvrs_lock); + return -ENOENT; } - write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); - read_unlock(&(user->intf->users_lock)); - - return rv; } void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) { - user->intf->handlers->set_run_to_completion(user->intf->send_info, - val); + ipmi_smi_t intf = user->intf; + intf->handlers->set_run_to_completion(intf->send_info, val); } static unsigned char @@ -1010,19 +1089,19 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, supplied in certain circumstances (mainly at panic time). If messages are supplied, they will be freed, even if an error occurs. */ -static inline int i_ipmi_request(ipmi_user_t user, - ipmi_smi_t intf, - struct ipmi_addr *addr, - long msgid, - struct kernel_ipmi_msg *msg, - void *user_msg_data, - void *supplied_smi, - struct ipmi_recv_msg *supplied_recv, - int priority, - unsigned char source_address, - unsigned char source_lun, - int retries, - unsigned int retry_time_ms) +static int i_ipmi_request(ipmi_user_t user, + ipmi_smi_t intf, + struct ipmi_addr *addr, + long msgid, + struct kernel_ipmi_msg *msg, + void *user_msg_data, + void *supplied_smi, + struct ipmi_recv_msg *supplied_recv, + int priority, + unsigned char source_address, + unsigned char source_lun, + int retries, + unsigned int retry_time_ms) { int rv = 0; struct ipmi_smi_msg *smi_msg; @@ -1051,6 +1130,8 @@ static inline int i_ipmi_request(ipmi_user_t user, } recv_msg->user = user; + if (user) + kref_get(&user->refcount); recv_msg->msgid = msgid; /* Store the message to send in the receive message so timeout responses can get the proper response data. 
*/ @@ -1725,11 +1806,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, unsigned char version_major, unsigned char version_minor, unsigned char slave_addr, - ipmi_smi_t *intf) + ipmi_smi_t *new_intf) { int i, j; int rv; - ipmi_smi_t new_intf; + ipmi_smi_t intf; unsigned long flags; @@ -1745,189 +1826,142 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, return -ENODEV; } - new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL); - if (!new_intf) + intf = kmalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) return -ENOMEM; - memset(new_intf, 0, sizeof(*new_intf)); - - new_intf->proc_dir = NULL; + memset(intf, 0, sizeof(*intf)); + intf->intf_num = -1; + kref_init(&intf->refcount); + intf->version_major = version_major; + intf->version_minor = version_minor; + for (j = 0; j < IPMI_MAX_CHANNELS; j++) { + intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; + intf->channels[j].lun = 2; + } + if (slave_addr != 0) + intf->channels[0].address = slave_addr; + INIT_LIST_HEAD(&intf->users); + intf->handlers = handlers; + intf->send_info = send_info; + spin_lock_init(&intf->seq_lock); + for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { + intf->seq_table[j].inuse = 0; + intf->seq_table[j].seqid = 0; + } + intf->curr_seq = 0; +#ifdef CONFIG_PROC_FS + spin_lock_init(&intf->proc_entry_lock); +#endif + spin_lock_init(&intf->waiting_msgs_lock); + INIT_LIST_HEAD(&intf->waiting_msgs); + spin_lock_init(&intf->events_lock); + INIT_LIST_HEAD(&intf->waiting_events); + intf->waiting_events_count = 0; + init_MUTEX(&intf->cmd_rcvrs_lock); + INIT_LIST_HEAD(&intf->cmd_rcvrs); + init_waitqueue_head(&intf->waitq); + + spin_lock_init(&intf->counter_lock); + intf->proc_dir = NULL; rv = -ENOMEM; - - down_write(&interfaces_sem); + spin_lock_irqsave(&interfaces_lock, flags); for (i = 0; i < MAX_IPMI_INTERFACES; i++) { if (ipmi_interfaces[i] == NULL) { - new_intf->intf_num = i; - new_intf->version_major = version_major; - new_intf->version_minor = version_minor; - for (j = 0; j < IPMI_MAX_CHANNELS; j++) { - new_intf->channels[j].address - = IPMI_BMC_SLAVE_ADDR; - new_intf->channels[j].lun = 2; - } - if (slave_addr != 0) - new_intf->channels[0].address = slave_addr; - rwlock_init(&(new_intf->users_lock)); - INIT_LIST_HEAD(&(new_intf->users)); - new_intf->handlers = handlers; - new_intf->send_info = send_info; - spin_lock_init(&(new_intf->seq_lock)); - for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { - new_intf->seq_table[j].inuse = 0; - new_intf->seq_table[j].seqid = 0; - } - new_intf->curr_seq = 0; -#ifdef CONFIG_PROC_FS - spin_lock_init(&(new_intf->proc_entry_lock)); -#endif - spin_lock_init(&(new_intf->waiting_msgs_lock)); - INIT_LIST_HEAD(&(new_intf->waiting_msgs)); - spin_lock_init(&(new_intf->events_lock)); - INIT_LIST_HEAD(&(new_intf->waiting_events)); - new_intf->waiting_events_count = 0; - rwlock_init(&(new_intf->cmd_rcvr_lock)); - init_waitqueue_head(&new_intf->waitq); - INIT_LIST_HEAD(&(new_intf->cmd_rcvrs)); - - spin_lock_init(&(new_intf->counter_lock)); - - spin_lock_irqsave(&interfaces_lock, flags); - ipmi_interfaces[i] = new_intf; - spin_unlock_irqrestore(&interfaces_lock, flags); - + intf->intf_num = i; + /* Reserve the entry till we are done. 
*/ + ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; rv = 0; - *intf = new_intf; break; } } + spin_unlock_irqrestore(&interfaces_lock, flags); + if (rv) + goto out; - downgrade_write(&interfaces_sem); - - if (rv == 0) - rv = add_proc_entries(*intf, i); - - if (rv == 0) { - if ((version_major > 1) - || ((version_major == 1) && (version_minor >= 5))) - { - /* Start scanning the channels to see what is - available. */ - (*intf)->null_user_handler = channel_handler; - (*intf)->curr_channel = 0; - rv = send_channel_info_cmd(*intf, 0); - if (rv) - goto out; + /* FIXME - this is an ugly kludge, this sets the intf for the + caller before sending any messages with it. */ + *new_intf = intf; - /* Wait for the channel info to be read. */ - up_read(&interfaces_sem); - wait_event((*intf)->waitq, - ((*intf)->curr_channel>=IPMI_MAX_CHANNELS)); - down_read(&interfaces_sem); + if ((version_major > 1) + || ((version_major == 1) && (version_minor >= 5))) + { + /* Start scanning the channels to see what is + available. */ + intf->null_user_handler = channel_handler; + intf->curr_channel = 0; + rv = send_channel_info_cmd(intf, 0); + if (rv) + goto out; - if (ipmi_interfaces[i] != new_intf) - /* Well, it went away. Just return. */ - goto out; - } else { - /* Assume a single IPMB channel at zero. */ - (*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; - (*intf)->channels[0].protocol - = IPMI_CHANNEL_PROTOCOL_IPMB; - } - - /* Call all the watcher interfaces to tell - them that a new interface is available. */ - call_smi_watchers(i); + /* Wait for the channel info to be read. */ + wait_event(intf->waitq, + intf->curr_channel >= IPMI_MAX_CHANNELS); + } else { + /* Assume a single IPMB channel at zero. */ + intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; + intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; } - out: - up_read(&interfaces_sem); + if (rv == 0) + rv = add_proc_entries(intf, i); + out: if (rv) { - if (new_intf->proc_dir) - remove_proc_entries(new_intf); - kfree(new_intf); + if (intf->proc_dir) + remove_proc_entries(intf); + kref_put(&intf->refcount, intf_free); + if (i < MAX_IPMI_INTERFACES) { + spin_lock_irqsave(&interfaces_lock, flags); + ipmi_interfaces[i] = NULL; + spin_unlock_irqrestore(&interfaces_lock, flags); + } + } else { + spin_lock_irqsave(&interfaces_lock, flags); + ipmi_interfaces[i] = intf; + spin_unlock_irqrestore(&interfaces_lock, flags); + call_smi_watchers(i); } return rv; } -static void free_recv_msg_list(struct list_head *q) -{ - struct ipmi_recv_msg *msg, *msg2; - - list_for_each_entry_safe(msg, msg2, q, link) { - list_del(&msg->link); - ipmi_free_recv_msg(msg); - } -} - -static void free_cmd_rcvr_list(struct list_head *q) -{ - struct cmd_rcvr *rcvr, *rcvr2; - - list_for_each_entry_safe(rcvr, rcvr2, q, link) { - list_del(&rcvr->link); - kfree(rcvr); - } -} - -static void clean_up_interface_data(ipmi_smi_t intf) -{ - int i; - - free_recv_msg_list(&(intf->waiting_msgs)); - free_recv_msg_list(&(intf->waiting_events)); - free_cmd_rcvr_list(&(intf->cmd_rcvrs)); - - for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { - if ((intf->seq_table[i].inuse) - && (intf->seq_table[i].recv_msg)) - { - ipmi_free_recv_msg(intf->seq_table[i].recv_msg); - } - } -} - int ipmi_unregister_smi(ipmi_smi_t intf) { - int rv = -ENODEV; int i; struct ipmi_smi_watcher *w; unsigned long flags; - down_write(&interfaces_sem); - if (list_empty(&(intf->users))) - { - for (i = 0; i < MAX_IPMI_INTERFACES; i++) { - if (ipmi_interfaces[i] == intf) { - remove_proc_entries(intf); - 
spin_lock_irqsave(&interfaces_lock, flags); - ipmi_interfaces[i] = NULL; - clean_up_interface_data(intf); - spin_unlock_irqrestore(&interfaces_lock,flags); - kfree(intf); - rv = 0; - goto out_call_watcher; - } + spin_lock_irqsave(&interfaces_lock, flags); + for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + if (ipmi_interfaces[i] == intf) { + /* Set the interface number reserved until we + * are done. */ + ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; + intf->intf_num = -1; + break; } - } else { - rv = -EBUSY; } - up_write(&interfaces_sem); + spin_unlock_irqrestore(&interfaces_lock,flags); - return rv; + if (i == MAX_IPMI_INTERFACES) + return -ENODEV; - out_call_watcher: - downgrade_write(&interfaces_sem); + remove_proc_entries(intf); /* Call all the watcher interfaces to tell them that an interface is gone. */ down_read(&smi_watchers_sem); - list_for_each_entry(w, &smi_watchers, link) { + list_for_each_entry(w, &smi_watchers, link) w->smi_gone(i); - } up_read(&smi_watchers_sem); - up_read(&interfaces_sem); + + /* Allow the entry to be reused now. */ + spin_lock_irqsave(&interfaces_lock, flags); + ipmi_interfaces[i] = NULL; + spin_unlock_irqrestore(&interfaces_lock,flags); + + kref_put(&intf->refcount, intf_free); return 0; } @@ -1998,14 +2032,14 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { - struct cmd_rcvr *rcvr; - int rv = 0; - unsigned char netfn; - unsigned char cmd; - ipmi_user_t user = NULL; - struct ipmi_ipmb_addr *ipmb_addr; - struct ipmi_recv_msg *recv_msg; - unsigned long flags; + struct cmd_rcvr *rcvr; + int rv = 0; + unsigned char netfn; + unsigned char cmd; + ipmi_user_t user = NULL; + struct ipmi_ipmb_addr *ipmb_addr; + struct ipmi_recv_msg *recv_msg; + unsigned long flags; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -2023,16 +2057,14 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, netfn = msg->rsp[4] >> 2; cmd = msg->rsp[8]; - read_lock(&(intf->cmd_rcvr_lock)); - - /* Find the command/netfn. */ - list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { - if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) { - user = rcvr->user; - break; - } - } - read_unlock(&(intf->cmd_rcvr_lock)); + rcu_read_lock(); + rcvr = find_cmd_rcvr(intf, netfn, cmd); + if (rcvr) { + user = rcvr->user; + kref_get(&user->refcount); + } else + user = NULL; + rcu_read_unlock(); if (user == NULL) { /* We didn't find a user, deliver an error response. */ @@ -2079,6 +2111,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, message, so requeue it for handling later. */ rv = 1; + kref_put(&user->refcount, free_user); } else { /* Extract the source address from the data. */ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; @@ -2179,14 +2212,14 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, static int handle_lan_get_msg_cmd(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { - struct cmd_rcvr *rcvr; - int rv = 0; - unsigned char netfn; - unsigned char cmd; - ipmi_user_t user = NULL; - struct ipmi_lan_addr *lan_addr; - struct ipmi_recv_msg *recv_msg; - unsigned long flags; + struct cmd_rcvr *rcvr; + int rv = 0; + unsigned char netfn; + unsigned char cmd; + ipmi_user_t user = NULL; + struct ipmi_lan_addr *lan_addr; + struct ipmi_recv_msg *recv_msg; + unsigned long flags; if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. 
*/ @@ -2204,19 +2237,17 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, netfn = msg->rsp[6] >> 2; cmd = msg->rsp[10]; - read_lock(&(intf->cmd_rcvr_lock)); - - /* Find the command/netfn. */ - list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { - if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) { - user = rcvr->user; - break; - } - } - read_unlock(&(intf->cmd_rcvr_lock)); + rcu_read_lock(); + rcvr = find_cmd_rcvr(intf, netfn, cmd); + if (rcvr) { + user = rcvr->user; + kref_get(&user->refcount); + } else + user = NULL; + rcu_read_unlock(); if (user == NULL) { - /* We didn't find a user, deliver an error response. */ + /* We didn't find a user, just give up. */ spin_lock_irqsave(&intf->counter_lock, flags); intf->unhandled_commands++; spin_unlock_irqrestore(&intf->counter_lock, flags); @@ -2235,6 +2266,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, message, so requeue it for handling later. */ rv = 1; + kref_put(&user->refcount, free_user); } else { /* Extract the source address from the data. */ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; @@ -2286,8 +2318,6 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, recv_msg->msg.data_len = msg->rsp_size - 3; } -/* This will be called with the intf->users_lock read-locked, so no need - to do that here. */ static int handle_read_event_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { @@ -2313,7 +2343,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf, INIT_LIST_HEAD(&msgs); - spin_lock_irqsave(&(intf->events_lock), flags); + spin_lock_irqsave(&intf->events_lock, flags); spin_lock(&intf->counter_lock); intf->events++; @@ -2321,12 +2351,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf, /* Allocate and fill in one message for every user that is getting events. */ - list_for_each_entry(user, &(intf->users), link) { + rcu_read_lock(); + list_for_each_entry_rcu(user, &intf->users, link) { if (! user->gets_events) continue; recv_msg = ipmi_alloc_recv_msg(); if (! recv_msg) { + rcu_read_unlock(); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { list_del(&recv_msg->link); ipmi_free_recv_msg(recv_msg); @@ -2342,8 +2374,10 @@ static int handle_read_event_rsp(ipmi_smi_t intf, copy_event_into_recv_msg(recv_msg, msg); recv_msg->user = user; + kref_get(&user->refcount); list_add_tail(&(recv_msg->link), &msgs); } + rcu_read_unlock(); if (deliver_count) { /* Now deliver all the messages. */ @@ -2382,9 +2416,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf, struct ipmi_smi_msg *msg) { struct ipmi_recv_msg *recv_msg; - int found = 0; - struct ipmi_user *user; unsigned long flags; + struct ipmi_user *user; recv_msg = (struct ipmi_recv_msg *) msg->user_data; if (recv_msg == NULL) @@ -2396,16 +2429,9 @@ static int handle_bmc_rsp(ipmi_smi_t intf, return 0; } + user = recv_msg->user; /* Make sure the user still exists. */ - list_for_each_entry(user, &(intf->users), link) { - if (user == recv_msg->user) { - /* Found it, so we can deliver it */ - found = 1; - break; - } - } - - if ((! found) && recv_msg->user) { + if (user && !user->valid) { /* The user for the message went away, so give up. */ spin_lock_irqsave(&intf->counter_lock, flags); intf->unhandled_local_responses++; @@ -2486,7 +2512,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf, { /* It's a response to a response we sent. For this we deliver a send message response to the user. 
*/ - struct ipmi_recv_msg *recv_msg = msg->user_data; + struct ipmi_recv_msg *recv_msg = msg->user_data; requeue = 0; if (msg->rsp_size < 2) @@ -2498,13 +2524,18 @@ static int handle_new_recv_msg(ipmi_smi_t intf, /* Invalid channel number */ goto out; - if (recv_msg) { - recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; - recv_msg->msg.data = recv_msg->msg_data; - recv_msg->msg.data_len = 1; - recv_msg->msg_data[0] = msg->rsp[2]; - deliver_response(recv_msg); - } + if (!recv_msg) + goto out; + + /* Make sure the user still exists. */ + if (!recv_msg->user || !recv_msg->user->valid) + goto out; + + recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; + recv_msg->msg.data = recv_msg->msg_data; + recv_msg->msg.data_len = 1; + recv_msg->msg_data[0] = msg->rsp[2]; + deliver_response(recv_msg); } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { @@ -2570,14 +2601,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, int rv; - /* Lock the user lock so the user can't go away while we are - working on it. */ - read_lock(&(intf->users_lock)); - if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data == NULL)) { + && (msg->user_data == NULL)) + { /* This is the local response to a command send, start the timer for these. The user_data will not be NULL if this is a response send, and we will let @@ -2612,46 +2640,46 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, } ipmi_free_smi_msg(msg); - goto out_unlock; + goto out; } /* To preserve message order, if the list is not empty, we tack this message onto the end of the list. */ - spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); - if (!list_empty(&(intf->waiting_msgs))) { - list_add_tail(&(msg->link), &(intf->waiting_msgs)); - spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); - goto out_unlock; + spin_lock_irqsave(&intf->waiting_msgs_lock, flags); + if (!list_empty(&intf->waiting_msgs)) { + list_add_tail(&msg->link, &intf->waiting_msgs); + spin_unlock(&intf->waiting_msgs_lock); + goto out; } - spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); + spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); rv = handle_new_recv_msg(intf, msg); if (rv > 0) { /* Could not handle the message now, just add it to a list to handle later. */ - spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); - list_add_tail(&(msg->link), &(intf->waiting_msgs)); - spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); + spin_lock(&intf->waiting_msgs_lock); + list_add_tail(&msg->link, &intf->waiting_msgs); + spin_unlock(&intf->waiting_msgs_lock); } else if (rv == 0) { ipmi_free_smi_msg(msg); } - out_unlock: - read_unlock(&(intf->users_lock)); + out: + return; } void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) { ipmi_user_t user; - read_lock(&(intf->users_lock)); - list_for_each_entry(user, &(intf->users), link) { + rcu_read_lock(); + list_for_each_entry_rcu(user, &intf->users, link) { if (! 
user->handler->ipmi_watchdog_pretimeout) continue; user->handler->ipmi_watchdog_pretimeout(user->handler_data); } - read_unlock(&(intf->users_lock)); + rcu_read_unlock(); } static void @@ -2691,8 +2719,65 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, return smi_msg; } -static void -ipmi_timeout_handler(long timeout_period) +static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, + struct list_head *timeouts, long timeout_period, + int slot, unsigned long *flags) +{ + struct ipmi_recv_msg *msg; + + if (!ent->inuse) + return; + + ent->timeout -= timeout_period; + if (ent->timeout > 0) + return; + + if (ent->retries_left == 0) { + /* The message has used all its retries. */ + ent->inuse = 0; + msg = ent->recv_msg; + list_add_tail(&msg->link, timeouts); + spin_lock(&intf->counter_lock); + if (ent->broadcast) + intf->timed_out_ipmb_broadcasts++; + else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) + intf->timed_out_lan_commands++; + else + intf->timed_out_ipmb_commands++; + spin_unlock(&intf->counter_lock); + } else { + struct ipmi_smi_msg *smi_msg; + /* More retries, send again. */ + + /* Start with the max timer, set to normal + timer after the message is sent. */ + ent->timeout = MAX_MSG_TIMEOUT; + ent->retries_left--; + spin_lock(&intf->counter_lock); + if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) + intf->retransmitted_lan_commands++; + else + intf->retransmitted_ipmb_commands++; + spin_unlock(&intf->counter_lock); + + smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, + ent->seqid); + if (! smi_msg) + return; + + spin_unlock_irqrestore(&intf->seq_lock, *flags); + /* Send the new message. We send with a zero + * priority. It timed out, I doubt time is + * that critical now, and high priority + * messages are really only for messages to the + * local MC, which don't get resent. */ + intf->handlers->sender(intf->send_info, + smi_msg, 0); + spin_lock_irqsave(&intf->seq_lock, *flags); + } +} + +static void ipmi_timeout_handler(long timeout_period) { ipmi_smi_t intf; struct list_head timeouts; @@ -2706,14 +2791,14 @@ ipmi_timeout_handler(long timeout_period) spin_lock(&interfaces_lock); for (i = 0; i < MAX_IPMI_INTERFACES; i++) { intf = ipmi_interfaces[i]; - if (intf == NULL) + if (IPMI_INVALID_INTERFACE(intf)) continue; - - read_lock(&(intf->users_lock)); + kref_get(&intf->refcount); + spin_unlock(&interfaces_lock); /* See if any waiting messages need to be processed. */ - spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); - list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) { + spin_lock_irqsave(&intf->waiting_msgs_lock, flags); + list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) { if (! handle_new_recv_msg(intf, smi_msg)) { list_del(&smi_msg->link); ipmi_free_smi_msg(smi_msg); @@ -2723,73 +2808,23 @@ ipmi_timeout_handler(long timeout_period) break; } } - spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); + spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); /* Go through the seq table and find any messages that have timed out, putting them in the timeouts list. */ - spin_lock_irqsave(&(intf->seq_lock), flags); - for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { - struct seq_table *ent = &(intf->seq_table[j]); - if (!ent->inuse) - continue; - - ent->timeout -= timeout_period; - if (ent->timeout > 0) - continue; - - if (ent->retries_left == 0) { - /* The message has used all its retries. 
*/ - ent->inuse = 0; - msg = ent->recv_msg; - list_add_tail(&(msg->link), &timeouts); - spin_lock(&intf->counter_lock); - if (ent->broadcast) - intf->timed_out_ipmb_broadcasts++; - else if (ent->recv_msg->addr.addr_type - == IPMI_LAN_ADDR_TYPE) - intf->timed_out_lan_commands++; - else - intf->timed_out_ipmb_commands++; - spin_unlock(&intf->counter_lock); - } else { - struct ipmi_smi_msg *smi_msg; - /* More retries, send again. */ - - /* Start with the max timer, set to normal - timer after the message is sent. */ - ent->timeout = MAX_MSG_TIMEOUT; - ent->retries_left--; - spin_lock(&intf->counter_lock); - if (ent->recv_msg->addr.addr_type - == IPMI_LAN_ADDR_TYPE) - intf->retransmitted_lan_commands++; - else - intf->retransmitted_ipmb_commands++; - spin_unlock(&intf->counter_lock); - smi_msg = smi_from_recv_msg(intf, - ent->recv_msg, j, ent->seqid); - if (! smi_msg) - continue; - - spin_unlock_irqrestore(&(intf->seq_lock),flags); - /* Send the new message. We send with a zero - * priority. It timed out, I doubt time is - * that critical now, and high priority - * messages are really only for messages to the - * local MC, which don't get resent. */ - intf->handlers->sender(intf->send_info, - smi_msg, 0); - spin_lock_irqsave(&(intf->seq_lock), flags); - } - } - spin_unlock_irqrestore(&(intf->seq_lock), flags); - - list_for_each_entry_safe(msg, msg2, &timeouts, link) { + spin_lock_irqsave(&intf->seq_lock, flags); + for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) + check_msg_timeout(intf, &(intf->seq_table[j]), + &timeouts, timeout_period, j, + &flags); + spin_unlock_irqrestore(&intf->seq_lock, flags); + + list_for_each_entry_safe(msg, msg2, &timeouts, link) handle_msg_timeout(msg); - } - read_unlock(&(intf->users_lock)); + kref_put(&intf->refcount, intf_free); + spin_lock(&interfaces_lock); } spin_unlock(&interfaces_lock); } @@ -2802,7 +2837,7 @@ static void ipmi_request_event(void) spin_lock(&interfaces_lock); for (i = 0; i < MAX_IPMI_INTERFACES; i++) { intf = ipmi_interfaces[i]; - if (intf == NULL) + if (IPMI_INVALID_INTERFACE(intf)) continue; intf->handlers->request_events(intf->send_info); @@ -2884,6 +2919,13 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) return rv; } +void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) +{ + if (msg->user) + kref_put(&msg->user->refcount, free_user); + msg->done(msg); +} + #ifdef CONFIG_IPMI_PANIC_EVENT static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) @@ -2964,7 +3006,7 @@ static void send_panic_events(char *str) /* For every registered interface, send the event. */ for (i = 0; i < MAX_IPMI_INTERFACES; i++) { intf = ipmi_interfaces[i]; - if (intf == NULL) + if (IPMI_INVALID_INTERFACE(intf)) continue; /* Send the event announcing the panic. */ @@ -2995,7 +3037,7 @@ static void send_panic_events(char *str) int j; intf = ipmi_interfaces[i]; - if (intf == NULL) + if (IPMI_INVALID_INTERFACE(intf)) continue; /* First job here is to figure out where to send the @@ -3131,7 +3173,7 @@ static int panic_event(struct notifier_block *this, /* For every registered interface, set it to run to completion. 
*/ for (i = 0; i < MAX_IPMI_INTERFACES; i++) { intf = ipmi_interfaces[i]; - if (intf == NULL) + if (IPMI_INVALID_INTERFACE(intf)) continue; intf->handlers->set_run_to_completion(intf->send_info, 1); @@ -3160,9 +3202,8 @@ static int ipmi_init_msghandler(void) printk(KERN_INFO "ipmi message handler version " IPMI_DRIVER_VERSION "\n"); - for (i = 0; i < MAX_IPMI_INTERFACES; i++) { + for (i = 0; i < MAX_IPMI_INTERFACES; i++) ipmi_interfaces[i] = NULL; - } #ifdef CONFIG_PROC_FS proc_ipmi_root = proc_mkdir("ipmi", NULL); @@ -3258,3 +3299,4 @@ EXPORT_SYMBOL(ipmi_get_my_LUN); EXPORT_SYMBOL(ipmi_smi_add_proc_entry); EXPORT_SYMBOL(proc_ipmi_root); EXPORT_SYMBOL(ipmi_user_set_run_to_completion); +EXPORT_SYMBOL(ipmi_free_recv_msg); diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index f66947722e1..e053eade036 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -56,7 +56,7 @@ static int poweroff_powercycle; /* parameter definition to allow user to flag power cycle */ module_param(poweroff_powercycle, int, 0644); -MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); +MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); /* Stuff from the get device id command. */ static unsigned int mfg_id; @@ -611,9 +611,7 @@ static int ipmi_poweroff_init (void) } #endif -#ifdef CONFIG_PROC_FS rv = ipmi_smi_watcher_register(&smi_watcher); -#endif if (rv) { unregister_sysctl_table(ipmi_table_header); printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index b6e5cbfb09f..ea89dca3dbb 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -51,6 +51,8 @@ #include <linux/list.h> #include <linux/pci.h> #include <linux/ioport.h> +#include <linux/notifier.h> +#include <linux/kthread.h> #include <asm/irq.h> #ifdef CONFIG_HIGH_RES_TIMERS #include <linux/hrtime.h> @@ -125,6 +127,7 @@ struct ipmi_device_id { struct smi_info { + int intf_num; ipmi_smi_t intf; struct si_sm_data *si_sm; struct si_sm_handlers *handlers; @@ -192,8 +195,7 @@ struct smi_info unsigned long last_timeout_jiffies; /* Used to gracefully stop the timer without race conditions. 
*/ - volatile int stop_operation; - volatile int timer_stopped; + atomic_t stop_operation; /* The driver will disable interrupts when it gets into a situation where it cannot handle messages due to lack of @@ -220,8 +222,16 @@ struct smi_info unsigned long events; unsigned long watchdog_pretimeouts; unsigned long incoming_messages; + + struct task_struct *thread; }; +static struct notifier_block *xaction_notifier_list; +static int register_xaction_notifier(struct notifier_block * nb) +{ + return notifier_chain_register(&xaction_notifier_list, nb); +} + static void si_restart_short_timer(struct smi_info *smi_info); static void deliver_recv_msg(struct smi_info *smi_info, @@ -281,6 +291,11 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) do_gettimeofday(&t); printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif + err = notifier_call_chain(&xaction_notifier_list, 0, smi_info); + if (err & NOTIFY_STOP_MASK) { + rv = SI_SM_CALL_WITHOUT_DELAY; + goto out; + } err = smi_info->handlers->start_transaction( smi_info->si_sm, smi_info->curr_msg->data, @@ -291,6 +306,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) rv = SI_SM_CALL_WITHOUT_DELAY; } + out: spin_unlock(&(smi_info->msg_lock)); return rv; @@ -766,6 +782,29 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion) spin_unlock_irqrestore(&(smi_info->si_lock), flags); } +static int ipmi_thread(void *data) +{ + struct smi_info *smi_info = data; + unsigned long flags; + enum si_sm_result smi_result; + + set_user_nice(current, 19); + while (!kthread_should_stop()) { + spin_lock_irqsave(&(smi_info->si_lock), flags); + smi_result=smi_event_handler(smi_info, 0); + spin_unlock_irqrestore(&(smi_info->si_lock), flags); + if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { + /* do nothing */ + } + else if (smi_result == SI_SM_CALL_WITH_DELAY) + udelay(1); + else + schedule_timeout_interruptible(1); + } + return 0; +} + + static void poll(void *send_info) { struct smi_info *smi_info = send_info; @@ -819,15 +858,13 @@ static void smi_timeout(unsigned long data) enum si_sm_result smi_result; unsigned long flags; unsigned long jiffies_now; - unsigned long time_diff; + long time_diff; #ifdef DEBUG_TIMING struct timeval t; #endif - if (smi_info->stop_operation) { - smi_info->timer_stopped = 1; + if (atomic_read(&smi_info->stop_operation)) return; - } spin_lock_irqsave(&(smi_info->si_lock), flags); #ifdef DEBUG_TIMING @@ -835,7 +872,7 @@ static void smi_timeout(unsigned long data) printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif jiffies_now = jiffies; - time_diff = ((jiffies_now - smi_info->last_timeout_jiffies) + time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); smi_result = smi_event_handler(smi_info, time_diff); @@ -900,7 +937,7 @@ static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs) smi_info->interrupts++; spin_unlock(&smi_info->count_lock); - if (smi_info->stop_operation) + if (atomic_read(&smi_info->stop_operation)) goto out; #ifdef DEBUG_TIMING @@ -1419,7 +1456,7 @@ static u32 ipmi_acpi_gpe(void *context) smi_info->interrupts++; spin_unlock(&smi_info->count_lock); - if (smi_info->stop_operation) + if (atomic_read(&smi_info->stop_operation)) goto out; #ifdef DEBUG_TIMING @@ -1919,7 +1956,8 @@ static int try_get_dev_id(struct smi_info *smi_info) smi_result = smi_info->handlers->event(smi_info->si_sm, 0); for (;;) { - if (smi_result == SI_SM_CALL_WITH_DELAY) { + if (smi_result == SI_SM_CALL_WITH_DELAY || + smi_result 
== SI_SM_CALL_WITH_TICK_DELAY) { schedule_timeout_uninterruptible(1); smi_result = smi_info->handlers->event( smi_info->si_sm, 100); @@ -2052,6 +2090,9 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) * IPMI Version = 0x51 IPMI 1.5 * Manufacturer ID = A2 02 00 Dell IANA * + * Additionally, PowerEdge systems with IPMI < 1.5 may also assert + * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. + * */ #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 @@ -2061,16 +2102,87 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) { struct ipmi_device_id *id = &smi_info->device_id; const char mfr[3]=DELL_IANA_MFR_ID; - if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) - && (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID) - && (id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV) - && (id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION)) - { - smi_info->oem_data_avail_handler = - oem_data_avail_to_receive_msg_avail; + if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) { + if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && + id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && + id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { + smi_info->oem_data_avail_handler = + oem_data_avail_to_receive_msg_avail; + } + else if (ipmi_version_major(id) < 1 || + (ipmi_version_major(id) == 1 && + ipmi_version_minor(id) < 5)) { + smi_info->oem_data_avail_handler = + oem_data_avail_to_receive_msg_avail; + } } } +#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA +static void return_hosed_msg_badsize(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg = smi_info->curr_msg; + + /* Make it a reponse */ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; + msg->rsp_size = 3; + smi_info->curr_msg = NULL; + deliver_recv_msg(smi_info, msg); +} + +/* + * dell_poweredge_bt_xaction_handler + * @info - smi_info.device_id must be populated + * + * Dell PowerEdge servers with the BT interface (x6xx and 1750) will + * not respond to a Get SDR command if the length of the data + * requested is exactly 0x3A, which leads to command timeouts and no + * data returned. This intercepts such commands, and causes userspace + * callers to try again with a different-sized buffer, which succeeds. + */ + +#define STORAGE_NETFN 0x0A +#define STORAGE_CMD_GET_SDR 0x23 +static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, + unsigned long unused, + void *in) +{ + struct smi_info *smi_info = in; + unsigned char *data = smi_info->curr_msg->data; + unsigned int size = smi_info->curr_msg->data_size; + if (size >= 8 && + (data[0]>>2) == STORAGE_NETFN && + data[1] == STORAGE_CMD_GET_SDR && + data[7] == 0x3A) { + return_hosed_msg_badsize(smi_info); + return NOTIFY_STOP; + } + return NOTIFY_DONE; +} + +static struct notifier_block dell_poweredge_bt_xaction_notifier = { + .notifier_call = dell_poweredge_bt_xaction_handler, +}; + +/* + * setup_dell_poweredge_bt_xaction_handler + * @info - smi_info.device_id must be filled in already + * + * Fills in smi_info.device_id.start_transaction_pre_hook + * when we know what function to use there. + */ +static void +setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) +{ + struct ipmi_device_id *id = &smi_info->device_id; + const char mfr[3]=DELL_IANA_MFR_ID; + if (! 
memcmp(mfr, id->manufacturer_id, sizeof(mfr)) && + smi_info->si_type == SI_BT) + register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); +} + /* * setup_oem_data_handler * @info - smi_info.device_id must be filled in already @@ -2084,6 +2196,18 @@ static void setup_oem_data_handler(struct smi_info *smi_info) setup_dell_poweredge_oem_data_handler(smi_info); } +static void setup_xaction_handlers(struct smi_info *smi_info) +{ + setup_dell_poweredge_bt_xaction_handler(smi_info); +} + +static inline void wait_for_timer_and_thread(struct smi_info *smi_info) +{ + if (smi_info->thread != ERR_PTR(-ENOMEM)) + kthread_stop(smi_info->thread); + del_timer_sync(&smi_info->si_timer); +} + /* Returns 0 if initialized, or negative on an error. */ static int init_one_smi(int intf_num, struct smi_info **smi) { @@ -2179,6 +2303,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi) goto out_err; setup_oem_data_handler(new_smi); + setup_xaction_handlers(new_smi); /* Try to claim any interrupts. */ new_smi->irq_setup(new_smi); @@ -2190,8 +2315,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) new_smi->run_to_completion = 0; new_smi->interrupt_disabled = 0; - new_smi->timer_stopped = 0; - new_smi->stop_operation = 0; + atomic_set(&new_smi->stop_operation, 0); + new_smi->intf_num = intf_num; /* Start clearing the flags before we enable interrupts or the timer to avoid racing with the timer. */ @@ -2209,7 +2334,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi) new_smi->si_timer.function = smi_timeout; new_smi->last_timeout_jiffies = jiffies; new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + add_timer(&(new_smi->si_timer)); + if (new_smi->si_type != SI_BT) + new_smi->thread = kthread_run(ipmi_thread, new_smi, + "kipmi%d", new_smi->intf_num); rv = ipmi_register_smi(&handlers, new_smi, @@ -2251,12 +2380,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) return 0; out_err_stop_timer: - new_smi->stop_operation = 1; - - /* Wait for the timer to stop. This avoids problems with race - conditions removing the timer here. */ - while (!new_smi->timer_stopped) - schedule_timeout_uninterruptible(1); + atomic_inc(&new_smi->stop_operation); + wait_for_timer_and_thread(new_smi); out_err: if (new_smi->intf) @@ -2362,8 +2487,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) spin_lock_irqsave(&(to_clean->si_lock), flags); spin_lock(&(to_clean->msg_lock)); - to_clean->stop_operation = 1; - + atomic_inc(&to_clean->stop_operation); to_clean->irq_cleanup(to_clean); spin_unlock(&(to_clean->msg_lock)); @@ -2374,10 +2498,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) interrupt. */ synchronize_sched(); - /* Wait for the timer to stop. This avoids problems with race - conditions removing the timer here. */ - while (!to_clean->timer_stopped) - schedule_timeout_uninterruptible(1); + wait_for_timer_and_thread(to_clean); /* Interrupts and timeouts are stopped, now make sure the interface is in a clean state. */ diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index 62791dd4298..bf3d4962d6a 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h @@ -62,6 +62,7 @@ enum si_sm_result { SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ + SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */ SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. 
*/ SI_SM_IDLE, /* The SM is in idle state. */ SI_SM_HOSED, /* The hardware violated the state machine. */ diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c index add2aa2732f..39d7e5ef1a2 100644 --- a/drivers/char/ipmi/ipmi_smic_sm.c +++ b/drivers/char/ipmi/ipmi_smic_sm.c @@ -43,6 +43,8 @@ #include <linux/kernel.h> /* For printk. */ #include <linux/string.h> +#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/ipmi_msgdefs.h> /* for completion codes */ #include "ipmi_si_sm.h" @@ -56,6 +58,8 @@ #define SMIC_DEBUG_ENABLE 1 static int smic_debug = 1; +module_param(smic_debug, int, 0644); +MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); enum smic_states { SMIC_IDLE, @@ -76,11 +80,17 @@ enum smic_states { #define SMIC_MAX_ERROR_RETRIES 3 /* Timeouts in microseconds. */ -#define SMIC_RETRY_TIMEOUT 100000 +#define SMIC_RETRY_TIMEOUT 2000000 /* SMIC Flags Register Bits */ #define SMIC_RX_DATA_READY 0x80 #define SMIC_TX_DATA_READY 0x40 +/* + * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by + * a few systems, and then only by Systems Management + * Interrupts, not by the OS. Always ignore these bits. + * + */ #define SMIC_SMI 0x10 #define SMIC_EVM_DATA_AVAIL 0x08 #define SMIC_SMS_DATA_AVAIL 0x04 @@ -364,8 +374,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) switch (smic->state) { case SMIC_IDLE: /* in IDLE we check for available messages */ - if (flags & (SMIC_SMI | - SMIC_EVM_DATA_AVAIL | SMIC_SMS_DATA_AVAIL)) + if (flags & SMIC_SMS_DATA_AVAIL) { return SI_SM_ATTN; } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 2da64bf7469..1f3159eb1ed 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -47,6 +47,9 @@ #include <linux/reboot.h> #include <linux/wait.h> #include <linux/poll.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <asm/atomic.h> #ifdef CONFIG_X86_LOCAL_APIC #include <asm/apic.h> #endif @@ -158,27 +161,120 @@ static struct fasync_struct *fasync_q = NULL; static char pretimeout_since_last_heartbeat = 0; static char expect_close; +static DECLARE_RWSEM(register_sem); + +/* Parameters to ipmi_set_timeout */ +#define IPMI_SET_TIMEOUT_NO_HB 0 +#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1 +#define IPMI_SET_TIMEOUT_FORCE_HB 2 + +static int ipmi_set_timeout(int do_heartbeat); + /* If true, the driver will start running as soon as it is configured and ready. */ static int start_now = 0; -module_param(timeout, int, 0); +static int set_param_int(const char *val, struct kernel_param *kp) +{ + char *endp; + int l; + int rv = 0; + + if (!val) + return -EINVAL; + l = simple_strtoul(val, &endp, 0); + if (endp == val) + return -EINVAL; + + down_read(®ister_sem); + *((int *)kp->arg) = l; + if (watchdog_user) + rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + up_read(®ister_sem); + + return rv; +} + +static int get_param_int(char *buffer, struct kernel_param *kp) +{ + return sprintf(buffer, "%i", *((int *)kp->arg)); +} + +typedef int (*action_fn)(const char *intval, char *outval); + +static int action_op(const char *inval, char *outval); +static int preaction_op(const char *inval, char *outval); +static int preop_op(const char *inval, char *outval); +static void check_parms(void); + +static int set_param_str(const char *val, struct kernel_param *kp) +{ + action_fn fn = (action_fn) kp->arg; + int rv = 0; + const char *end; + char valcp[16]; + int len; + + /* Truncate leading and trailing spaces. 
*/ + while (isspace(*val)) + val++; + end = val + strlen(val) - 1; + while ((end >= val) && isspace(*end)) + end--; + len = end - val + 1; + if (len > sizeof(valcp) - 1) + return -EINVAL; + memcpy(valcp, val, len); + valcp[len] = '\0'; + + down_read(®ister_sem); + rv = fn(valcp, NULL); + if (rv) + goto out_unlock; + + check_parms(); + if (watchdog_user) + rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); + + out_unlock: + up_read(®ister_sem); + return rv; +} + +static int get_param_str(char *buffer, struct kernel_param *kp) +{ + action_fn fn = (action_fn) kp->arg; + int rv; + + rv = fn(NULL, buffer); + if (rv) + return rv; + return strlen(buffer); +} + +module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644); MODULE_PARM_DESC(timeout, "Timeout value in seconds."); -module_param(pretimeout, int, 0); + +module_param_call(pretimeout, set_param_int, get_param_int, &pretimeout, 0644); MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); -module_param_string(action, action, sizeof(action), 0); + +module_param_call(action, set_param_str, get_param_str, action_op, 0644); MODULE_PARM_DESC(action, "Timeout action. One of: " "reset, none, power_cycle, power_off."); -module_param_string(preaction, preaction, sizeof(preaction), 0); + +module_param_call(preaction, set_param_str, get_param_str, preaction_op, 0644); MODULE_PARM_DESC(preaction, "Pretimeout action. One of: " "pre_none, pre_smi, pre_nmi, pre_int."); -module_param_string(preop, preop, sizeof(preop), 0); + +module_param_call(preop, set_param_str, get_param_str, preop_op, 0644); MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " "preop_none, preop_panic, preop_give_data."); + module_param(start_now, int, 0); MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" "soon as the driver is loaded."); -module_param(nowayout, int, 0); + +module_param(nowayout, int, 0644); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)"); /* Default state of the timer. */ @@ -200,6 +296,8 @@ static int ipmi_start_timer_on_heartbeat = 0; static unsigned char ipmi_version_major; static unsigned char ipmi_version_minor; +/* If a pretimeout occurs, this is used to allow only one panic to happen. 
*/ +static atomic_t preop_panic_excl = ATOMIC_INIT(-1); static int ipmi_heartbeat(void); static void panic_halt_ipmi_heartbeat(void); @@ -294,11 +392,6 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, return rv; } -/* Parameters to ipmi_set_timeout */ -#define IPMI_SET_TIMEOUT_NO_HB 0 -#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1 -#define IPMI_SET_TIMEOUT_FORCE_HB 2 - static int ipmi_set_timeout(int do_heartbeat) { int send_heartbeat_now; @@ -732,8 +825,6 @@ static struct miscdevice ipmi_wdog_miscdev = { .fops = &ipmi_wdog_fops }; -static DECLARE_RWSEM(register_sem); - static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, void *handler_data) { @@ -749,9 +840,10 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, static void ipmi_wdog_pretimeout_handler(void *handler_data) { if (preaction_val != WDOG_PRETIMEOUT_NONE) { - if (preop_val == WDOG_PREOP_PANIC) - panic("Watchdog pre-timeout"); - else if (preop_val == WDOG_PREOP_GIVE_DATA) { + if (preop_val == WDOG_PREOP_PANIC) { + if (atomic_inc_and_test(&preop_panic_excl)) + panic("Watchdog pre-timeout"); + } else if (preop_val == WDOG_PREOP_GIVE_DATA) { spin_lock(&ipmi_read_lock); data_to_read = 1; wake_up_interruptible(&read_q); @@ -825,7 +917,8 @@ ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled) an error and not work unless we re-enable the timer. So do so. */ pretimeout_since_last_heartbeat = 1; - panic(PFX "pre-timeout"); + if (atomic_inc_and_test(&preop_panic_excl)) + panic(PFX "pre-timeout"); } return NOTIFY_DONE; @@ -839,6 +932,7 @@ static struct nmi_handler ipmi_nmi_handler = .handler = ipmi_nmi, .priority = 0, /* Call us last. */ }; +int nmi_handler_registered; #endif static int wdog_reboot_handler(struct notifier_block *this, @@ -921,59 +1015,86 @@ static struct ipmi_smi_watcher smi_watcher = .smi_gone = ipmi_smi_gone }; -static int __init ipmi_wdog_init(void) +static int action_op(const char *inval, char *outval) { - int rv; + if (outval) + strcpy(outval, action); + + if (!inval) + return 0; - if (strcmp(action, "reset") == 0) { + if (strcmp(inval, "reset") == 0) action_val = WDOG_TIMEOUT_RESET; - } else if (strcmp(action, "none") == 0) { + else if (strcmp(inval, "none") == 0) action_val = WDOG_TIMEOUT_NONE; - } else if (strcmp(action, "power_cycle") == 0) { + else if (strcmp(inval, "power_cycle") == 0) action_val = WDOG_TIMEOUT_POWER_CYCLE; - } else if (strcmp(action, "power_off") == 0) { + else if (strcmp(inval, "power_off") == 0) action_val = WDOG_TIMEOUT_POWER_DOWN; - } else { - action_val = WDOG_TIMEOUT_RESET; - printk(KERN_INFO PFX "Unknown action '%s', defaulting to" - " reset\n", action); - } + else + return -EINVAL; + strcpy(action, inval); + return 0; +} + +static int preaction_op(const char *inval, char *outval) +{ + if (outval) + strcpy(outval, preaction); - if (strcmp(preaction, "pre_none") == 0) { + if (!inval) + return 0; + + if (strcmp(inval, "pre_none") == 0) preaction_val = WDOG_PRETIMEOUT_NONE; - } else if (strcmp(preaction, "pre_smi") == 0) { + else if (strcmp(inval, "pre_smi") == 0) preaction_val = WDOG_PRETIMEOUT_SMI; #ifdef HAVE_NMI_HANDLER - } else if (strcmp(preaction, "pre_nmi") == 0) { + else if (strcmp(inval, "pre_nmi") == 0) preaction_val = WDOG_PRETIMEOUT_NMI; #endif - } else if (strcmp(preaction, "pre_int") == 0) { + else if (strcmp(inval, "pre_int") == 0) preaction_val = WDOG_PRETIMEOUT_MSG_INT; - } else { - preaction_val = WDOG_PRETIMEOUT_NONE; - printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to" - " none\n", preaction); - } + else + return 
-EINVAL; + strcpy(preaction, inval); + return 0; +} + +static int preop_op(const char *inval, char *outval) +{ + if (outval) + strcpy(outval, preop); - if (strcmp(preop, "preop_none") == 0) { + if (!inval) + return 0; + + if (strcmp(inval, "preop_none") == 0) preop_val = WDOG_PREOP_NONE; - } else if (strcmp(preop, "preop_panic") == 0) { + else if (strcmp(inval, "preop_panic") == 0) preop_val = WDOG_PREOP_PANIC; - } else if (strcmp(preop, "preop_give_data") == 0) { + else if (strcmp(inval, "preop_give_data") == 0) preop_val = WDOG_PREOP_GIVE_DATA; - } else { - preop_val = WDOG_PREOP_NONE; - printk(KERN_INFO PFX "Unknown preop '%s', defaulting to" - " none\n", preop); - } + else + return -EINVAL; + strcpy(preop, inval); + return 0; +} +static void check_parms(void) +{ #ifdef HAVE_NMI_HANDLER + int do_nmi = 0; + int rv; + if (preaction_val == WDOG_PRETIMEOUT_NMI) { + do_nmi = 1; if (preop_val == WDOG_PREOP_GIVE_DATA) { printk(KERN_WARNING PFX "Pretimeout op is to give data" " but NMI pretimeout is enabled, setting" " pretimeout op to none\n"); - preop_val = WDOG_PREOP_NONE; + preop_op("preop_none", NULL); + do_nmi = 0; } #ifdef CONFIG_X86_LOCAL_APIC if (nmi_watchdog == NMI_IO_APIC) { @@ -983,18 +1104,48 @@ static int __init ipmi_wdog_init(void) " Disabling IPMI nmi pretimeout.\n", nmi_watchdog); preaction_val = WDOG_PRETIMEOUT_NONE; - } else { + do_nmi = 0; + } #endif + } + if (do_nmi && !nmi_handler_registered) { rv = request_nmi(&ipmi_nmi_handler); if (rv) { - printk(KERN_WARNING PFX "Can't register nmi handler\n"); - return rv; - } -#ifdef CONFIG_X86_LOCAL_APIC - } -#endif + printk(KERN_WARNING PFX + "Can't register nmi handler\n"); + return; + } else + nmi_handler_registered = 1; + } else if (!do_nmi && nmi_handler_registered) { + release_nmi(&ipmi_nmi_handler); + nmi_handler_registered = 0; } #endif +} + +static int __init ipmi_wdog_init(void) +{ + int rv; + + if (action_op(action, NULL)) { + action_op("reset", NULL); + printk(KERN_INFO PFX "Unknown action '%s', defaulting to" + " reset\n", action); + } + + if (preaction_op(preaction, NULL)) { + preaction_op("pre_none", NULL); + printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to" + " none\n", preaction); + } + + if (preop_op(preop, NULL)) { + preop_op("preop_none", NULL); + printk(KERN_INFO PFX "Unknown preop '%s', defaulting to" + " none\n", preop); + } + + check_parms(); rv = ipmi_smi_watcher_register(&smi_watcher); if (rv) { @@ -1021,7 +1172,7 @@ static __exit void ipmi_unregister_watchdog(void) down_write(®ister_sem); #ifdef HAVE_NMI_HANDLER - if (preaction_val == WDOG_PRETIMEOUT_NMI) + if (nmi_handler_registered) release_nmi(&ipmi_nmi_handler); #endif diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index e3ddbdb85a2..ce3bc0d45f1 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c @@ -860,10 +860,9 @@ static void __exit istallion_module_exit(void) if ((i = unregister_chrdev(STL_SIOMEMMAJOR, "staliomem"))) printk("STALLION: failed to un-register serial memory device, " "errno=%d\n", -i); - if (stli_tmpwritebuf != (char *) NULL) - kfree(stli_tmpwritebuf); - if (stli_txcookbuf != (char *) NULL) - kfree(stli_txcookbuf); + + kfree(stli_tmpwritebuf); + kfree(stli_txcookbuf); for (i = 0; (i < stli_nrbrds); i++) { if ((brdp = stli_brds[i]) == (stlibrd_t *) NULL) diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 45d012d85e8..3b965a651da 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c @@ -470,6 +470,8 @@ static struct tty_operations mxser_ops = { .stop = mxser_stop, .start = 
mxser_start, .hangup = mxser_hangup, + .break_ctl = mxser_rs_break, + .wait_until_sent = mxser_wait_until_sent, .tiocmget = mxser_tiocmget, .tiocmset = mxser_tiocmset, }; @@ -492,14 +494,18 @@ static int __init mxser_module_init(void) static void __exit mxser_module_exit(void) { - int i, err = 0; + int i, err; if (verbose) printk(KERN_DEBUG "Unloading module mxser ...\n"); - if ((err |= tty_unregister_driver(mxvar_sdriver))) + err = tty_unregister_driver(mxvar_sdriver); + if (!err) + put_tty_driver(mxvar_sdriver); + else printk(KERN_ERR "Couldn't unregister MOXA Smartio/Industio family serial driver\n"); + for (i = 0; i < MXSER_BOARDS; i++) { struct pci_dev *pdev; @@ -688,7 +694,6 @@ static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxs static int mxser_init(void) { int i, m, retval, b, n; - int ret1; struct pci_dev *pdev = NULL; int index; unsigned char busnum, devnum; @@ -722,24 +727,6 @@ static int mxser_init(void) mxvar_sdriver->termios = mxvar_termios; mxvar_sdriver->termios_locked = mxvar_termios_locked; - mxvar_sdriver->open = mxser_open; - mxvar_sdriver->close = mxser_close; - mxvar_sdriver->write = mxser_write; - mxvar_sdriver->put_char = mxser_put_char; - mxvar_sdriver->flush_chars = mxser_flush_chars; - mxvar_sdriver->write_room = mxser_write_room; - mxvar_sdriver->chars_in_buffer = mxser_chars_in_buffer; - mxvar_sdriver->flush_buffer = mxser_flush_buffer; - mxvar_sdriver->ioctl = mxser_ioctl; - mxvar_sdriver->throttle = mxser_throttle; - mxvar_sdriver->unthrottle = mxser_unthrottle; - mxvar_sdriver->set_termios = mxser_set_termios; - mxvar_sdriver->stop = mxser_stop; - mxvar_sdriver->start = mxser_start; - mxvar_sdriver->hangup = mxser_hangup; - mxvar_sdriver->break_ctl = mxser_rs_break; - mxvar_sdriver->wait_until_sent = mxser_wait_until_sent; - mxvar_diagflag = 0; memset(mxvar_table, 0, MXSER_PORTS * sizeof(struct mxser_struct)); memset(&mxvar_log, 0, sizeof(struct mxser_log)); @@ -870,14 +857,11 @@ static int mxser_init(void) } #endif - ret1 = 0; - if (!(ret1 = tty_register_driver(mxvar_sdriver))) { - return 0; - } else + retval = tty_register_driver(mxvar_sdriver); + if (retval) { printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family driver !\n"); + put_tty_driver(mxvar_sdriver); - - if (ret1) { for (i = 0; i < MXSER_BOARDS; i++) { if (mxsercfg[i].board_type == -1) continue; @@ -886,10 +870,10 @@ static int mxser_init(void) //todo: release io, vector } } - return -1; + return retval; } - return (0); + return 0; } static void mxser_do_softint(void *private_) @@ -933,6 +917,9 @@ static int mxser_open(struct tty_struct *tty, struct file *filp) struct mxser_struct *info; int retval, line; + /* initialize driver_data in case something fails */ + tty->driver_data = NULL; + line = tty->index; if (line == MXSER_PORTS) return 0; @@ -995,7 +982,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp) if (tty->index == MXSER_PORTS) return; if (!info) - BUG(); + return; spin_lock_irqsave(&info->slock, flags); diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c index 5079beda69b..c3660d8781a 100644 --- a/drivers/char/n_hdlc.c +++ b/drivers/char/n_hdlc.c @@ -264,8 +264,7 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc) } else break; } - if (n_hdlc->tbuf) - kfree(n_hdlc->tbuf); + kfree(n_hdlc->tbuf); kfree(n_hdlc); } /* end of n_hdlc_release() */ diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 02d7f046c10..2c326ea5342 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ 
b/drivers/char/pcmcia/synclink_cs.c @@ -2994,8 +2994,7 @@ int rx_alloc_buffers(MGSLPC_INFO *info) void rx_free_buffers(MGSLPC_INFO *info) { - if (info->rx_buf) - kfree(info->rx_buf); + kfree(info->rx_buf); info->rx_buf = NULL; } diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 928b850cc67..d3bc731fbb2 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c @@ -2512,10 +2512,8 @@ static void rp_cleanup_module(void) "rocketport driver\n", -retval); put_tty_driver(rocket_driver); - for (i = 0; i < MAX_RP_PORTS; i++) { - if (rp_table[i]) - kfree(rp_table[i]); - } + for (i = 0; i < MAX_RP_PORTS; i++) + kfree(rp_table[i]); for (i = 0; i < NUM_BOARDS; i++) { if (rcktpt_io_addr[i] <= 0 || is_PCI[i]) diff --git a/drivers/char/selection.c b/drivers/char/selection.c index 16d630f58bb..5b187c895c1 100644 --- a/drivers/char/selection.c +++ b/drivers/char/selection.c @@ -246,8 +246,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t clear_selection(); return -ENOMEM; } - if (sel_buffer) - kfree(sel_buffer); + kfree(sel_buffer); sel_buffer = bp; obp = bp; diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 1c686414e0a..95af2a94159 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c @@ -785,8 +785,7 @@ static void __exit stallion_module_exit(void) "errno=%d\n", -i); class_destroy(stallion_class); - if (stl_tmpwritebuf != (char *) NULL) - kfree(stl_tmpwritebuf); + kfree(stl_tmpwritebuf); for (i = 0; (i < stl_nrbrds); i++) { if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL) @@ -804,8 +803,7 @@ static void __exit stallion_module_exit(void) continue; if (portp->tty != (struct tty_struct *) NULL) stl_hangup(portp->tty); - if (portp->tx.buf != (char *) NULL) - kfree(portp->tx.buf); + kfree(portp->tx.buf); kfree(portp); } kfree(panelp); diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index 0133dc0e25d..5d1ffa3bd4c 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -4016,9 +4016,7 @@ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info) */ static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info) { - if ( info->intermediate_rxbuffer ) - kfree(info->intermediate_rxbuffer); - + kfree(info->intermediate_rxbuffer); info->intermediate_rxbuffer = NULL; } /* end of mgsl_free_intermediate_rxbuffer_memory() */ @@ -4072,10 +4070,8 @@ static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info) int i; for ( i=0; i<info->num_tx_holding_buffers; ++i ) { - if ( info->tx_holding_buffers[i].buffer ) { - kfree(info->tx_holding_buffers[i].buffer); - info->tx_holding_buffers[i].buffer=NULL; - } + kfree(info->tx_holding_buffers[i].buffer); + info->tx_holding_buffers[i].buffer = NULL; } info->get_tx_holding_index = 0; diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index f185724448b..7c063c5abc5 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c @@ -2788,10 +2788,8 @@ static void shutdown(SLMP_INFO * info) del_timer(&info->tx_timer); del_timer(&info->status_timer); - if (info->tx_buf) { - kfree(info->tx_buf); - info->tx_buf = NULL; - } + kfree(info->tx_buf); + info->tx_buf = NULL; spin_lock_irqsave(&info->lock,flags); @@ -3611,8 +3609,7 @@ int alloc_tmp_rx_buf(SLMP_INFO *info) void free_tmp_rx_buf(SLMP_INFO *info) { - if (info->tmp_rx_buf) - kfree(info->tmp_rx_buf); + kfree(info->tmp_rx_buf); info->tmp_rx_buf = NULL; } diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 8d125c974a2..680a8e33188 
100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -287,10 +287,6 @@ static int __init init_nsc(void) int lo, hi; int nscAddrBase = TPM_ADDR; - driver_register(&nsc_drv); - - /* select PM channel 1 */ - tpm_write_index(nscAddrBase,NSC_LDN_INDEX, 0x12); /* verify that it is a National part (SID) */ if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) { @@ -300,6 +296,8 @@ static int __init init_nsc(void) return -ENODEV; } + driver_register(&nsc_drv); + hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI); lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO); tpm_nsc.base = (hi<<8) | lo; @@ -307,11 +305,11 @@ static int __init init_nsc(void) /* enable the DPM module */ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); - pdev = kmalloc(sizeof(struct platform_device), GFP_KERNEL); - if ( !pdev ) - return -ENOMEM; - - memset(pdev, 0, sizeof(struct platform_device)); + pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); + if (!pdev) { + rc = -ENOMEM; + goto err_unreg_drv; + } pdev->name = "tpm_nscl0"; pdev->id = -1; @@ -319,26 +317,16 @@ static int __init init_nsc(void) pdev->dev.release = tpm_nsc_remove; pdev->dev.driver = &nsc_drv; - if ((rc=platform_device_register(pdev)) < 0) { - kfree(pdev); - pdev = NULL; - return rc; - } + if ((rc = platform_device_register(pdev)) < 0) + goto err_free_dev; if (request_region(tpm_nsc.base, 2, "tpm_nsc0") == NULL ) { - platform_device_unregister(pdev); - kfree(pdev); - pdev = NULL; - return -EBUSY; + rc = -EBUSY; + goto err_unreg_dev; } - if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0) { - release_region(tpm_nsc.base, 2); - platform_device_unregister(pdev); - kfree(pdev); - pdev = NULL; - return rc; - } + if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0) + goto err_rel_reg; dev_dbg(&pdev->dev, "NSC TPM detected\n"); dev_dbg(&pdev->dev, @@ -374,6 +362,16 @@ static int __init init_nsc(void) tpm_read_index(nscAddrBase, 0x27) & 0x1F); return 0; + +err_rel_reg: + release_region(tpm_nsc.base, 2); +err_unreg_dev: + platform_device_unregister(pdev); +err_free_dev: + kfree(pdev); +err_unreg_drv: + driver_unregister(&nsc_drv); + return rc; } static void __exit cleanup_nsc(void) diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index c586bfa852e..4b1eef51ec5 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1416,14 +1416,11 @@ end_init: /* Release locally allocated memory ... nothing placed in slots */ free_mem_out: - if (o_tp) - kfree(o_tp); + kfree(o_tp); if (o_tty) free_tty_struct(o_tty); - if (ltp) - kfree(ltp); - if (tp) - kfree(tp); + kfree(ltp); + kfree(tp); free_tty_struct(tty); fail_no_mem: diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 003dda147cd..24011e7c81f 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -80,6 +80,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) return -EFAULT; + if (!capable(CAP_SYS_TTY_CONFIG)) + perm = 0; + switch (cmd) { case KDGKBENT: key_map = key_maps[s]; @@ -193,7 +196,7 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) int ret; if (!capable(CAP_SYS_TTY_CONFIG)) - return -EPERM; + perm = 0; kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); if (!kbs) { |
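The ipmi_si hunks above add a transaction pre-hook: a notifier chain that platform quirks can register on and that start_next_msg() consults before handing a request to the BMC. The Dell PowerEdge BT handler uses it to catch Get SDR requests asking for exactly 0x3A bytes, which those BMCs never answer, and to fail them locally (the real handler first queues a response with the 0xCA "cannot return requested length" completion code) so callers retry with a different size. Below is a minimal sketch of that flow using the same 2.6-era notifier calls the patch uses (later kernels replaced them with raw/atomic notifier heads); the request structure and helper names are illustrative stand-ins, not the driver's real ones.

#include <linux/kernel.h>
#include <linux/notifier.h>

/* Head of the pre-transaction chain (one per interface in the real
 * driver; a single global one here for brevity). */
static struct notifier_block *pre_xaction_list;

struct demo_req {                      /* stand-in for the SMI message */
	unsigned char *data;
	unsigned int   size;
};

/* Quirk handler: NOTIFY_STOP tells the caller the request was consumed
 * here (the real handler also delivers an error response first). */
static int bad_sdr_len_hook(struct notifier_block *self,
			    unsigned long unused, void *in)
{
	struct demo_req *req = in;
	unsigned char *d = req->data;

	if (req->size >= 8 &&
	    (d[0] >> 2) == 0x0A &&     /* Storage netfn        */
	    d[1] == 0x23 &&            /* Get SDR command      */
	    d[7] == 0x3A)              /* the problematic size */
		return NOTIFY_STOP;
	return NOTIFY_DONE;            /* not ours, keep going */
}

static struct notifier_block bad_sdr_len_nb = {
	.notifier_call = bad_sdr_len_hook,
};

static int install_quirk(void)
{
	return notifier_chain_register(&pre_xaction_list, &bad_sdr_len_nb);
}

/* At the point where a request would otherwise be started: */
static int start_request(struct demo_req *req)
{
	if (notifier_call_chain(&pre_xaction_list, 0, req) & NOTIFY_STOP_MASK)
		return 0;              /* a hook already dealt with it */
	/* ... otherwise hand req to the interface state machine ... */
	return 0;
}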
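The same file also grows a per-interface kernel thread ("kipmi%d", skipped for BT interfaces) that keeps kicking the state machine between interrupts, choosing between busy-polling, a 1 µs udelay and sleeping for a tick according to what the state machine asked for, and that is stopped alongside the timer in wait_for_timer_and_thread(). A minimal sketch of that lifecycle using the kthread calls from the patch; the step function below is a trivial placeholder for smi_event_handler().

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

enum poll_hint { POLL_AGAIN, POLL_SHORT_DELAY, POLL_NEXT_TICK };

/* Placeholder for one state-machine step. */
static enum poll_hint do_one_step(void *ctx)
{
	return POLL_NEXT_TICK;
}

static int poll_thread(void *ctx)
{
	set_user_nice(current, 19);             /* lowest priority */
	while (!kthread_should_stop()) {
		switch (do_one_step(ctx)) {
		case POLL_AGAIN:                /* poll again immediately */
			break;
		case POLL_SHORT_DELAY:
			udelay(1);              /* sub-tick pause */
			break;
		default:
			schedule_timeout_interruptible(1);  /* one tick */
		}
	}
	return 0;
}

static struct task_struct *poller;

static int start_poller(void *ctx, int idx)
{
	poller = kthread_run(poll_thread, ctx, "kipmi%d", idx);
	return IS_ERR(poller) ? PTR_ERR(poller) : 0;
}

static void stop_poller(void)
{
	if (poller && !IS_ERR(poller))
		kthread_stop(poller);           /* blocks until the thread exits */
}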
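In ipmi_watchdog.c, the timeout/pretimeout/action/preaction/preop parameters switch from plain module_param and module_param_string to module_param_call with explicit set/get handlers. That makes them writable through /sys/module/ipmi_watchdog/parameters/ after load (mode 0644) and lets the set handler push the new value straight to the BMC under register_sem. A stripped-down sketch of the pattern with a made-up integer parameter; the handler signatures match this kernel generation (later kernels pass const struct kernel_param *).

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

static int demo_timeout = 10;          /* hypothetical parameter */

/* Invoked for the module command line and for writes to
 * /sys/module/<module>/parameters/demo_timeout: parse, store, apply. */
static int demo_timeout_set(const char *val, struct kernel_param *kp)
{
	char *end;
	int v;

	if (!val)
		return -EINVAL;
	v = simple_strtoul(val, &end, 0);
	if (end == val)
		return -EINVAL;

	*(int *)kp->arg = v;
	/* ... the real driver re-sends the timeout to the BMC here ... */
	return 0;
}

static int demo_timeout_get(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%i", *(int *)kp->arg);
}

/* 0644: root may change the value at run time, everyone may read it. */
module_param_call(demo_timeout, demo_timeout_set, demo_timeout_get,
		  &demo_timeout, 0644);
MODULE_PARM_DESC(demo_timeout, "illustrative run-time adjustable timeout");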
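The watchdog pretimeout can reach panic() from two places (the pretimeout notification handler and the NMI handler), so the patch serialises them with an atomic counter started at -1: the first atomic_inc_and_test() brings it to zero and returns true, every later increment returns false. The idiom in isolation, with illustrative names:

#include <linux/kernel.h>
#include <asm/atomic.h>        /* <linux/atomic.h> on newer kernels */

static atomic_t panic_once = ATOMIC_INIT(-1);

/* May be reached concurrently from several paths; only the first
 * caller actually panics, the rest fall through and do nothing. */
static void maybe_panic(const char *why)
{
	if (atomic_inc_and_test(&panic_once))   /* -1 -> 0 happens once */
		panic("%s", why);
}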
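The kfree() hunks scattered through the rest of the series (istallion, n_hdlc, synclink_cs, rocket, selection, stallion, synclink, synclinkmp, tty_io and others) are all the same mechanical cleanup: kfree(NULL) is defined to do nothing, so the "if (p) kfree(p);" guards are dead weight and can be dropped, keeping the "p = NULL;" reset only where the pointer is reused later. In miniature, with a placeholder structure:

#include <linux/slab.h>

struct demo_info {
	char *rx_buf;
};

static void demo_free_rx_buf(struct demo_info *info)
{
	/* No "if (info->rx_buf)" needed: kfree() ignores NULL. */
	kfree(info->rx_buf);
	info->rx_buf = NULL;    /* keep only if the field is reused */
}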
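The mxser changes finish the move to a shared struct tty_operations: break_ctl and wait_until_sent join the static mxser_ops table, the long list of per-field assignments in mxser_init() goes away, and both the init failure path and module exit now pair tty_register_driver() with put_tty_driver(). A minimal sketch of that registration pattern with placeholder names; a real driver also fills in the driver name, type, init_termios and the remaining fields before registering.

#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static int demo_open(struct tty_struct *tty, struct file *filp)
{
	return 0;
}

static void demo_close(struct tty_struct *tty, struct file *filp)
{
}

/* All entry points live in one table instead of being assigned
 * field by field at init time. */
static struct tty_operations demo_ops = {
	.open  = demo_open,
	.close = demo_close,
	/* .break_ctl, .wait_until_sent, ... would be added here too */
};

static struct tty_driver *demo_driver;

static int __init demo_init(void)
{
	int rv;

	demo_driver = alloc_tty_driver(4);      /* 4 ports, say */
	if (!demo_driver)
		return -ENOMEM;
	tty_set_operations(demo_driver, &demo_ops);

	rv = tty_register_driver(demo_driver);
	if (rv)
		put_tty_driver(demo_driver);    /* undo the allocation */
	return rv;
}

static void __exit demo_exit(void)
{
	if (!tty_unregister_driver(demo_driver))
		put_tty_driver(demo_driver);    /* only after a clean unregister */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");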
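init_nsc() in tpm_nsc.c is reworked so the hardware check (the National SID probe) happens before driver_register(), kmalloc+memset becomes kzalloc, and every failure exits through one goto ladder that releases, in reverse order, exactly what was acquired so far: the I/O region, the platform device, its memory and the driver registration. The general shape of that ladder, with placeholder resources:

/* Generic unwind ladder in the style of the reworked init_nsc().
 * acquire_a/b/c and release_a/b are placeholders for real resources. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int probe_example(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		return rc;              /* nothing to undo yet */

	rc = acquire_b();
	if (rc)
		goto err_release_a;

	rc = acquire_c();
	if (rc)
		goto err_release_b;

	return 0;                       /* fully set up */

err_release_b:
	release_b();
err_release_a:
	release_a();
	return rc;
}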