Diffstat (limited to 'arch/ia64/sn/kernel/xpc_channel.c')
-rw-r--r--	arch/ia64/sn/kernel/xpc_channel.c	34
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0c0a6890240..8d950c778bb 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -22,6 +22,8 @@
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 		atomic_set(&ch->n_to_notify, 0);
 
 		spin_lock_init(&ch->lock);
-		sema_init(&ch->msg_to_pull_sema, 1);	/* mutex */
-		sema_init(&ch->wdisconnect_sema, 0);	/* event wait */
+		mutex_init(&ch->msg_to_pull_mutex);
+		init_completion(&ch->wdisconnect_wait);
 
 		atomic_set(&ch->n_on_msg_allocate_wq, 0);
 		init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
-	int i;
 	enum xpc_retval ret;
 
 
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 		return ret;
 	}
 
-	for (i = 0; i < ch->local_nentries; i++) {
-		/* use a semaphore as an event wait queue */
-		sema_init(&ch->notify_queue[i].sema, 0);
-	}
-
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	ch->flags |= XPC_C_SETUP;
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	}
 
 	if (ch->flags & XPC_C_WDISCONNECT) {
-		spin_unlock_irqrestore(&ch->lock, *irq_flags);
-		up(&ch->wdisconnect_sema);
-		spin_lock_irqsave(&ch->lock, *irq_flags);
-
+		/* we won't lose the CPU since we're holding ch->lock */
+		complete(&ch->wdisconnect_wait);
 	} else if (ch->delayed_IPI_flags) {
 		if (part->act_state != XPC_P_DEACTIVATING) {
 			/* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
 	struct xpc_registration *registration = &xpc_registrations[ch->number];
 
 
-	if (down_trylock(&registration->sema) != 0) {
+	if (mutex_trylock(&registration->mutex) == 0) {
 		return xpcRetry;
 	}
 
 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return xpcUnregistered;
 	}
 
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 
 	if (ch->flags & XPC_C_DISCONNECTING) {
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		up(&registration->sema);
+		mutex_unlock(&registration->mutex);
 		return ch->reason;
 	}
 
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 			 * channel lock be locked and will unlock and relock
 			 * the channel lock as needed.
 			 */
-			up(&registration->sema);
+			mutex_unlock(&registration->mutex);
 			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
 							&irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
 	}
 
-	up(&registration->sema);
+	mutex_unlock(&registration->mutex);
 
 	/* initiate the connection */
 
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 	enum xpc_retval ret;
 
 
-	if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+	if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
 		/* we were interrupted by a signal */
 		return NULL;
 	}
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
 			XPC_DEACTIVATE_PARTITION(part, ret);
 
-			up(&ch->msg_to_pull_sema);
+			mutex_unlock(&ch->msg_to_pull_mutex);
 			return NULL;
 		}
 
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 		ch->next_msg_to_pull += nmsgs;
 	}
 
-	up(&ch->msg_to_pull_sema);
+	mutex_unlock(&ch->msg_to_pull_mutex);
 
 	/* return the message we were looking for */
 	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
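
The hunks above all follow the same sem2mutex conversion pattern: a semaphore initialized to 1 and bracketed by down()/up() around a critical section becomes a struct mutex, while a semaphore initialized to 0 and used as a one-shot event wait becomes a struct completion. Below is a minimal sketch of that pattern applied to a made-up structure; struct foo and its helpers are illustrative names only, not part of xpc.h.

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/errno.h>

/* hypothetical example structure -- not from the XPC code */
struct foo {
	struct mutex lock;	/* was: struct semaphore initialized to 1 (mutex use) */
	struct completion done;	/* was: struct semaphore initialized to 0 (event wait) */
};

static void foo_init(struct foo *f)
{
	mutex_init(&f->lock);		/* replaces sema_init(&sema, 1) */
	init_completion(&f->done);	/* replaces sema_init(&sema, 0) */
}

static int foo_do_work(struct foo *f)
{
	/* replaces down_interruptible(); nonzero return means a signal arrived */
	if (mutex_lock_interruptible(&f->lock) != 0)
		return -EINTR;

	/* ... critical section ... */

	mutex_unlock(&f->lock);		/* replaces up() on the binary semaphore */
	return 0;
}

static void foo_signal_done(struct foo *f)
{
	complete(&f->done);		/* replaces up() on the event semaphore */
}

static void foo_wait_done(struct foo *f)
{
	wait_for_completion(&f->done);	/* replaces down() on the event semaphore */
}

Note that the trylock test flips its sense in this conversion: down_trylock() returns 0 on success, while mutex_trylock() returns 1 on success, which is why the patch checks mutex_trylock(&registration->mutex) == 0 where the old code checked down_trylock(&registration->sema) != 0.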