Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/eeprom/at24.c           |   8
-rw-r--r--  drivers/misc/eeprom/at25.c           |   5
-rw-r--r--  drivers/misc/isl29003.c              |   9
-rw-r--r--  drivers/misc/sgi-gru/grufile.c       |   2
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c        |  12
-rw-r--r--  drivers/misc/sgi-xp/xpc.h            | 254
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c    | 138
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c       | 128
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c  |  20
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c        | 164
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c         | 257
11 files changed, 498 insertions, 499 deletions
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index d184dfab963..db39f4a52f5 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -278,7 +278,7 @@ static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
* We only use page mode writes; the alternative is sloooow. This routine
* writes at most one page.
*/
-static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf,
+static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
unsigned offset, size_t count)
{
struct i2c_client *client;
@@ -347,8 +347,8 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf,
return -ETIMEDOUT;
}
-static ssize_t at24_write(struct at24_data *at24,
- char *buf, loff_t off, size_t count)
+static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
+ size_t count)
{
ssize_t retval = 0;
@@ -406,7 +406,7 @@ static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
return at24_read(at24, buf, offset, count);
}
-static ssize_t at24_macc_write(struct memory_accessor *macc, char *buf,
+static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
off_t offset, size_t count)
{
struct at24_data *at24 = container_of(macc, struct at24_data, macc);
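The at24 (and, below, at25) hunks only const-qualify the source buffer on the write paths, so the memory_accessor write hook can take read-only data. A minimal userspace sketch of the same idea, not the driver code, with hypothetical names (struct eeprom, eeprom_write_page), showing why the const qualifier matters for callers passing rodata such as string literals:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct eeprom {
	char mem[256];	/* stand-in for the device contents */
};

/* Takes a const source buffer, like the patched at24_eeprom_write(). */
static ssize_t eeprom_write_page(struct eeprom *ee, const char *buf,
				 unsigned offset, size_t count)
{
	if (offset >= sizeof(ee->mem))
		return -1;
	if (count > sizeof(ee->mem) - offset)
		count = sizeof(ee->mem) - offset;
	memcpy(ee->mem + offset, buf, count);
	return (ssize_t)count;
}

int main(void)
{
	struct eeprom ee = { { 0 } };

	/* A string literal is read-only; without const on buf this call
	 * would need a cast or a writable copy. */
	ssize_t n = eeprom_write_page(&ee, "boot-config=1", 0, 13);

	printf("wrote %zd bytes: %.13s\n", n, ee.mem);
	return 0;
}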
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 6bc0dac5c1e..b34cb5f79ee 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -140,7 +140,8 @@ at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
static ssize_t
-at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count)
+at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
+ size_t count)
{
ssize_t status = 0;
unsigned written = 0;
@@ -276,7 +277,7 @@ static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
return at25_ee_read(at25, buf, offset, count);
}
-static ssize_t at25_mem_write(struct memory_accessor *mem, char *buf,
+static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
off_t offset, size_t count)
{
struct at25_data *at25 = container_of(mem, struct at25_data, mem);
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 2e2a5923d4c..a71e245801e 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -64,6 +64,7 @@ struct isl29003_data {
struct i2c_client *client;
struct mutex lock;
u8 reg_cache[ISL29003_NUM_CACHABLE_REGS];
+ u8 power_state_before_suspend;
};
static int gain_range[] = {
@@ -411,6 +412,9 @@ static int __devexit isl29003_remove(struct i2c_client *client)
#ifdef CONFIG_PM
static int isl29003_suspend(struct i2c_client *client, pm_message_t mesg)
{
+ struct isl29003_data *data = i2c_get_clientdata(client);
+
+ data->power_state_before_suspend = isl29003_get_power_state(client);
return isl29003_set_power_state(client, 0);
}
@@ -421,10 +425,11 @@ static int isl29003_resume(struct i2c_client *client)
/* restore registers from cache */
for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++)
- if (!i2c_smbus_write_byte_data(client, i, data->reg_cache[i]))
+ if (i2c_smbus_write_byte_data(client, i, data->reg_cache[i]))
return -EIO;
- return 0;
+ return isl29003_set_power_state(client,
+ data->power_state_before_suspend);
}
#else
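The isl29003 resume fix above hinges on the SMBus return convention: i2c_smbus_write_byte_data() returns 0 on success and a negative errno on failure, so the old `if (!...)` test rejected successful writes. A minimal kernel-style sketch of the corrected restore loop; restore_regs() is a hypothetical helper, and the snippet assumes the usual <linux/i2c.h> API rather than quoting the driver:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Write a cached register block back to the chip.  A non-zero return
 * from i2c_smbus_write_byte_data() is the error case, not success. */
static int restore_regs(struct i2c_client *client, const u8 *cache, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (i2c_smbus_write_byte_data(client, i, cache[i]))
			return -EIO;

	return 0;
}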
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 3e6e42d2f01..bbefe77c67a 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -375,7 +375,7 @@ static int __init gru_init(void)
void *gru_start_vaddr;
if (!is_uv_system())
- return -ENODEV;
+ return 0;
#if defined CONFIG_IA64
gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 16f8dcab2da..7896849b16d 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -248,19 +248,19 @@ xp_init(void)
enum xp_retval ret;
int ch_number;
+ /* initialize the connection registration mutex */
+ for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
+ mutex_init(&xpc_registrations[ch_number].mutex);
+
if (is_shub())
ret = xp_init_sn2();
else if (is_uv())
ret = xp_init_uv();
else
- ret = xpUnsupported;
+ ret = 0;
if (ret != xpSuccess)
- return -ENODEV;
-
- /* initialize the connection registration mutex */
- for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
- mutex_init(&xpc_registrations[ch_number].mutex);
+ return ret;
return 0;
}
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 114444cfd49..b94d5f76770 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -90,18 +90,21 @@ struct xpc_rsvd_page {
short max_npartitions; /* value of XPC_MAX_PARTITIONS */
u8 version;
u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */
+ unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
union {
- unsigned long vars_pa; /* phys address of struct xpc_vars */
- unsigned long activate_gru_mq_desc_gpa; /* phys addr of */
- /* activate mq's */
- /* gru mq descriptor */
+ struct {
+ unsigned long vars_pa; /* phys addr */
+ } sn2;
+ struct {
+ unsigned long heartbeat_gpa; /* phys addr */
+ unsigned long activate_gru_mq_desc_gpa; /* phys addr */
+ } uv;
} sn;
- unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
- u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */
+ u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */
u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */
};
-#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
+#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */
/*
* Define the structures by which XPC variables can be exported to other
@@ -182,6 +185,17 @@ struct xpc_vars_part_sn2 {
(XPC_RP_MACH_NASIDS(_rp) + \
xpc_nasid_mask_nlongs))
+
+/*
+ * The following structure describes the partition's heartbeat info which
+ * will be periodically read by other partitions to determine whether this
+ * XPC is still 'alive'.
+ */
+struct xpc_heartbeat_uv {
+ unsigned long value;
+ unsigned long offline; /* if 0, heartbeat should be changing */
+};
+
/*
* Info pertinent to a GRU message queue using a watch list for irq generation.
*/
@@ -198,7 +212,7 @@ struct xpc_gru_mq_uv {
/*
* The activate_mq is used to send/receive GRU messages that affect XPC's
- * heartbeat, partition active state, and channel state. This is UV only.
+ * partition active state and channel state. This is uv only.
*/
struct xpc_activate_mq_msghdr_uv {
unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */
@@ -210,33 +224,27 @@ struct xpc_activate_mq_msghdr_uv {
/* activate_mq defined message types */
#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0
-#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1
-#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2
-#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3
-#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4
-#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5
+#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 1
+#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 2
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8
-#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 3
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 4
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 5
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 6
+#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV 7
-#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10
-#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11
+#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 8
+#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 9
struct xpc_activate_mq_msg_uv {
struct xpc_activate_mq_msghdr_uv hdr;
};
-struct xpc_activate_mq_msg_heartbeat_req_uv {
- struct xpc_activate_mq_msghdr_uv hdr;
- u64 heartbeat;
-};
-
struct xpc_activate_mq_msg_activate_req_uv {
struct xpc_activate_mq_msghdr_uv hdr;
unsigned long rp_gpa;
+ unsigned long heartbeat_gpa;
unsigned long activate_gru_mq_desc_gpa;
};
@@ -271,6 +279,11 @@ struct xpc_activate_mq_msg_chctl_openreply_uv {
unsigned long notify_gru_mq_desc_gpa;
};
+struct xpc_activate_mq_msg_chctl_opencomplete_uv {
+ struct xpc_activate_mq_msghdr_uv hdr;
+ short ch_number;
+};
+
/*
* Functions registered by add_timer() or called by kernel_thread() only
* allow for a single 64-bit argument. The following macros can be used to
@@ -576,30 +589,32 @@ struct xpc_channel {
#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
-#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
-#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
-#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
-#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
+#define XPC_C_ROPENCOMPLETE 0x00000002 /* remote open channel complete */
+#define XPC_C_OPENCOMPLETE 0x00000004 /* local open channel complete */
+#define XPC_C_ROPENREPLY 0x00000008 /* remote open channel reply */
+#define XPC_C_OPENREPLY 0x00000010 /* local open channel reply */
+#define XPC_C_ROPENREQUEST 0x00000020 /* remote open channel request */
+#define XPC_C_OPENREQUEST 0x00000040 /* local open channel request */
-#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
-#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
+#define XPC_C_SETUP 0x00000080 /* channel's msgqueues are alloc'd */
+#define XPC_C_CONNECTEDCALLOUT 0x00000100 /* connected callout initiated */
#define XPC_C_CONNECTEDCALLOUT_MADE \
- 0x00000080 /* connected callout completed */
-#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
-#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
+ 0x00000200 /* connected callout completed */
+#define XPC_C_CONNECTED 0x00000400 /* local channel is connected */
+#define XPC_C_CONNECTING 0x00000800 /* channel is being connected */
-#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
-#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
-#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
-#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
+#define XPC_C_RCLOSEREPLY 0x00001000 /* remote close channel reply */
+#define XPC_C_CLOSEREPLY 0x00002000 /* local close channel reply */
+#define XPC_C_RCLOSEREQUEST 0x00004000 /* remote close channel request */
+#define XPC_C_CLOSEREQUEST 0x00008000 /* local close channel request */
-#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
-#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
+#define XPC_C_DISCONNECTED 0x00010000 /* channel is disconnected */
+#define XPC_C_DISCONNECTING 0x00020000 /* channel is being disconnected */
#define XPC_C_DISCONNECTINGCALLOUT \
- 0x00010000 /* disconnecting callout initiated */
+ 0x00040000 /* disconnecting callout initiated */
#define XPC_C_DISCONNECTINGCALLOUT_MADE \
- 0x00020000 /* disconnecting callout completed */
-#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
+ 0x00080000 /* disconnecting callout completed */
+#define XPC_C_WDISCONNECT 0x00100000 /* waiting for channel disconnect */
/*
* The channel control flags (chctl) union consists of a 64-bit variable which
@@ -618,11 +633,13 @@ union xpc_channel_ctl_flags {
#define XPC_CHCTL_CLOSEREPLY 0x02
#define XPC_CHCTL_OPENREQUEST 0x04
#define XPC_CHCTL_OPENREPLY 0x08
-#define XPC_CHCTL_MSGREQUEST 0x10
+#define XPC_CHCTL_OPENCOMPLETE 0x10
+#define XPC_CHCTL_MSGREQUEST 0x20
#define XPC_OPENCLOSE_CHCTL_FLAGS \
(XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
- XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY)
+ XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | \
+ XPC_CHCTL_OPENCOMPLETE)
#define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST
static inline int
@@ -687,6 +704,9 @@ struct xpc_partition_sn2 {
};
struct xpc_partition_uv {
+ unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */
+ struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */
+ /* partition's heartbeat */
unsigned long activate_gru_mq_desc_gpa; /* phys addr of parititon's */
/* activate mq's gru mq */
/* descriptor */
@@ -698,14 +718,12 @@ struct xpc_partition_uv {
u8 remote_act_state; /* remote partition's act_state */
u8 act_state_req; /* act_state request from remote partition */
enum xp_retval reason; /* reason for deactivate act_state request */
- u64 heartbeat; /* incremented by remote partition */
};
/* struct xpc_partition_uv flags */
-#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001
+#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000001
#define XPC_P_ENGAGED_UV 0x00000002
-#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000004
/* struct xpc_partition_uv act_state change requests */
@@ -762,6 +780,62 @@ struct xpc_partition {
} ____cacheline_aligned;
+struct xpc_arch_operations {
+ int (*setup_partitions) (void);
+ void (*teardown_partitions) (void);
+ void (*process_activate_IRQ_rcvd) (void);
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *);
+ int (*setup_rsvd_page) (struct xpc_rsvd_page *);
+
+ void (*allow_hb) (short);
+ void (*disallow_hb) (short);
+ void (*disallow_all_hbs) (void);
+ void (*increment_heartbeat) (void);
+ void (*offline_heartbeat) (void);
+ void (*online_heartbeat) (void);
+ void (*heartbeat_init) (void);
+ void (*heartbeat_exit) (void);
+ enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *);
+
+ void (*request_partition_activation) (struct xpc_rsvd_page *,
+ unsigned long, int);
+ void (*request_partition_reactivation) (struct xpc_partition *);
+ void (*request_partition_deactivation) (struct xpc_partition *);
+ void (*cancel_partition_deactivation_request) (struct xpc_partition *);
+ enum xp_retval (*setup_ch_structures) (struct xpc_partition *);
+ void (*teardown_ch_structures) (struct xpc_partition *);
+
+ enum xp_retval (*make_first_contact) (struct xpc_partition *);
+
+ u64 (*get_chctl_all_flags) (struct xpc_partition *);
+ void (*send_chctl_closerequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_closereply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openrequest) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_openreply) (struct xpc_channel *, unsigned long *);
+ void (*send_chctl_opencomplete) (struct xpc_channel *, unsigned long *);
+ void (*process_msg_chctl_flags) (struct xpc_partition *, int);
+
+ enum xp_retval (*save_remote_msgqueue_pa) (struct xpc_channel *,
+ unsigned long);
+
+ enum xp_retval (*setup_msg_structures) (struct xpc_channel *);
+ void (*teardown_msg_structures) (struct xpc_channel *);
+
+ void (*indicate_partition_engaged) (struct xpc_partition *);
+ void (*indicate_partition_disengaged) (struct xpc_partition *);
+ void (*assume_partition_disengaged) (short);
+ int (*partition_engaged) (short);
+ int (*any_partition_engaged) (void);
+
+ int (*n_of_deliverable_payloads) (struct xpc_channel *);
+ enum xp_retval (*send_payload) (struct xpc_channel *, u32, void *,
+ u16, u8, xpc_notify_func, void *);
+ void *(*get_deliverable_payload) (struct xpc_channel *);
+ void (*received_payload) (struct xpc_channel *, void *);
+ void (*notify_senders_of_disconnect) (struct xpc_channel *);
+};
+
/* struct xpc_partition act_state values (for XPC HB) */
#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
@@ -802,67 +876,17 @@ extern struct xpc_registration xpc_registrations[];
/* found in xpc_main.c */
extern struct device *xpc_part;
extern struct device *xpc_chan;
+extern struct xpc_arch_operations xpc_arch_ops;
extern int xpc_disengage_timelimit;
extern int xpc_disengage_timedout;
extern int xpc_activate_IRQ_rcvd;
extern spinlock_t xpc_activate_IRQ_rcvd_lock;
extern wait_queue_head_t xpc_activate_IRQ_wq;
-extern void *xpc_heartbeating_to_mask;
extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **);
extern void xpc_activate_partition(struct xpc_partition *);
extern void xpc_activate_kthreads(struct xpc_channel *, int);
extern void xpc_create_kthreads(struct xpc_channel *, int, int);
extern void xpc_disconnect_wait(int);
-extern int (*xpc_setup_partitions_sn) (void);
-extern void (*xpc_teardown_partitions_sn) (void);
-extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
- unsigned long *,
- size_t *);
-extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *);
-extern void (*xpc_heartbeat_init) (void);
-extern void (*xpc_heartbeat_exit) (void);
-extern void (*xpc_increment_heartbeat) (void);
-extern void (*xpc_offline_heartbeat) (void);
-extern void (*xpc_online_heartbeat) (void);
-extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *);
-extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
-extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *);
-extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *);
-extern void (*xpc_teardown_msg_structures) (struct xpc_channel *);
-extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
-extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
-extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *);
-extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *);
-extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *,
- unsigned long, int);
-extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
-extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
-extern void (*xpc_cancel_partition_deactivation_request) (
- struct xpc_partition *);
-extern void (*xpc_process_activate_IRQ_rcvd) (void);
-extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *);
-extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *);
-
-extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *);
-extern int (*xpc_partition_engaged) (short);
-extern int (*xpc_any_partition_engaged) (void);
-extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *);
-extern void (*xpc_assume_partition_disengaged) (short);
-
-extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *,
- unsigned long *);
-extern void (*xpc_send_chctl_closereply) (struct xpc_channel *,
- unsigned long *);
-extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
- unsigned long *);
-extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);
-
-extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *,
- unsigned long);
-
-extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *,
- u16, u8, xpc_notify_func, void *);
-extern void (*xpc_received_payload) (struct xpc_channel *, void *);
/* found in xpc_sn2.c */
extern int xpc_init_sn2(void);
@@ -909,40 +933,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *,
extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
-static inline int
-xpc_hb_allowed(short partid, void *heartbeating_to_mask)
-{
- return test_bit(partid, heartbeating_to_mask);
-}
-
-static inline int
-xpc_any_hbs_allowed(void)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions);
-}
-
-static inline void
-xpc_allow_hb(short partid)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- set_bit(partid, xpc_heartbeating_to_mask);
-}
-
-static inline void
-xpc_disallow_hb(short partid)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- clear_bit(partid, xpc_heartbeating_to_mask);
-}
-
-static inline void
-xpc_disallow_all_hbs(void)
-{
- DBUG_ON(xpc_heartbeating_to_mask == NULL);
- bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions);
-}
-
static inline void
xpc_wakeup_channel_mgr(struct xpc_partition *part)
{
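Most of the xpc.h churn above replaces the long list of per-arch global function pointers with a single struct xpc_arch_operations that each backend fills in once at init time. A self-contained sketch of that ops-table pattern, with hypothetical names (struct hb_ops, hb_ops_sn2, hb_ops_uv) rather than the real XPC symbols, showing one global struct selected at runtime and then called through:

#include <stdio.h>

struct hb_ops {
	void (*increment_heartbeat)(void);
	int  (*partition_engaged)(short partid);
};

static void inc_hb_sn2(void) { puts("sn2: bump heartbeat in shared vars"); }
static int  engaged_sn2(short partid) { (void)partid; return 0; }

static void inc_hb_uv(void) { puts("uv: bump local heartbeat counter"); }
static int  engaged_uv(short partid) { (void)partid; return 1; }

/* One instance per backend, filled with designated initializers,
 * mirroring xpc_arch_ops_sn2 / xpc_arch_ops_uv in the patch. */
static const struct hb_ops hb_ops_sn2 = {
	.increment_heartbeat = inc_hb_sn2,
	.partition_engaged   = engaged_sn2,
};
static const struct hb_ops hb_ops_uv = {
	.increment_heartbeat = inc_hb_uv,
	.partition_engaged   = engaged_uv,
};

/* The single global the common code calls through, like xpc_arch_ops. */
static struct hb_ops hb_ops;

int main(void)
{
	int is_uv = 1;				/* pretend platform detection */

	hb_ops = is_uv ? hb_ops_uv : hb_ops_sn2;	/* xpc_arch_ops = ... */

	hb_ops.increment_heartbeat();
	printf("partition 3 engaged: %d\n", hb_ops.partition_engaged(3));
	return 0;
}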
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 99a2534c38a..652593fc486 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -39,34 +39,38 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
if (!(ch->flags & XPC_C_SETUP)) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
- ret = xpc_setup_msg_structures(ch);
+ ret = xpc_arch_ops.setup_msg_structures(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
if (ret != xpSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
+ else
+ ch->flags |= XPC_C_SETUP;
- ch->flags |= XPC_C_SETUP;
-
- if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
+ if (ch->flags & XPC_C_DISCONNECTING)
return;
}
if (!(ch->flags & XPC_C_OPENREPLY)) {
ch->flags |= XPC_C_OPENREPLY;
- xpc_send_chctl_openreply(ch, irq_flags);
+ xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_ROPENREPLY))
return;
- ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
+ if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
+ ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
+ xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
+ }
+
+ if (!(ch->flags & XPC_C_ROPENCOMPLETE))
+ return;
dev_info(xpc_chan, "channel %d to partition %d connected\n",
ch->number, ch->partid);
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- xpc_create_kthreads(ch, 1, 0);
- spin_lock_irqsave(&ch->lock, *irq_flags);
+ ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
}
/*
@@ -96,7 +100,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
- if (xpc_partition_engaged(ch->partid))
+ if (xpc_arch_ops.partition_engaged(ch->partid))
return;
} else {
@@ -108,7 +112,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
if (!(ch->flags & XPC_C_CLOSEREPLY)) {
ch->flags |= XPC_C_CLOSEREPLY;
- xpc_send_chctl_closereply(ch, irq_flags);
+ xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
}
if (!(ch->flags & XPC_C_RCLOSEREPLY))
@@ -118,7 +122,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
/* wake those waiting for notify completion */
if (atomic_read(&ch->n_to_notify) > 0) {
/* we do callout while holding ch->lock, callout can't block */
- xpc_notify_senders_of_disconnect(ch);
+ xpc_arch_ops.notify_senders_of_disconnect(ch);
}
/* both sides are disconnected now */
@@ -132,7 +136,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
/* it's now safe to free the channel's message queues */
- xpc_teardown_msg_structures(ch);
+ xpc_arch_ops.teardown_msg_structures(ch);
ch->func = NULL;
ch->key = NULL;
@@ -144,8 +148,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
/*
* Mark the channel disconnected and clear all other flags, including
- * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
- * not including XPC_C_WDISCONNECT (if it was set).
+ * XPC_C_SETUP (because of call to
+ * xpc_arch_ops.teardown_msg_structures()) but not including
+ * XPC_C_WDISCONNECT (if it was set).
*/
ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
@@ -184,6 +189,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
struct xpc_channel *ch = &part->channels[ch_number];
enum xp_retval reason;
enum xp_retval ret;
+ int create_kthread = 0;
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -196,8 +202,7 @@ again:
* has had a chance to see that the channel is disconnected.
*/
ch->delayed_chctl_flags |= chctl_flags;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
@@ -239,8 +244,7 @@ again:
XPC_CHCTL_CLOSEREQUEST;
spin_unlock(&part->chctl_lock);
}
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
XPC_SET_REASON(ch, 0, 0);
@@ -250,7 +254,8 @@ again:
ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
}
- chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);
+ chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
+ XPC_CHCTL_OPENCOMPLETE);
/*
* The meaningful CLOSEREQUEST connection state fields are:
@@ -269,8 +274,7 @@ again:
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
xpc_process_disconnect(ch, &irq_flags);
@@ -283,8 +287,7 @@ again:
if (ch->flags & XPC_C_DISCONNECTED) {
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
@@ -299,8 +302,7 @@ again:
XPC_CHCTL_CLOSEREPLY;
spin_unlock(&part->chctl_lock);
}
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
ch->flags |= XPC_C_RCLOSEREPLY;
@@ -320,14 +322,12 @@ again:
if (part->act_state == XPC_P_AS_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
XPC_C_OPENREQUEST)));
@@ -341,8 +341,7 @@ again:
*/
if (args->entry_size == 0 || args->local_nentries == 0) {
/* assume OPENREQUEST was delayed by mistake */
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
@@ -352,8 +351,7 @@ again:
if (args->entry_size != ch->entry_size) {
XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
} else {
ch->entry_size = args->entry_size;
@@ -375,15 +373,13 @@ again:
args->local_msgqueue_pa, args->local_nentries,
args->remote_nentries, ch->partid, ch->number);
- if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
- }
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ goto out;
+
if (!(ch->flags & XPC_C_OPENREQUEST)) {
XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
@@ -400,11 +396,11 @@ again:
DBUG_ON(args->local_nentries == 0);
DBUG_ON(args->remote_nentries == 0);
- ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
+ ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
+ args->local_msgqueue_pa);
if (ret != xpSuccess) {
XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
- return;
+ goto out;
}
ch->flags |= XPC_C_ROPENREPLY;
@@ -430,7 +426,36 @@ again:
xpc_process_connect(ch, &irq_flags);
}
+ if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {
+
+ dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
+ "partid=%d, channel=%d\n", ch->partid, ch->number);
+
+ if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
+ goto out;
+
+ if (!(ch->flags & XPC_C_OPENREQUEST) ||
+ !(ch->flags & XPC_C_OPENREPLY)) {
+ XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
+ &irq_flags);
+ goto out;
+ }
+
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
+ DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
+ DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
+
+ ch->flags |= XPC_C_ROPENCOMPLETE;
+
+ xpc_process_connect(ch, &irq_flags);
+ create_kthread = 1;
+ }
+
+out:
spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+ if (create_kthread)
+ xpc_create_kthreads(ch, 1, 0);
}
/*
@@ -508,7 +533,7 @@ xpc_connect_channel(struct xpc_channel *ch)
/* initiate the connection */
ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
- xpc_send_chctl_openrequest(ch, &irq_flags);
+ xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);
xpc_process_connect(ch, &irq_flags);
@@ -526,7 +551,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
int ch_number;
u32 ch_flags;
- chctl.all_flags = xpc_get_chctl_all_flags(part);
+ chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);
/*
* Initiate channel connections for registered channels.
@@ -564,10 +589,6 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
if (!(ch_flags & XPC_C_OPENREQUEST)) {
DBUG_ON(ch_flags & XPC_C_SETUP);
(void)xpc_connect_channel(ch);
- } else {
- spin_lock_irqsave(&ch->lock, irq_flags);
- xpc_process_connect(ch, &irq_flags);
- spin_unlock_irqrestore(&ch->lock, irq_flags);
}
continue;
}
@@ -579,7 +600,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
*/
if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
- xpc_process_msg_chctl_flags(part, ch_number);
+ xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
}
}
@@ -755,7 +776,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
XPC_C_CONNECTING | XPC_C_CONNECTED);
- xpc_send_chctl_closerequest(ch, irq_flags);
+ xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);
if (channel_was_connected)
ch->flags |= XPC_C_WASCONNECTED;
@@ -862,8 +883,8 @@ xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
DBUG_ON(payload == NULL);
if (xpc_part_ref(part)) {
- ret = xpc_send_payload(&part->channels[ch_number], flags,
- payload, payload_size, 0, NULL, NULL);
+ ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
+ flags, payload, payload_size, 0, NULL, NULL);
xpc_part_deref(part);
}
@@ -914,9 +935,8 @@ xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
DBUG_ON(func == NULL);
if (xpc_part_ref(part)) {
- ret = xpc_send_payload(&part->channels[ch_number], flags,
- payload, payload_size, XPC_N_CALL, func,
- key);
+ ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
+ flags, payload, payload_size, XPC_N_CALL, func, key);
xpc_part_deref(part);
}
return ret;
@@ -930,7 +950,7 @@ xpc_deliver_payload(struct xpc_channel *ch)
{
void *payload;
- payload = xpc_get_deliverable_payload(ch);
+ payload = xpc_arch_ops.get_deliverable_payload(ch);
if (payload != NULL) {
/*
@@ -984,7 +1004,7 @@ xpc_initiate_received(short partid, int ch_number, void *payload)
DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
ch = &part->channels[ch_number];
- xpc_received_payload(ch, payload);
+ xpc_arch_ops.received_payload(ch, payload);
/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
xpc_msgqueue_deref(ch);
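In xpc_process_openclose_chctl_flags() the patch funnels every early return through a single `out:` unlock and records a create_kthread flag under ch->lock, creating the kthread only after the lock is dropped, since kthread creation can sleep. A stand-alone pthreads sketch of that idiom with hypothetical names (process_flags, do_blocking_work); the real code uses spin_lock_irqsave() and xpc_create_kthreads():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

static void do_blocking_work(void)
{
	/* Anything that may sleep must run without the lock held. */
	puts("spawning worker outside the lock");
}

static void process_flags(int flags)
{
	int need_worker = 0;

	pthread_mutex_lock(&lock);

	if (!(flags & 0x1))
		goto out;		/* every early exit unlocks once */

	state |= flags;
	need_worker = 1;		/* decide under the lock ... */
out:
	pthread_mutex_unlock(&lock);

	if (need_worker)		/* ... act after dropping it */
		do_blocking_work();
}

int main(void)
{
	process_flags(0x1);
	process_flags(0x2);
	printf("state=%#x\n", state);
	return 0;
}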
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 1ab9fda87fa..fd3688a3e23 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -150,7 +150,6 @@ DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);
static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
-void *xpc_heartbeating_to_mask;
/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);
@@ -170,62 +169,7 @@ static struct notifier_block xpc_die_notifier = {
.notifier_call = xpc_system_die,
};
-int (*xpc_setup_partitions_sn) (void);
-void (*xpc_teardown_partitions_sn) (void);
-enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
- unsigned long *rp_pa,
- size_t *len);
-int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
-void (*xpc_heartbeat_init) (void);
-void (*xpc_heartbeat_exit) (void);
-void (*xpc_increment_heartbeat) (void);
-void (*xpc_offline_heartbeat) (void);
-void (*xpc_online_heartbeat) (void);
-enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);
-
-enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
-void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
-u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
-enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
-void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
-void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
-int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
-void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);
-
-void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
- unsigned long remote_rp_pa,
- int nasid);
-void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
-void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
-void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
-
-void (*xpc_process_activate_IRQ_rcvd) (void);
-enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
-void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);
-
-void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
-int (*xpc_partition_engaged) (short partid);
-int (*xpc_any_partition_engaged) (void);
-void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
-void (*xpc_assume_partition_disengaged) (short partid);
-
-void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
- unsigned long *irq_flags);
-
-enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
- unsigned long msgqueue_pa);
-
-enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
- void *payload, u16 payload_size,
- u8 notify_type, xpc_notify_func func,
- void *key);
-void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);
+struct xpc_arch_operations xpc_arch_ops;
/*
* Timer function to enforce the timelimit on the partition disengage.
@@ -240,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data)
(void)xpc_partition_disengaged(part);
DBUG_ON(part->disengage_timeout != 0);
- DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
+ DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}
/*
@@ -251,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data)
static void
xpc_hb_beater(unsigned long dummy)
{
- xpc_increment_heartbeat();
+ xpc_arch_ops.increment_heartbeat();
if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
wake_up_interruptible(&xpc_activate_IRQ_wq);
@@ -263,7 +207,7 @@ xpc_hb_beater(unsigned long dummy)
static void
xpc_start_hb_beater(void)
{
- xpc_heartbeat_init();
+ xpc_arch_ops.heartbeat_init();
init_timer(&xpc_hb_timer);
xpc_hb_timer.function = xpc_hb_beater;
xpc_hb_beater(0);
@@ -273,7 +217,7 @@ static void
xpc_stop_hb_beater(void)
{
del_timer_sync(&xpc_hb_timer);
- xpc_heartbeat_exit();
+ xpc_arch_ops.heartbeat_exit();
}
/*
@@ -302,7 +246,7 @@ xpc_check_remote_hb(void)
continue;
}
- ret = xpc_get_remote_heartbeat(part);
+ ret = xpc_arch_ops.get_remote_heartbeat(part);
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(part, ret);
}
@@ -353,7 +297,7 @@ xpc_hb_checker(void *ignore)
force_IRQ = 0;
dev_dbg(xpc_part, "processing activate IRQs "
"received\n");
- xpc_process_activate_IRQ_rcvd();
+ xpc_arch_ops.process_activate_IRQ_rcvd();
}
/* wait for IRQ or timeout */
@@ -528,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part)
init_waitqueue_head(&ch->idle_wq);
}
- ret = xpc_setup_ch_structures_sn(part);
+ ret = xpc_arch_ops.setup_ch_structures(part);
if (ret != xpSuccess)
goto out_2;
@@ -572,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part)
/* now we can begin tearing down the infrastructure */
- xpc_teardown_ch_structures_sn(part);
+ xpc_arch_ops.teardown_ch_structures(part);
kfree(part->remote_openclose_args_base);
part->remote_openclose_args = NULL;
@@ -620,12 +564,12 @@ xpc_activating(void *__partid)
dev_dbg(xpc_part, "activating partition %d\n", partid);
- xpc_allow_hb(partid);
+ xpc_arch_ops.allow_hb(partid);
if (xpc_setup_ch_structures(part) == xpSuccess) {
(void)xpc_part_ref(part); /* this will always succeed */
- if (xpc_make_first_contact(part) == xpSuccess) {
+ if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
xpc_mark_partition_active(part);
xpc_channel_mgr(part);
/* won't return until partition is deactivating */
@@ -635,12 +579,12 @@ xpc_activating(void *__partid)
xpc_teardown_ch_structures(part);
}
- xpc_disallow_hb(partid);
+ xpc_arch_ops.disallow_hb(partid);
xpc_mark_partition_inactive(part);
if (part->reason == xpReactivating) {
/* interrupting ourselves results in activating partition */
- xpc_request_partition_reactivation(part);
+ xpc_arch_ops.request_partition_reactivation(part);
}
return 0;
@@ -713,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
+ int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+ xpc_arch_ops.n_of_deliverable_payloads;
+
do {
/* deliver messages to their intended recipients */
- while (xpc_n_of_deliverable_payloads(ch) > 0 &&
+ while (n_of_deliverable_payloads(ch) > 0 &&
!(ch->flags & XPC_C_DISCONNECTING)) {
xpc_deliver_payload(ch);
}
@@ -732,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
"wait_event_interruptible_exclusive()\n");
(void)wait_event_interruptible_exclusive(ch->idle_wq,
- (xpc_n_of_deliverable_payloads(ch) > 0 ||
+ (n_of_deliverable_payloads(ch) > 0 ||
(ch->flags & XPC_C_DISCONNECTING)));
atomic_dec(&ch->kthreads_idle);
@@ -749,6 +696,8 @@ xpc_kthread_start(void *args)
struct xpc_channel *ch;
int n_needed;
unsigned long irq_flags;
+ int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+ xpc_arch_ops.n_of_deliverable_payloads;
dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
partid, ch_number);
@@ -777,7 +726,7 @@ xpc_kthread_start(void *args)
* additional kthreads to help deliver them. We only
* need one less than total #of messages to deliver.
*/
- n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
+ n_needed = n_of_deliverable_payloads(ch) - 1;
if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
xpc_activate_kthreads(ch, n_needed);
@@ -805,7 +754,7 @@ xpc_kthread_start(void *args)
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
- xpc_indicate_partition_disengaged(part);
+ xpc_arch_ops.indicate_partition_disengaged(part);
}
xpc_msgqueue_deref(ch);
@@ -837,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
struct xpc_partition *part = &xpc_partitions[ch->partid];
struct task_struct *kthread;
+ void (*indicate_partition_disengaged) (struct xpc_partition *) =
+ xpc_arch_ops.indicate_partition_disengaged;
while (needed-- > 0) {
@@ -858,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
atomic_inc_return(&part->nchannels_engaged) == 1) {
- xpc_indicate_partition_engaged(part);
+ xpc_arch_ops.indicate_partition_engaged(part);
}
(void)xpc_part_ref(part);
xpc_msgqueue_ref(ch);
@@ -880,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
atomic_dec_return(&part->nchannels_engaged) == 0) {
- xpc_indicate_partition_disengaged(part);
+ indicate_partition_disengaged(part);
}
xpc_msgqueue_deref(ch);
xpc_part_deref(part);
@@ -993,13 +944,13 @@ xpc_setup_partitions(void)
atomic_set(&part->references, 0);
}
- return xpc_setup_partitions_sn();
+ return xpc_arch_ops.setup_partitions();
}
static void
xpc_teardown_partitions(void)
{
- xpc_teardown_partitions_sn();
+ xpc_arch_ops.teardown_partitions();
kfree(xpc_partitions);
}
@@ -1055,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason)
disengage_timeout = part->disengage_timeout;
}
- if (xpc_any_partition_engaged()) {
+ if (xpc_arch_ops.any_partition_engaged()) {
if (time_is_before_jiffies(printmsg_time)) {
dev_info(xpc_part, "waiting for remote "
"partitions to deactivate, timeout in "
@@ -1086,8 +1037,7 @@ xpc_do_exit(enum xp_retval reason)
} while (1);
- DBUG_ON(xpc_any_partition_engaged());
- DBUG_ON(xpc_any_hbs_allowed() != 0);
+ DBUG_ON(xpc_arch_ops.any_partition_engaged());
xpc_teardown_rsvd_page();
@@ -1152,15 +1102,15 @@ xpc_die_deactivate(void)
/* keep xpc_hb_checker thread from doing anything (just in case) */
xpc_exiting = 1;
- xpc_disallow_all_hbs(); /*indicate we're deactivated */
+ xpc_arch_ops.disallow_all_hbs(); /*indicate we're deactivated */
for (partid = 0; partid < xp_max_npartitions; partid++) {
part = &xpc_partitions[partid];
- if (xpc_partition_engaged(partid) ||
+ if (xpc_arch_ops.partition_engaged(partid) ||
part->act_state != XPC_P_AS_INACTIVE) {
- xpc_request_partition_deactivation(part);
- xpc_indicate_partition_disengaged(part);
+ xpc_arch_ops.request_partition_deactivation(part);
+ xpc_arch_ops.indicate_partition_disengaged(part);
}
}
@@ -1177,7 +1127,7 @@ xpc_die_deactivate(void)
wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
while (1) {
- any_engaged = xpc_any_partition_engaged();
+ any_engaged = xpc_arch_ops.any_partition_engaged();
if (!any_engaged) {
dev_info(xpc_part, "all partitions have deactivated\n");
break;
@@ -1186,7 +1136,7 @@ xpc_die_deactivate(void)
if (!keep_waiting--) {
for (partid = 0; partid < xp_max_npartitions;
partid++) {
- if (xpc_partition_engaged(partid)) {
+ if (xpc_arch_ops.partition_engaged(partid)) {
dev_info(xpc_part, "deactivate from "
"remote partition %d timed "
"out\n", partid);
@@ -1233,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
/* fall through */
case DIE_MCA_MONARCH_ENTER:
case DIE_INIT_MONARCH_ENTER:
- xpc_offline_heartbeat();
+ xpc_arch_ops.offline_heartbeat();
break;
case DIE_KDEBUG_LEAVE:
@@ -1244,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
/* fall through */
case DIE_MCA_MONARCH_LEAVE:
case DIE_INIT_MONARCH_LEAVE:
- xpc_online_heartbeat();
+ xpc_arch_ops.online_heartbeat();
break;
}
#else
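xpc_kthread_waitmsgs() and xpc_kthread_start() above copy xpc_arch_ops.n_of_deliverable_payloads into a local function-pointer variable once and call through that local in their loops and wait conditions. A tiny sketch of the same idiom (hypothetical ops/count_ready names), which keeps the hot-path expressions short and reads the ops member only once:

#include <stdio.h>

struct ops {
	int (*count_ready)(void);
};

static int count_ready_impl(void) { static int n = 3; return n--; }

static struct ops ops = { .count_ready = count_ready_impl };

static void drain(void)
{
	/* Grab the member once; call through the local afterwards. */
	int (*count_ready)(void) = ops.count_ready;

	while (count_ready() > 0)
		puts("deliver one payload");
}

int main(void)
{
	drain();
	return 0;
}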
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 6722f6fe4dc..65877bc5eda 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -70,6 +70,9 @@ xpc_get_rsvd_page_pa(int nasid)
size_t buf_len = 0;
void *buf = buf;
void *buf_base = NULL;
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *) =
+ xpc_arch_ops.get_partition_rsvd_page_pa;
while (1) {
@@ -79,8 +82,7 @@ xpc_get_rsvd_page_pa(int nasid)
* ??? function or have two versions? Rename rp_pa for UV to
* ??? rp_gpa?
*/
- ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa,
- &len);
+ ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);
dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
"address=0x%016lx, len=0x%016lx\n", ret,
@@ -172,7 +174,7 @@ xpc_setup_rsvd_page(void)
xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
- ret = xpc_setup_rsvd_page_sn(rp);
+ ret = xpc_arch_ops.setup_rsvd_page(rp);
if (ret != 0)
return ret;
@@ -264,7 +266,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
short partid = XPC_PARTID(part);
int disengaged;
- disengaged = !xpc_partition_engaged(partid);
+ disengaged = !xpc_arch_ops.partition_engaged(partid);
if (part->disengage_timeout) {
if (!disengaged) {
if (time_is_after_jiffies(part->disengage_timeout)) {
@@ -280,7 +282,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
dev_info(xpc_part, "deactivate request to remote "
"partition %d timed out\n", partid);
xpc_disengage_timedout = 1;
- xpc_assume_partition_disengaged(partid);
+ xpc_arch_ops.assume_partition_disengaged(partid);
disengaged = 1;
}
part->disengage_timeout = 0;
@@ -294,7 +296,7 @@ xpc_partition_disengaged(struct xpc_partition *part)
if (part->act_state != XPC_P_AS_INACTIVE)
xpc_wakeup_channel_mgr(part);
- xpc_cancel_partition_deactivation_request(part);
+ xpc_arch_ops.cancel_partition_deactivation_request(part);
}
return disengaged;
}
@@ -339,7 +341,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
spin_unlock_irqrestore(&part->act_lock, irq_flags);
if (reason == xpReactivating) {
/* we interrupt ourselves to reactivate partition */
- xpc_request_partition_reactivation(part);
+ xpc_arch_ops.request_partition_reactivation(part);
}
return;
}
@@ -358,7 +360,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
spin_unlock_irqrestore(&part->act_lock, irq_flags);
/* ask remote partition to deactivate with regard to us */
- xpc_request_partition_deactivation(part);
+ xpc_arch_ops.request_partition_deactivation(part);
/* set a timelimit on the disengage phase of the deactivation request */
part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
@@ -496,7 +498,7 @@ xpc_discovery(void)
continue;
}
- xpc_request_partition_activation(remote_rp,
+ xpc_arch_ops.request_partition_activation(remote_rp,
remote_rp_pa, nasid);
}
}
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index eaaa964942d..915a3b495da 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
@@ -60,14 +60,14 @@ static struct xpc_vars_sn2 *xpc_vars_sn2;
static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
static int
-xpc_setup_partitions_sn_sn2(void)
+xpc_setup_partitions_sn2(void)
{
/* nothing needs to be done */
return 0;
}
static void
-xpc_teardown_partitions_sn_sn2(void)
+xpc_teardown_partitions_sn2(void)
{
/* nothing needs to be done */
}
@@ -431,6 +431,13 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
}
static void
+xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch,
+ unsigned long *irq_flags)
+{
+ XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags);
+}
+
+static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
@@ -621,7 +628,7 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
static int
-xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
+xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp)
{
struct amo *amos_page;
int i;
@@ -629,7 +636,7 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
xpc_vars_sn2 = XPC_RP_VARS(rp);
- rp->sn.vars_pa = xp_pa(xpc_vars_sn2);
+ rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2);
/* vars_part array follows immediately after vars */
xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
@@ -693,6 +700,33 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
return 0;
}
+static int
+xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask)
+{
+ return test_bit(partid, heartbeating_to_mask);
+}
+
+static void
+xpc_allow_hb_sn2(short partid)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ set_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
+}
+
+static void
+xpc_disallow_hb_sn2(short partid)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask);
+}
+
+static void
+xpc_disallow_all_hbs_sn2(void)
+{
+ DBUG_ON(xpc_vars_sn2 == NULL);
+ bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions);
+}
+
static void
xpc_increment_heartbeat_sn2(void)
{
@@ -719,7 +753,6 @@ xpc_heartbeat_init_sn2(void)
DBUG_ON(xpc_vars_sn2 == NULL);
bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
- xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0];
xpc_online_heartbeat_sn2();
}
@@ -751,9 +784,9 @@ xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
remote_vars->heartbeating_to_mask[0]);
if ((remote_vars->heartbeat == part->last_heartbeat &&
- remote_vars->heartbeat_offline == 0) ||
- !xpc_hb_allowed(sn_partition_id,
- &remote_vars->heartbeating_to_mask)) {
+ !remote_vars->heartbeat_offline) ||
+ !xpc_hb_allowed_sn2(sn_partition_id,
+ remote_vars->heartbeating_to_mask)) {
ret = xpNoHeartbeat;
} else {
part->last_heartbeat = remote_vars->heartbeat;
@@ -972,7 +1005,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
return;
}
- remote_vars_pa = remote_rp->sn.vars_pa;
+ remote_vars_pa = remote_rp->sn.sn2.vars_pa;
remote_rp_version = remote_rp->version;
remote_rp_ts_jiffies = remote_rp->ts_jiffies;
@@ -1129,7 +1162,7 @@ xpc_process_activate_IRQ_rcvd_sn2(void)
* Setup the channel structures that are sn2 specific.
*/
static enum xp_retval
-xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part)
+xpc_setup_ch_structures_sn2(struct xpc_partition *part)
{
struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
struct xpc_channel_sn2 *ch_sn2;
@@ -1251,7 +1284,7 @@ out_1:
* Teardown the channel structures that are sn2 specific.
*/
static void
-xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part)
+xpc_teardown_ch_structures_sn2(struct xpc_partition *part)
{
struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
short partid = XPC_PARTID(part);
@@ -2315,61 +2348,70 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}
+static struct xpc_arch_operations xpc_arch_ops_sn2 = {
+ .setup_partitions = xpc_setup_partitions_sn2,
+ .teardown_partitions = xpc_teardown_partitions_sn2,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
+ .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2,
+ .setup_rsvd_page = xpc_setup_rsvd_page_sn2,
+
+ .allow_hb = xpc_allow_hb_sn2,
+ .disallow_hb = xpc_disallow_hb_sn2,
+ .disallow_all_hbs = xpc_disallow_all_hbs_sn2,
+ .increment_heartbeat = xpc_increment_heartbeat_sn2,
+ .offline_heartbeat = xpc_offline_heartbeat_sn2,
+ .online_heartbeat = xpc_online_heartbeat_sn2,
+ .heartbeat_init = xpc_heartbeat_init_sn2,
+ .heartbeat_exit = xpc_heartbeat_exit_sn2,
+ .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2,
+
+ .request_partition_activation =
+ xpc_request_partition_activation_sn2,
+ .request_partition_reactivation =
+ xpc_request_partition_reactivation_sn2,
+ .request_partition_deactivation =
+ xpc_request_partition_deactivation_sn2,
+ .cancel_partition_deactivation_request =
+ xpc_cancel_partition_deactivation_request_sn2,
+
+ .setup_ch_structures = xpc_setup_ch_structures_sn2,
+ .teardown_ch_structures = xpc_teardown_ch_structures_sn2,
+
+ .make_first_contact = xpc_make_first_contact_sn2,
+
+ .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2,
+ .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2,
+ .send_chctl_closereply = xpc_send_chctl_closereply_sn2,
+ .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2,
+ .send_chctl_openreply = xpc_send_chctl_openreply_sn2,
+ .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2,
+ .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2,
+
+ .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2,
+
+ .setup_msg_structures = xpc_setup_msg_structures_sn2,
+ .teardown_msg_structures = xpc_teardown_msg_structures_sn2,
+
+ .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2,
+ .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2,
+ .partition_engaged = xpc_partition_engaged_sn2,
+ .any_partition_engaged = xpc_any_partition_engaged_sn2,
+ .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2,
+
+ .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2,
+ .send_payload = xpc_send_payload_sn2,
+ .get_deliverable_payload = xpc_get_deliverable_payload_sn2,
+ .received_payload = xpc_received_payload_sn2,
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2,
+};
+
int
xpc_init_sn2(void)
{
int ret;
size_t buf_size;
- xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2;
- xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2;
- xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
- xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2;
- xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
- xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
- xpc_online_heartbeat = xpc_online_heartbeat_sn2;
- xpc_heartbeat_init = xpc_heartbeat_init_sn2;
- xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
- xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2;
-
- xpc_request_partition_activation = xpc_request_partition_activation_sn2;
- xpc_request_partition_reactivation =
- xpc_request_partition_reactivation_sn2;
- xpc_request_partition_deactivation =
- xpc_request_partition_deactivation_sn2;
- xpc_cancel_partition_deactivation_request =
- xpc_cancel_partition_deactivation_request_sn2;
-
- xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
- xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2;
- xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2;
- xpc_make_first_contact = xpc_make_first_contact_sn2;
-
- xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
- xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
- xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
- xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
- xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
-
- xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2;
-
- xpc_setup_msg_structures = xpc_setup_msg_structures_sn2;
- xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2;
-
- xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
- xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
- xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2;
- xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2;
-
- xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
- xpc_indicate_partition_disengaged =
- xpc_indicate_partition_disengaged_sn2;
- xpc_partition_engaged = xpc_partition_engaged_sn2;
- xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
- xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
-
- xpc_send_payload = xpc_send_payload_sn2;
- xpc_received_payload = xpc_received_payload_sn2;
+ xpc_arch_ops = xpc_arch_ops_sn2;
if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index f7fff4727ed..9172fcdee4e 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -46,8 +46,7 @@ struct uv_IO_APIC_route_entry {
};
#endif
-static atomic64_t xpc_heartbeat_uv;
-static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
+static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
@@ -63,7 +62,7 @@ static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
-xpc_setup_partitions_sn_uv(void)
+xpc_setup_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
@@ -79,7 +78,7 @@ xpc_setup_partitions_sn_uv(void)
}
static void
-xpc_teardown_partitions_sn_uv(void)
+xpc_teardown_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
@@ -423,41 +422,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
/* syncing of remote_act_state was just done above */
break;
- case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
- struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
- msg = container_of(msg_hdr,
- struct xpc_activate_mq_msg_heartbeat_req_uv,
- hdr);
- part_uv->heartbeat = msg->heartbeat;
- break;
- }
- case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
- struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
- msg = container_of(msg_hdr,
- struct xpc_activate_mq_msg_heartbeat_req_uv,
- hdr);
- part_uv->heartbeat = msg->heartbeat;
-
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
- part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
- break;
- }
- case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
- struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
- msg = container_of(msg_hdr,
- struct xpc_activate_mq_msg_heartbeat_req_uv,
- hdr);
- part_uv->heartbeat = msg->heartbeat;
-
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
- part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
- break;
- }
case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
struct xpc_activate_mq_msg_activate_req_uv *msg;
@@ -475,6 +439,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
+ part_uv->heartbeat_gpa = msg->heartbeat_gpa;
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
@@ -569,6 +534,17 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
xpc_wakeup_channel_mgr(part);
break;
}
+ case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
+ struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
+
+ msg = container_of(msg_hdr, struct
+ xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
+ spin_lock_irqsave(&part->chctl_lock, irq_flags);
+ part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
+ spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+ xpc_wakeup_channel_mgr(part);
+ break;
+ }
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
@@ -759,7 +735,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
/*
* !!! Make our side think that the remote partition sent an activate
- * !!! message our way by doing what the activate IRQ handler would
+ * !!! mq message our way by doing what the activate IRQ handler would
* !!! do had one really been sent.
*/
@@ -806,90 +782,82 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
}
static int
-xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
+xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
- rp->sn.activate_gru_mq_desc_gpa =
+ xpc_heartbeat_uv =
+ &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
+ rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
+ rp->sn.uv.activate_gru_mq_desc_gpa =
uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
return 0;
}
static void
-xpc_send_heartbeat_uv(int msg_type)
+xpc_allow_hb_uv(short partid)
{
- short partid;
- struct xpc_partition *part;
- struct xpc_activate_mq_msg_heartbeat_req_uv msg;
-
- /*
- * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
- * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
- * !!! seconds. This is an increase in numalink traffic.
- * ??? Is this good?
- */
-
- msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);
-
- partid = find_first_bit(xpc_heartbeating_to_mask_uv,
- XP_MAX_NPARTITIONS_UV);
-
- while (partid < XP_MAX_NPARTITIONS_UV) {
- part = &xpc_partitions[partid];
+}
- xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
- msg_type);
+static void
+xpc_disallow_hb_uv(short partid)
+{
+}
- partid = find_next_bit(xpc_heartbeating_to_mask_uv,
- XP_MAX_NPARTITIONS_UV, partid + 1);
- }
+static void
+xpc_disallow_all_hbs_uv(void)
+{
}
static void
xpc_increment_heartbeat_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
+ xpc_heartbeat_uv->value++;
}
static void
xpc_offline_heartbeat_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
+ xpc_increment_heartbeat_uv();
+ xpc_heartbeat_uv->offline = 1;
}
static void
xpc_online_heartbeat_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
+ xpc_increment_heartbeat_uv();
+ xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_init_uv(void)
{
- atomic64_set(&xpc_heartbeat_uv, 0);
- bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
- xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
+ xpc_heartbeat_uv->value = 1;
+ xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_exit_uv(void)
{
- xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
+ xpc_offline_heartbeat_uv();
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
struct xpc_partition_uv *part_uv = &part->sn.uv;
- enum xp_retval ret = xpNoHeartbeat;
+ enum xp_retval ret;
- if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
- part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {
+ ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
+ part_uv->heartbeat_gpa,
+ sizeof(struct xpc_heartbeat_uv));
+ if (ret != xpSuccess)
+ return ret;
- if (part_uv->heartbeat != part->last_heartbeat ||
- (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {
+ if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
+ !part_uv->cached_heartbeat.offline) {
- part->last_heartbeat = part_uv->heartbeat;
- ret = xpSuccess;
- }
+ ret = xpNoHeartbeat;
+ } else {
+ part->last_heartbeat = part_uv->cached_heartbeat.value;
}
return ret;
}
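
The hunks above replace the broadcast heartbeat, which pushed an activate-mq message to every known partition every few seconds, with a passive scheme: each partition publishes a small xpc_heartbeat_uv structure through its reserved page, and the watcher pulls it with xp_remote_memcpy() and compares it with the value seen on the previous poll. The stand-alone sketch below models only that pull-and-compare logic in ordinary user-space C; the value/offline semantics follow the diff, while the memcpy() stand-in for xp_remote_memcpy(), the enum values, and the demo driver are illustrative assumptions, not driver code.

/* Minimal user-space model of the pulled-heartbeat check introduced above.
 * memcpy() stands in for xp_remote_memcpy(); in the driver the source is a
 * global physical address in the remote partition's reserved page.
 */
#include <stdio.h>
#include <string.h>

struct xpc_heartbeat_uv {
	unsigned long value;	/* incremented while the partition is alive */
	unsigned long offline;	/* non-zero while heartbeating is suspended */
};

enum xp_retval { xpSuccess, xpNoHeartbeat };	/* illustrative values only */

struct partition {
	struct xpc_heartbeat_uv cached;	/* local copy pulled from remote */
	unsigned long last_heartbeat;	/* value seen on the previous pull */
};

static enum xp_retval
get_remote_heartbeat(struct partition *part, const struct xpc_heartbeat_uv *remote)
{
	/* Pull the remote structure into the local cache. */
	memcpy(&part->cached, remote, sizeof(part->cached));

	/* No forward progress and not marked offline: assume it is dead. */
	if (part->cached.value == part->last_heartbeat && !part->cached.offline)
		return xpNoHeartbeat;

	part->last_heartbeat = part->cached.value;
	return xpSuccess;
}

int main(void)
{
	struct xpc_heartbeat_uv remote = { .value = 1, .offline = 0 };
	struct partition part = { .last_heartbeat = 0 };

	printf("first pull: %d\n", get_remote_heartbeat(&part, &remote));
	printf("stalled:    %d\n", get_remote_heartbeat(&part, &remote));
	remote.value++;		/* remote side increments periodically */
	printf("after beat: %d\n", get_remote_heartbeat(&part, &remote));
	return 0;
}
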
@@ -904,8 +872,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
+ part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
part->sn.uv.activate_gru_mq_desc_gpa =
- remote_rp->sn.activate_gru_mq_desc_gpa;
+ remote_rp->sn.uv.activate_gru_mq_desc_gpa;
/*
* ??? Is it a good idea to make this conditional on what is
@@ -913,8 +882,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
*/
if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
msg.rp_gpa = uv_gpa(xpc_rsvd_page);
+ msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
msg.activate_gru_mq_desc_gpa =
- xpc_rsvd_page->sn.activate_gru_mq_desc_gpa;
+ xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
}
@@ -1010,7 +980,7 @@ xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
* Setup the channel structures that are uv specific.
*/
static enum xp_retval
-xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
+xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
struct xpc_channel_uv *ch_uv;
int ch_number;
@@ -1029,7 +999,7 @@ xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
* Teardown the channel structures that are uv specific.
*/
static void
-xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
+xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
/* nothing needs to be done */
return;
@@ -1243,6 +1213,16 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
}
static void
+xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+ struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;
+
+ msg.ch_number = ch->number;
+ xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+ XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
+}
+
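
xpc_send_chctl_opencomplete_uv() above, together with the new XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV case in xpc_handle_activate_mq_msg_uv(), adds an open-complete step after openrequest/openreply: the sender posts a per-channel notification, and the receiver sets XPC_CHCTL_OPENCOMPLETE in part->chctl.flags[] under chctl_lock and wakes the channel manager. The toy program below models only that set-flag-and-wake pattern, with a pthread mutex/condvar standing in for the spinlock and xpc_wakeup_channel_mgr(); the bit value and thread layout are illustrative, and real XPC carries the flag across partitions in a GRU message queue rather than shared memory.

/* Build with: cc -pthread opencomplete_model.c */
#include <pthread.h>
#include <stdio.h>

#define NCHANNELS		8
#define CHCTL_OPENCOMPLETE	0x10	/* illustrative bit value */

static pthread_mutex_t chctl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  chctl_wake = PTHREAD_COND_INITIALIZER;
static unsigned char   chctl_flags[NCHANNELS];

static void send_chctl_opencomplete(int ch_number)
{
	pthread_mutex_lock(&chctl_lock);
	chctl_flags[ch_number] |= CHCTL_OPENCOMPLETE;
	pthread_mutex_unlock(&chctl_lock);
	pthread_cond_signal(&chctl_wake);	/* "wake up the channel manager" */
}

static void *channel_manager(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&chctl_lock);
	for (;;) {
		for (int ch = 0; ch < NCHANNELS; ch++) {
			if (chctl_flags[ch] & CHCTL_OPENCOMPLETE) {
				chctl_flags[ch] &= ~CHCTL_OPENCOMPLETE;
				printf("channel %d: open complete\n", ch);
				pthread_mutex_unlock(&chctl_lock);
				return NULL;	/* one event is enough for the demo */
			}
		}
		pthread_cond_wait(&chctl_wake, &chctl_lock);
	}
}

int main(void)
{
	pthread_t mgr;

	pthread_create(&mgr, NULL, channel_manager, NULL);
	send_chctl_opencomplete(3);
	pthread_join(mgr, NULL);
	return 0;
}
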
+static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
unsigned long irq_flags;
@@ -1669,58 +1649,67 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
msg->hdr.msg_slot_number += ch->remote_nentries;
}
+static struct xpc_arch_operations xpc_arch_ops_uv = {
+ .setup_partitions = xpc_setup_partitions_uv,
+ .teardown_partitions = xpc_teardown_partitions_uv,
+ .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
+ .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
+ .setup_rsvd_page = xpc_setup_rsvd_page_uv,
+
+ .allow_hb = xpc_allow_hb_uv,
+ .disallow_hb = xpc_disallow_hb_uv,
+ .disallow_all_hbs = xpc_disallow_all_hbs_uv,
+ .increment_heartbeat = xpc_increment_heartbeat_uv,
+ .offline_heartbeat = xpc_offline_heartbeat_uv,
+ .online_heartbeat = xpc_online_heartbeat_uv,
+ .heartbeat_init = xpc_heartbeat_init_uv,
+ .heartbeat_exit = xpc_heartbeat_exit_uv,
+ .get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
+
+ .request_partition_activation =
+ xpc_request_partition_activation_uv,
+ .request_partition_reactivation =
+ xpc_request_partition_reactivation_uv,
+ .request_partition_deactivation =
+ xpc_request_partition_deactivation_uv,
+ .cancel_partition_deactivation_request =
+ xpc_cancel_partition_deactivation_request_uv,
+
+ .setup_ch_structures = xpc_setup_ch_structures_uv,
+ .teardown_ch_structures = xpc_teardown_ch_structures_uv,
+
+ .make_first_contact = xpc_make_first_contact_uv,
+
+ .get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
+ .send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
+ .send_chctl_closereply = xpc_send_chctl_closereply_uv,
+ .send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
+ .send_chctl_openreply = xpc_send_chctl_openreply_uv,
+ .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
+ .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,
+
+ .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,
+
+ .setup_msg_structures = xpc_setup_msg_structures_uv,
+ .teardown_msg_structures = xpc_teardown_msg_structures_uv,
+
+ .indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
+ .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
+ .assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
+ .partition_engaged = xpc_partition_engaged_uv,
+ .any_partition_engaged = xpc_any_partition_engaged_uv,
+
+ .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
+ .send_payload = xpc_send_payload_uv,
+ .get_deliverable_payload = xpc_get_deliverable_payload_uv,
+ .received_payload = xpc_received_payload_uv,
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
+};
+
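
Together with the matching xpc_arch_ops_sn2 assignment in the sn2 hunk earlier, this table replaces the long per-function pointer assignments that xpc_init_sn2()/xpc_init_uv() used to perform: all architecture-specific entry points now live in one xpc_arch_operations instance installed with a single assignment at init time, and common code calls through it. The reduced sketch below shows the pattern in isolation; the two-field ops struct, the helper bodies, and the boolean selector are illustrative stand-ins, not the real xpc_arch_operations declared in xpc.h.

/* Reduced model of the ops-table conversion: one struct of function
 * pointers replaces dozens of individually assigned globals.
 */
#include <stdio.h>

struct arch_ops {
	void (*increment_heartbeat)(void);
	int  (*setup_partitions)(void);
};

static void increment_heartbeat_uv(void)  { puts("uv: bump published heartbeat"); }
static int  setup_partitions_uv(void)     { return 0; }

static void increment_heartbeat_sn2(void) { puts("sn2: bump local heartbeat copy"); }
static int  setup_partitions_sn2(void)    { return 0; }

static struct arch_ops arch_ops_uv = {
	.increment_heartbeat = increment_heartbeat_uv,
	.setup_partitions    = setup_partitions_uv,
};

static struct arch_ops arch_ops_sn2 = {
	.increment_heartbeat = increment_heartbeat_sn2,
	.setup_partitions    = setup_partitions_sn2,
};

/* Selected once at init time, as the hunks do with
 * xpc_arch_ops = xpc_arch_ops_uv / xpc_arch_ops_sn2. */
static struct arch_ops arch_ops;

int main(void)
{
	int on_uv = 1;		/* stand-in for the runtime UV/SN2 detection */

	arch_ops = on_uv ? arch_ops_uv : arch_ops_sn2;
	arch_ops.setup_partitions();
	arch_ops.increment_heartbeat();
	return 0;
}
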
int
xpc_init_uv(void)
{
- xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
- xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv;
- xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
- xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
- xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
- xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
- xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
- xpc_online_heartbeat = xpc_online_heartbeat_uv;
- xpc_heartbeat_init = xpc_heartbeat_init_uv;
- xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
- xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;
-
- xpc_request_partition_activation = xpc_request_partition_activation_uv;
- xpc_request_partition_reactivation =
- xpc_request_partition_reactivation_uv;
- xpc_request_partition_deactivation =
- xpc_request_partition_deactivation_uv;
- xpc_cancel_partition_deactivation_request =
- xpc_cancel_partition_deactivation_request_uv;
-
- xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
- xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;
-
- xpc_make_first_contact = xpc_make_first_contact_uv;
-
- xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
- xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
- xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
- xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
- xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;
-
- xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;
-
- xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
- xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;
-
- xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
- xpc_indicate_partition_disengaged =
- xpc_indicate_partition_disengaged_uv;
- xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
- xpc_partition_engaged = xpc_partition_engaged_uv;
- xpc_any_partition_engaged = xpc_any_partition_engaged_uv;
-
- xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
- xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
- xpc_send_payload = xpc_send_payload_uv;
- xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
- xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
- xpc_received_payload = xpc_received_payload_uv;
+ xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",