From f03aa2d89ad600a1ed21a223f196776f217cfe00 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sat, 14 Jan 2006 03:10:22 +0100 Subject: [PATCH] drivers/net/arcnet/: possible cleanups This patch contains the following possible cleanups: - make needlessly global code static - arcnet.c: remove the unneeded EXPORT_SYMBOL(arc_proto_null) - arcnet.c: remove the unneeded EXPORT_SYMBOL(arcnet_dump_packet) To make Jeff happy, arcnet.c still prints arcnet: v3.93 BETA 2000/04/29 - by Avery Pennarun et al. Signed-off-by: Adrian Bunk Signed-off-by: Jeff Garzik --- include/linux/arcdevice.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'include') diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 7198f129e13..231ba090ae3 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h @@ -206,7 +206,6 @@ struct ArcProto { extern struct ArcProto *arc_proto_map[256], *arc_proto_default, *arc_bcast_proto, *arc_raw_proto; -extern struct ArcProto arc_proto_null; /* @@ -334,17 +333,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); #define arcnet_dump_skb(dev,skb,desc) ; #endif -#if (ARCNET_DEBUG_MAX & D_RX) || (ARCNET_DEBUG_MAX & D_TX) -void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, - int take_arcnet_lock); -#else -#define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) ; -#endif - void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs); struct net_device *alloc_arcdev(char *name); -void arcnet_rx(struct net_device *dev, int bufnum); #endif /* __KERNEL__ */ #endif /* _LINUX_ARCDEVICE_H */ -- cgit v1.2.3 From d0412d967032b9e147bcbacc9ff0c0342636cf2d Mon Sep 17 00:00:00 2001 From: James Chapman Date: Fri, 27 Jan 2006 01:15:30 -0700 Subject: [PATCH] mv643xx_eth: use MII library for ethtool functions Use the common ethtool support functions of the MII library. Add generic MII ioctl handler. Add PHY parameter speed/duplex/negotiation initialization and modification. Signed-off-by: James Chapman Signed-off-by: Dale Farnsworth Signed-off-by: Jeff Garzik --- include/linux/mv643xx.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 0b08cd69220..7ffbeac7d2b 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -1214,6 +1214,7 @@ struct mv64xxx_i2c_pdata { #define MV643XX_ETH_FORCE_BP_MODE_NO_JAM 0 #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) +#define MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED (1<<9) #define MV643XX_ETH_FORCE_LINK_FAIL 0 #define MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL (1<<10) #define MV643XX_ETH_RETRANSMIT_16_ATTEMPTS 0 @@ -1243,6 +1244,8 @@ struct mv64xxx_i2c_pdata { #define MV643XX_ETH_SET_MII_SPEED_TO_10 0 #define MV643XX_ETH_SET_MII_SPEED_TO_100 (1<<24) +#define MV643XX_ETH_MAX_RX_PACKET_MASK (0x7<<17) + #define MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE \ MV643XX_ETH_DO_NOT_FORCE_LINK_PASS | \ MV643XX_ETH_ENABLE_AUTO_NEG_FOR_DUPLX | \ -- cgit v1.2.3 From 01999873a455fe9104e91820c72849e608239928 Mon Sep 17 00:00:00 2001 From: Dale Farnsworth Date: Fri, 27 Jan 2006 01:18:01 -0700 Subject: [PATCH] mv643xx_eth: Clean up platform_data configuration We shouldn't expose the hardware register contents in platform_data. The only things we allow the user to configure are autoneg, speed, and duplex. Add specific platform_data fields for these values and remove the registers configs. 
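As a quick sketch of the new interface (illustrative only; the PHY address and link settings below are invented, not taken from any real board file), platform code would now describe the PHY directly instead of passing raw register words:

	static struct mv643xx_eth_platform_data eth_pd = {
		.phy_addr	= 0,
		.force_phy_addr	= 1,		/* the PHY really is at address 0 */
		.speed		= SPEED_100,	/* fixed 100 Mb/s; leaving this 0
						 * would autonegotiate speed/duplex */
		.duplex		= DUPLEX_FULL,
	};

SPEED_* and DUPLEX_* are the usual <linux/ethtool.h> constants, as the field comments in the diff below note.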
Signed-off-by: Dale Farnsworth Signed-off-by: Jeff Garzik --- include/linux/mv643xx.h | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 7ffbeac7d2b..955d3069d72 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -1288,23 +1288,15 @@ struct mv64xxx_i2c_pdata { #define MV643XX_ETH_NAME "mv643xx_eth" struct mv643xx_eth_platform_data { - /* - * Non-values for mac_addr, phy_addr, port_config, etc. - * override the default value. Setting the corresponding - * force_* field, causes the default value to be overridden - * even when zero. - */ - unsigned int force_phy_addr:1; - unsigned int force_port_config:1; - unsigned int force_port_config_extend:1; - unsigned int force_port_sdma_config:1; - unsigned int force_port_serial_control:1; - int phy_addr; char *mac_addr; /* pointer to mac address */ - u32 port_config; - u32 port_config_extend; - u32 port_sdma_config; - u32 port_serial_control; + u16 force_phy_addr; /* force override if phy_addr == 0 */ + u16 phy_addr; + + /* If speed is 0, then speed and duplex are autonegotiated. */ + int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */ + int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + + /* non-zero values of the following fields override defaults */ u32 tx_queue_size; u32 rx_queue_size; u32 tx_sram_addr; -- cgit v1.2.3 From 24056bec086aaa99923b21c0e1a0e993bb1c7e2a Mon Sep 17 00:00:00 2001 From: Zhu Yi Date: Thu, 19 Jan 2006 16:21:09 +0800 Subject: [PATCH] ieee80211: Add LEAP authentication type Signed-off-by: Hong Liu Signed-off-by: Zhu Yi Signed-off-by: John W. Linville --- include/net/ieee80211.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index 9a92aef8b0b..3424a3ed66c 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -220,6 +220,7 @@ struct ieee80211_snap_hdr { /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 +#define WLAN_AUTH_LEAP 2 #define WLAN_AUTH_CHALLENGE_LEN 128 -- cgit v1.2.3 From 9184d9348a7a0e60d70d5f4c23de79fdbc72b9a3 Mon Sep 17 00:00:00 2001 From: Zhu Yi Date: Thu, 19 Jan 2006 16:22:32 +0800 Subject: [PATCH] ieee80211: Add TKIP crypt->build_iv This patch adds ieee80211 TKIP build_iv() method to support hardwares that can do TKIP encryption but relies on ieee80211 layer to build the IV. It also changes the build_iv() interface to return the key if possible after the IV is built (this is required by TKIP). Signed-off-by: Zhu Yi Signed-off-by: John W. Linville --- include/net/ieee80211_crypt.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h index cd82c3e998e..eb476414fd7 100644 --- a/include/net/ieee80211_crypt.h +++ b/include/net/ieee80211_crypt.h @@ -47,7 +47,8 @@ struct ieee80211_crypto_ops { /* deinitialize crypto context and free allocated private data */ void (*deinit) (void *priv); - int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); + int (*build_iv) (struct sk_buff * skb, int hdr_len, + u8 *key, int keylen, void *priv); /* encrypt/decrypt return < 0 on error or >= 0 on success. 
The return * value from decrypt_mpdu is passed as the keyidx value for -- cgit v1.2.3 From b79e20b60997e427b701055a2c69fb0c1d507aa9 Mon Sep 17 00:00:00 2001 From: Zhu Yi Date: Thu, 19 Jan 2006 16:21:27 +0800 Subject: [PATCH] ieee80211: Add 802.11h data type and structures Add 802.11h data types and structure definitions to ieee80211.h. Signed-off-by: Hong Liu Signed-off-by: Zhu Yi Signed-off-by: John W. Linville --- include/net/ieee80211.h | 168 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 165 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index 3424a3ed66c..ff6ef9e1a69 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -300,6 +300,23 @@ enum ieee80211_reasoncode { WLAN_REASON_CIPHER_SUITE_REJECTED = 24, }; +/* Action categories - 802.11h */ +enum ieee80211_actioncategories { + WLAN_ACTION_SPECTRUM_MGMT = 0, + /* Reserved 1-127 */ + /* Error 128-255 */ +}; + +/* Action details - 802.11h */ +enum ieee80211_actiondetails { + WLAN_ACTION_CATEGORY_MEASURE_REQUEST = 0, + WLAN_ACTION_CATEGORY_MEASURE_REPORT = 1, + WLAN_ACTION_CATEGORY_TPC_REQUEST = 2, + WLAN_ACTION_CATEGORY_TPC_REPORT = 3, + WLAN_ACTION_CATEGORY_CHANNEL_SWITCH = 4, + /* 5 - 255 Reserved */ +}; + #define IEEE80211_STATMASK_SIGNAL (1<<0) #define IEEE80211_STATMASK_RSSI (1<<1) #define IEEE80211_STATMASK_NOISE (1<<2) @@ -378,6 +395,8 @@ struct ieee80211_rx_stats { u8 mask; u8 freq; u16 len; + u64 tsf; + u32 beacon_time; }; /* IEEE 802.11 requires that STA supports concurrent reception of at least @@ -609,6 +628,28 @@ struct ieee80211_auth { struct ieee80211_info_element info_element[0]; } __attribute__ ((packed)); +struct ieee80211_channel_switch { + u8 id; + u8 len; + u8 mode; + u8 channel; + u8 count; +} __attribute__ ((packed)); + +struct ieee80211_action { + struct ieee80211_hdr_3addr header; + u8 category; + u8 action; + union { + struct ieee80211_action_exchange { + u8 token; + struct ieee80211_info_element info_element[0]; + } exchange; + struct ieee80211_channel_switch channel_switch; + + } format; +} __attribute__ ((packed)); + struct ieee80211_disassoc { struct ieee80211_hdr_3addr header; __le16 reason; @@ -693,7 +734,15 @@ struct ieee80211_txb { /* QoS structure */ #define NETWORK_HAS_QOS_PARAMETERS (1<<3) #define NETWORK_HAS_QOS_INFORMATION (1<<4) -#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) +#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ + NETWORK_HAS_QOS_INFORMATION) + +/* 802.11h */ +#define NETWORK_HAS_POWER_CONSTRAINT (1<<5) +#define NETWORK_HAS_CSA (1<<6) +#define NETWORK_HAS_QUIET (1<<7) +#define NETWORK_HAS_IBSS_DFS (1<<8) +#define NETWORK_HAS_TPC_REPORT (1<<9) #define QOS_QUEUE_NUM 4 #define QOS_OUI_LEN 3 @@ -749,6 +798,91 @@ struct ieee80211_tim_parameters { /*******************************************************/ +enum { /* ieee80211_basic_report.map */ + IEEE80211_BASIC_MAP_BSS = (1 << 0), + IEEE80211_BASIC_MAP_OFDM = (1 << 1), + IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), + IEEE80211_BASIC_MAP_RADAR = (1 << 3), + IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct ieee80211_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __attribute__ ((packed)); + +enum { /* ieee80211_measurement_request.mode */ + /* Bit 0 is reserved */ + IEEE80211_MEASUREMENT_ENABLE = (1 << 1), + IEEE80211_MEASUREMENT_REQUEST = (1 << 2), + IEEE80211_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved 
*/ +}; + +enum { + IEEE80211_REPORT_BASIC = 0, /* required */ + IEEE80211_REPORT_CCA = 1, /* optional */ + IEEE80211_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct ieee80211_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __attribute__ ((packed)); + +struct ieee80211_measurement_request { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + struct ieee80211_measurement_params params[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_report { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct ieee80211_basic_report basic[0]; + } u; +} __attribute__ ((packed)); + +struct ieee80211_tpc_report { + u8 transmit_power; + u8 link_margin; +} __attribute__ ((packed)); + +struct ieee80211_channel_map { + u8 channel; + u8 map; +} __attribute__ ((packed)); + +struct ieee80211_ibss_dfs { + struct ieee80211_info_element ie; + u8 owner[ETH_ALEN]; + u8 recovery_interval; + struct ieee80211_channel_map channel_map[0]; +}; + +struct ieee80211_csa { + u8 mode; + u8 channel; + u8 count; +} __attribute__ ((packed)); + +struct ieee80211_quiet { + u8 count; + u8 period; + u8 duration; + u8 offset; +} __attribute__ ((packed)); + struct ieee80211_network { /* These entries are used to identify a unique network */ u8 bssid[ETH_ALEN]; @@ -768,7 +902,7 @@ struct ieee80211_network { u8 rates_ex_len; unsigned long last_scanned; u8 mode; - u8 flags; + u32 flags; u32 last_associate; u32 time_stamp[2]; u16 beacon_interval; @@ -780,6 +914,25 @@ struct ieee80211_network { u8 rsn_ie[MAX_WPA_IE_LEN]; size_t rsn_ie_len; struct ieee80211_tim_parameters tim; + + /* 802.11h info */ + + /* Power Constraint - mandatory if spctrm mgmt required */ + u8 power_constraint; + + /* TPC Report - mandatory if spctrm mgmt required */ + struct ieee80211_tpc_report tpc_report; + + /* IBSS DFS - mandatory if spctrm mgmt required and IBSS + * NOTE: This is variable length and so must be allocated dynamically */ + struct ieee80211_ibss_dfs *ibss_dfs; + + /* Channel Switch Announcement - optional if spctrm mgmt required */ + struct ieee80211_csa csa; + + /* Quiet - optional if spctrm mgmt required */ + struct ieee80211_quiet quiet; + struct list_head list; }; @@ -925,7 +1078,10 @@ struct ieee80211_device { int (*handle_auth) (struct net_device * dev, struct ieee80211_auth * auth); int (*handle_deauth) (struct net_device * dev, - struct ieee80211_auth * auth); + struct ieee80211_deauth * auth); + int (*handle_action) (struct net_device * dev, + struct ieee80211_action * action, + struct ieee80211_rx_stats * stats); int (*handle_disassoc) (struct net_device * dev, struct ieee80211_disassoc * assoc); int (*handle_beacon) (struct net_device * dev, @@ -1094,6 +1250,7 @@ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *header, struct ieee80211_rx_stats *stats); +extern void ieee80211_network_reset(struct ieee80211_network *network); /* ieee80211_geo.c */ extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device @@ -1106,6 +1263,11 @@ extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee, extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel); extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); +extern u8 ieee80211_get_channel_flags(struct ieee80211_device *ieee, + u8 channel); +extern const struct ieee80211_channel 
*ieee80211_get_channel(struct + ieee80211_device + *ieee, u8 channel); /* ieee80211_wx.c */ extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, -- cgit v1.2.3 From dd5eeb461ea572f82d34e1f2c4b88037df5afedb Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Mon, 30 Jan 2006 13:12:50 +0100 Subject: [PATCH] ieee80211: common wx auth code This patch creates two functions ieee80211_wx_set_auth and ieee80211_wx_get_auth that can be used by drivers for the wireless extension handlers instead of writing their own, if the implementation should be software only. These patches enable using bcm43xx devices with WPA and this seems (as far as I can tell) to be the only difference between the stock ieee80211 and softmac's ieee80211 left. Signed-Off-By: Johannes Berg Signed-off-by: John W. Linville --- include/net/ieee80211.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index ff6ef9e1a69..4725ff861c5 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -1285,6 +1285,14 @@ extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra); +extern int ieee80211_wx_set_auth(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra); +extern int ieee80211_wx_get_auth(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra); static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) { -- cgit v1.2.3 From 8f903c708fcc2b579ebf16542bf6109bad593a1d Mon Sep 17 00:00:00 2001 From: Jay Vosburgh Date: Tue, 21 Feb 2006 16:36:44 -0800 Subject: [PATCH] bonding: suppress duplicate packets Originally submitted by Kenzo Iwami; his original description is: The current bonding driver receives duplicate packets when broadcast/ multicast packets are sent by other devices or packets are flooded by the switch. In this patch, new flags are added in priv_flags of net_device structure to let the bonding driver discard duplicate packets in dev.c:skb_bond(). Modified by Jay Vosburgh to change a define name, update some comments, rearrange the new skb_bond() for clarity, clear all bonding priv_flags on slave release, and update the driver version. Signed-off-by: Kenzo Iwami Signed-off-by: Jay Vosburgh Signed-off-by: Jeff Garzik --- include/linux/if.h | 3 +++ include/linux/if_ether.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/if.h b/include/linux/if.h index ce627d9092e..12c6f6d157c 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -52,6 +52,9 @@ /* Private (from user) interface flags (netdevice->priv_flags). */ #define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */ #define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */ +#define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */ +#define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */ +#define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. 
*/ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 7a92c1ce145..ab08f35cbc3 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -61,6 +61,7 @@ #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ #define ETH_P_IPX 0x8137 /* IPX over DIX */ #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ +#define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ #define ETH_P_WCCP 0x883E /* Web-cache coordination protocol * defined in draft-wilson-wrec-wccp-v2-00.txt */ #define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ -- cgit v1.2.3 From 12a057321529df2fb650ac5f34dfd7abcca667df Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 13:38:01 -0500 Subject: [PATCH] keep sync and async cfq_queue separate Signed-off-by: Al Viro --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 860e7a485a5..e19cb631084 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -58,7 +58,7 @@ struct cfq_io_context { * circular list of cfq_io_contexts belonging to a process io context */ struct list_head list; - struct cfq_queue *cfqq; + struct cfq_queue *cfqq[2]; void *key; struct io_context *ioc; -- cgit v1.2.3 From d9ff41879364cfca7c15abc20ae398e35de3f883 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 13:51:22 -0500 Subject: [PATCH] make cfq_exit_queue() prune the cfq_io_context for that queue Signed-off-by: Al Viro --- include/linux/blkdev.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e19cb631084..80518f70353 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -69,6 +69,8 @@ struct cfq_io_context { unsigned long ttime_samples; unsigned long ttime_mean; + struct list_head queue_list; + void (*dtor)(struct cfq_io_context *); void (*exit)(struct cfq_io_context *); }; -- cgit v1.2.3 From e17a9489b4a686bb5e9615e1d375c67619cb99c5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 13:21:20 -0500 Subject: [PATCH] stop elv_unregister() from rogering other iosched's data, fix locking Signed-off-by: Al Viro --- include/linux/elevator.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 18cf1f3e118..f65766ef053 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -48,6 +48,7 @@ struct elevator_ops elevator_init_fn *elevator_init_fn; elevator_exit_fn *elevator_exit_fn; + void (*trim)(struct io_context *); }; #define ELV_NAME_MAX (16) -- cgit v1.2.3 From 483f4afc421435b7cfe5e88f74eea0b73a476d75 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 18:34:37 -0500 Subject: [PATCH] fix sysfs interaction and lifetime rules handling for queues --- include/linux/blkdev.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 80518f70353..56bb6a4e15f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -406,8 +406,6 @@ struct request_queue struct blk_queue_tag *queue_tags; - atomic_t refcnt; - unsigned int nr_sorted; unsigned int in_flight; @@ -426,6 +424,8 @@ struct request_queue struct request pre_flush_rq, bar_rq, post_flush_rq; struct request *orig_bar_rq; unsigned int bi_size; + + struct mutex sysfs_lock; }; #define 
RQ_INACTIVE (-1) @@ -727,7 +727,7 @@ extern long nr_blockdev_pages(void); int blk_get_queue(request_queue_t *); request_queue_t *blk_alloc_queue(gfp_t); request_queue_t *blk_alloc_queue_node(gfp_t, int); -#define blk_put_queue(q) blk_cleanup_queue((q)) +extern void blk_put_queue(request_queue_t *); /* * tag stuff -- cgit v1.2.3 From 3d1ab40f4c20767afbd361b258a531d73e3e6fc2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 18:35:43 -0500 Subject: [PATCH] elevator_t lifetime rules and sysfs fixes --- include/linux/elevator.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index f65766ef053..4d0a80f13ee 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -61,7 +61,7 @@ struct elevator_type struct list_head list; struct elevator_ops ops; struct elevator_type *elevator_type; - struct kobj_type *elevator_ktype; + struct attribute **elevator_attrs; char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; }; @@ -75,6 +75,7 @@ struct elevator_queue void *elevator_data; struct kobject kobj; struct elevator_type *elevator_type; + struct mutex sysfs_lock; }; /* @@ -141,6 +142,12 @@ enum { ELV_MQUEUE_MUST, }; +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(elevator_t *, char *); + ssize_t (*store)(elevator_t *, const char *, size_t); +}; + #define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) #endif -- cgit v1.2.3 From e572ec7e4e432de7ecf7bd2e62117646fa64e518 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Mar 2006 22:27:18 -0500 Subject: [PATCH] fix rmmod problems with elevator attributes, clean them up --- include/linux/elevator.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 4d0a80f13ee..ad133fcfb23 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -53,6 +53,12 @@ struct elevator_ops #define ELV_NAME_MAX (16) +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(elevator_t *, char *); + ssize_t (*store)(elevator_t *, const char *, size_t); +}; + /* * identifies an elevator type, such as AS or deadline */ @@ -61,7 +67,7 @@ struct elevator_type struct list_head list; struct elevator_ops ops; struct elevator_type *elevator_type; - struct attribute **elevator_attrs; + struct elv_fs_entry *elevator_attrs; char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; }; @@ -142,12 +148,6 @@ enum { ELV_MQUEUE_MUST, }; -struct elv_fs_entry { - struct attribute attr; - ssize_t (*show)(elevator_t *, char *); - ssize_t (*store)(elevator_t *, const char *, size_t); -}; - #define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) #endif -- cgit v1.2.3 From 74bf4312fff083ab25c3f357cc653ada7995e5f6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:29:18 -0800 Subject: [SPARC64]: Move away from virtual page tables, part 1. We now use the TSB hardware assist features of the UltraSPARC MMUs. SMP is currently knowingly broken, we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size. Also noticed that flush_tlb_all() is not referenced anywhere, only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all(). 
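Roughly, in C terms (an expository sketch only: the struct and helper names here are invented, and the real implementation is the hand-written assembler in the new asm-sparc64/tsb.h added below), a TSB probe looks like this:

	/* Each TSB slot is 16 bytes: an 8-byte tag followed by an 8-byte PTE. */
	struct tsb_entry {
		unsigned long tag;	/* compared against the MMU's TAG TARGET */
		unsigned long pte;
	};

	/* An 8KB TSB holds 512 slots; the bucket is picked from the low bits
	 * of the virtual page number, i.e. (vaddr >> 13) & 511, which is
	 * exactly the index computation in KERN_TSB_LOOKUP_TL1 below.
	 */
	static struct tsb_entry *tsb_bucket(struct tsb_entry *tsb,
					    unsigned long vaddr)
	{
		return &tsb[(vaddr >> 13) & (512 - 1)];
	}

On a tag match the PTE is fed straight into the TLB and the access retried; on a mismatch the handler falls back to a full page table walk.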
The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8K TSB. Later we can add code to dynamically increase the size of per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB. Another area for refinement is large page size support. We could use a secondary address space TSB to handle those. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 1 + include/asm-sparc64/mmu_context.h | 46 +++-------- include/asm-sparc64/pgalloc.h | 1 + include/asm-sparc64/pgtable.h | 9 +-- include/asm-sparc64/processor.h | 14 +--- include/asm-sparc64/tlbflush.h | 25 ++++-- include/asm-sparc64/tsb.h | 165 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 204 insertions(+), 57 deletions(-) create mode 100644 include/asm-sparc64/tsb.h (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 8627eed6e83..36384cf7faa 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -92,6 +92,7 @@ typedef struct { unsigned long sparc64_ctx_val; + unsigned long *sparc64_tsb; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 57ee7b30618..34640a370ab 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -25,7 +25,13 @@ extern void get_new_mmu_context(struct mm_struct *mm); * This just needs to set mm->context to an invalid context. */ #define init_new_context(__tsk, __mm) \ - (((__mm)->context.sparc64_ctx_val = 0UL), 0) +({ unsigned long __pg = get_zeroed_page(GFP_KERNEL); \ + (__mm)->context.sparc64_ctx_val = 0UL; \ + (__mm)->context.sparc64_tsb = \ + (unsigned long *) __pg; \ + (__pg ? 0 : -ENOMEM); \ +}) + /* Destroy a dead context. This occurs when mmput drops the * mm_users count to zero, the mmaps have been released, and @@ -35,7 +41,8 @@ extern void get_new_mmu_context(struct mm_struct *mm); * this task if valid. */ #define destroy_context(__mm) \ -do { spin_lock(&ctx_alloc_lock); \ +do { free_page((unsigned long)(__mm)->context.sparc64_tsb); \ + spin_lock(&ctx_alloc_lock); \ if (CTX_VALID((__mm)->context)) { \ unsigned long nr = CTX_NRBITS((__mm)->context); \ mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \ @@ -43,35 +50,7 @@ do { spin_lock(&ctx_alloc_lock); \ spin_unlock(&ctx_alloc_lock); \ } while(0) -/* Reload the two core values used by TLB miss handler - * processing on sparc64. They are: - * 1) The physical address of mm->pgd, when full page - * table walks are necessary, this is where the - * search begins. - * 2) A "PGD cache". For 32-bit tasks only pgd[0] is - * ever used since that maps the entire low 4GB - * completely. To speed up TLB miss processing we - * make this value available to the handlers. This - * decreases the amount of memory traffic incurred.
- */ -#define reload_tlbmiss_state(__tsk, __mm) \ -do { \ - register unsigned long paddr asm("o5"); \ - register unsigned long pgd_cache asm("o4"); \ - paddr = __pa((__mm)->pgd); \ - pgd_cache = 0UL; \ - if (task_thread_info(__tsk)->flags & _TIF_32BIT) \ - pgd_cache = get_pgd_cache((__mm)->pgd); \ - __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \ - "mov %3, %%g4\n\t" \ - "mov %0, %%g7\n\t" \ - "stxa %1, [%%g4] %2\n\t" \ - "membar #Sync\n\t" \ - "wrpr %%g0, 0x096, %%pstate" \ - : /* no outputs */ \ - : "r" (paddr), "r" (pgd_cache),\ - "i" (ASI_DMMU), "i" (TSB_REG)); \ -} while(0) +extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb); /* Set MMU context in the actual hardware. */ #define load_secondary_context(__mm) \ @@ -101,7 +80,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str if (!ctx_valid || (old_mm != mm)) { load_secondary_context(mm); - reload_tlbmiss_state(tsk, mm); + tsb_context_switch(__pa(mm->pgd), + mm->context.sparc64_tsb); } /* Even if (mm == old_mm) we _must_ check @@ -139,7 +119,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); - reload_tlbmiss_state(current, mm); + tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb); } #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index a96067cca96..baf59c00ea4 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h @@ -61,6 +61,7 @@ static __inline__ void free_pgd_slow(pgd_t *pgd) free_page((unsigned long)pgd); } +/* XXX This crap can die, no longer using virtual page tables... */ #ifdef DCACHE_ALIASING_POSSIBLE #define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL) #define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL) diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index f0a9b44d3eb..f3ba1e05819 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -25,7 +25,8 @@ #include /* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). - * The page copy blockops can use 0x2000000 to 0x10000000. + * The page copy blockops can use 0x2000000 to 0x4000000. + * The TSB is mapped in the 0x4000000 to 0x6000000 range. * The PROM resides in an area spanning 0xf0000000 to 0x100000000. * The vmalloc area spans 0x100000000 to 0x200000000. * Since modules need to be in the lowest 32-bits of the address space, @@ -34,6 +35,7 @@ * 0x400000000. */ #define TLBTEMP_BASE _AC(0x0000000002000000,UL) +#define TSBMAP_BASE _AC(0x0000000004000000,UL) #define MODULES_VADDR _AC(0x0000000010000000,UL) #define MODULES_LEN _AC(0x00000000e0000000,UL) #define MODULES_END _AC(0x00000000f0000000,UL) @@ -296,11 +298,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) -/* extract the pgd cache used for optimizing the tlb miss - * slow path when executing 32-bit compat processes - */ -#define get_pgd_cache(pgd) ((unsigned long) pgd_val(*pgd) << 11) - /* Find an entry in the second-level page table.. 
*/ #define pmd_offset(pudp, address) \ ((pmd_t *) pud_page(*(pudp)) + \ diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index cd8d9b4c865..b3889f3f943 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -28,6 +28,8 @@ * User lives in his very own context, and cannot reference us. Note * that TASK_SIZE is a misnomer, it really gives maximum user virtual * address that the kernel will allocate out. + * + * XXX No longer using virtual page tables, kill this upper limit... */ #define VA_BITS 44 #ifndef __ASSEMBLY__ @@ -37,18 +39,6 @@ #endif #define TASK_SIZE ((unsigned long)-VPTE_SIZE) -/* - * The vpte base must be able to hold the entire vpte, half - * of which lives above, and half below, the base. And it - * is placed as close to the highest address range as possible. - */ -#define VPTE_BASE_SPITFIRE (-(VPTE_SIZE/2)) -#if 1 -#define VPTE_BASE_CHEETAH VPTE_BASE_SPITFIRE -#else -#define VPTE_BASE_CHEETAH 0xffe0000000000000 -#endif - #ifndef __ASSEMBLY__ typedef struct { diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h index 3ef9909ac3a..9ad5d9c51d4 100644 --- a/include/asm-sparc64/tlbflush.h +++ b/include/asm-sparc64/tlbflush.h @@ -5,6 +5,11 @@ #include #include +/* TSB flush operations. */ +struct mmu_gather; +extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); +extern void flush_tsb_user(struct mmu_gather *mp); + /* TLB flush operations. */ extern void flush_tlb_pending(void); @@ -14,28 +19,36 @@ extern void flush_tlb_pending(void); #define flush_tlb_page(vma,addr) flush_tlb_pending() #define flush_tlb_mm(mm) flush_tlb_pending() +/* Local cpu only. */ extern void __flush_tlb_all(void); + extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP -#define flush_tlb_all() __flush_tlb_all() #define flush_tlb_kernel_range(start,end) \ - __flush_tlb_kernel_range(start,end) +do { flush_tsb_kernel_range(start,end); \ + __flush_tlb_kernel_range(start,end); \ +} while (0) #else /* CONFIG_SMP */ -extern void smp_flush_tlb_all(void); extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); -#define flush_tlb_all() smp_flush_tlb_all() #define flush_tlb_kernel_range(start, end) \ - smp_flush_tlb_kernel_range(start, end) +do { flush_tsb_kernel_range(start,end); \ + smp_flush_tlb_kernel_range(start, end); \ +} while (0) #endif /* ! CONFIG_SMP */ -extern void flush_tlb_pgtables(struct mm_struct *, unsigned long, unsigned long); +static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) +{ + /* We don't use virtual page tables for TLB miss processing + * any more. Nowadays we use the TSB. + */ +} #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h new file mode 100644 index 00000000000..03d272e0e47 --- /dev/null +++ b/include/asm-sparc64/tsb.h @@ -0,0 +1,165 @@ +#ifndef _SPARC64_TSB_H +#define _SPARC64_TSB_H + +/* The sparc64 TSB is similar to the powerpc hashtables. It's a + * power-of-2 sized table of TAG/PTE pairs. The cpu precomputes + * pointers into this table for 8K and 64K page sizes, and also a + * comparison TAG based upon the virtual address and context which + * faults. 
+ * + * TLB miss trap handler software does the actual lookup via something + * of the form: + * + * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1 + * ldxa [%g0] ASI_{D,I}MMU, %g6 + * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 + * cmp %g4, %g6 + * bne,pn %xcc, tsb_miss_{d,i}tlb + * mov FAULT_CODE_{D,I}TLB, %g3 + * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN + * retry + * + + * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte + * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu + * register which is: + * + * ------------------------------------------------- + * | - | CONTEXT | - | VADDR bits 63:22 | + * ------------------------------------------------- + * 63 61 60 48 47 42 41 0 + * + * Like the powerpc hashtables we need to use locking in order to + * synchronize while we update the entries. PTE updates need locking + * as well. + * + * We need to carefully choose a lock bit for the TSB entry. We + * choose to use bit 47 in the tag. Also, since we never map anything + * at page zero in context zero, we use zero as an invalid tag entry. + * When the lock bit is set, this forces a tag comparison failure. + * + * Currently, we allocate an 8K TSB per-process and we use it for both + * I-TLB and D-TLB misses. Perhaps at some point we'll add code that + * monitors the number of active pages in the process as we get + * major/minor faults, and grow the TSB in response. The only trick + * in implementing that is synchronizing the freeing of the old TSB + * wrt. parallel TSB updates occurring on other processors. One + * possible solution is to use RCU for the freeing of the TSB. + */ + +#define TSB_TAG_LOCK (1 << (47 - 32)) + +#define TSB_MEMBAR membar #StoreStore + +#define TSB_LOCK_TAG(TSB, REG1, REG2) \ +99: lduwa [TSB] ASI_N, REG1; \ + sethi %hi(TSB_TAG_LOCK), REG2;\ + andcc REG1, REG2, %g0; \ + bne,pn %icc, 99b; \ + nop; \ + casa [TSB] ASI_N, REG1, REG2;\ + cmp REG1, REG2; \ + bne,pn %icc, 99b; \ + nop; \ + TSB_MEMBAR + +#define TSB_WRITE(TSB, TTE, TAG) \ + stx TTE, [TSB + 0x08]; \ + TSB_MEMBAR; \ + stx TAG, [TSB + 0x00]; + + /* Do a kernel page table walk. Leaves physical PTE pointer in + * REG1. Jumps to FAIL_LABEL on early page table walk termination. + * VADDR will not be clobbered, but REG2 will. + */ +#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ + sethi %hi(swapper_pg_dir), REG1; \ + or REG1, %lo(swapper_pg_dir), REG1; \ + sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + andn REG2, 0x3, REG2; \ + lduw [REG1 + REG2], REG1; \ + brz,pn REG1, FAIL_LABEL; \ + sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + sllx REG1, 11, REG1; \ + andn REG2, 0x3, REG2; \ + lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + brz,pn REG1, FAIL_LABEL; \ + sllx VADDR, 64 - PMD_SHIFT, REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + sllx REG1, 11, REG1; \ + andn REG2, 0x7, REG2; \ + add REG1, REG2, REG1; + + /* Do a user page table walk in MMU globals. Leaves physical PTE + * pointer in REG1. Jumps to FAIL_LABEL on early page table walk + * termination. Physical base of page tables is in PHYS_PGD which + * will not be modified. + * + * VADDR will not be clobbered, but REG1 and REG2 will.
+ */ +#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \ + sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + andn REG2, 0x3, REG2; \ + lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ + brz,pn REG1, FAIL_LABEL; \ + sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + sllx REG1, 11, REG1; \ + andn REG2, 0x3, REG2; \ + lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + brz,pn REG1, FAIL_LABEL; \ + sllx VADDR, 64 - PMD_SHIFT, REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + sllx REG1, 11, REG1; \ + andn REG2, 0x7, REG2; \ + add REG1, REG2, REG1; + +/* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0. + * If no entry is found, FAIL_LABEL will be branched to. On success + * the resulting PTE value will be left in REG1. VADDR is preserved + * by this routine. + */ +#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \ + sethi %hi(prom_trans), REG1; \ + or REG1, %lo(prom_trans), REG1; \ +97: ldx [REG1 + 0x00], REG2; \ + brz,pn REG2, FAIL_LABEL; \ + nop; \ + ldx [REG1 + 0x08], REG3; \ + add REG2, REG3, REG3; \ + cmp REG2, VADDR; \ + bgu,pt %xcc, 98f; \ + cmp VADDR, REG3; \ + bgeu,pt %xcc, 98f; \ + ldx [REG1 + 0x10], REG3; \ + sub VADDR, REG2, REG2; \ + ba,pt %xcc, 99f; \ + add REG3, REG2, REG1; \ +98: ba,pt %xcc, 97b; \ + add REG1, (3 * 8), REG1; \ +99: + + /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL + * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries + * and the found TTE will be left in REG1. REG3 and REG4 must + * be an even/odd pair of registers. + * + * VADDR and TAG will be preserved and not clobbered by this macro. + */ + /* XXX non-8K base page size support... */ +#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ + sethi %hi(swapper_tsb), REG1; \ + or REG1, %lo(swapper_tsb), REG1; \ + srlx VADDR, 13, REG2; \ + and REG2, (512 - 1), REG2; \ + sllx REG2, 4, REG2; \ + add REG1, REG2, REG2; \ + ldda [REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \ + cmp REG3, TAG; \ + be,a,pt %xcc, OK_LABEL; \ + mov REG4, REG1; + +#endif /* !(_SPARC64_TSB_H) */ -- cgit v1.2.3 From 05e28f9de65a38bb0c769080e91b6976e7e1e70c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:13 -0800 Subject: [SPARC64]: No need to D-cache color page tables any longer. Unlike the virtual page tables, the new TSB scheme does not require this ugly hack. Signed-off-by: David S. 
Miller --- include/asm-sparc64/cpudata.h | 5 ++- include/asm-sparc64/pgalloc.h | 101 +++++++++++++++++++----------------------- 2 files changed, 49 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 74de79dca91..45a9a2cfaf7 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -20,8 +20,9 @@ typedef struct { /* Dcache line 2 */ unsigned int pgcache_size; unsigned int __pad1; - unsigned long *pte_cache[2]; + unsigned long *pte_cache; unsigned long *pgd_cache; + unsigned long __pad2; /* Dcache line 3, rarely used */ unsigned int dcache_size; @@ -30,8 +31,8 @@ typedef struct { unsigned int icache_line_size; unsigned int ecache_size; unsigned int ecache_line_size; - unsigned int __pad2; unsigned int __pad3; + unsigned int __pad4; } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index baf59c00ea4..ecea1bbdc11 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h @@ -19,16 +19,15 @@ #else extern struct pgtable_cache_struct { unsigned long *pgd_cache; - unsigned long *pte_cache[2]; + unsigned long *pte_cache; unsigned int pgcache_size; } pgt_quicklists; #endif #define pgd_quicklist (pgt_quicklists.pgd_cache) -#define pmd_quicklist ((unsigned long *)0) #define pte_quicklist (pgt_quicklists.pte_cache) #define pgtable_cache_size (pgt_quicklists.pgcache_size) -static __inline__ void free_pgd_fast(pgd_t *pgd) +static inline void free_pgd_fast(pgd_t *pgd) { preempt_disable(); *(unsigned long *)pgd = (unsigned long) pgd_quicklist; @@ -37,7 +36,7 @@ static __inline__ void free_pgd_fast(pgd_t *pgd) preempt_enable(); } -static __inline__ pgd_t *get_pgd_fast(void) +static inline pgd_t *get_pgd_fast(void) { unsigned long *ret; @@ -56,47 +55,35 @@ static __inline__ pgd_t *get_pgd_fast(void) return (pgd_t *)ret; } -static __inline__ void free_pgd_slow(pgd_t *pgd) +static inline void free_pgd_slow(pgd_t *pgd) { free_page((unsigned long)pgd); } -/* XXX This crap can die, no longer using virtual page tables... 
*/ -#ifdef DCACHE_ALIASING_POSSIBLE -#define VPTE_COLOR(address) (((address) >> (PAGE_SHIFT + 10)) & 1UL) -#define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL) -#else -#define VPTE_COLOR(address) 0 -#define DCACHE_COLOR(address) 0 -#endif - #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) -static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address) +static inline pmd_t *pmd_alloc_one_fast(void) { unsigned long *ret; - int color = 0; preempt_disable(); - if (pte_quicklist[color] == NULL) - color = 1; - - if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { - pte_quicklist[color] = (unsigned long *)(*ret); + ret = (unsigned long *) pte_quicklist; + if (likely(ret)) { + pte_quicklist = (unsigned long *)(*ret); ret[0] = 0; pgtable_cache_size--; } preempt_enable(); - return (pmd_t *)ret; + return (pmd_t *) ret; } -static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { pmd_t *pmd; - pmd = pmd_alloc_one_fast(mm, address); - if (!pmd) { + pmd = pmd_alloc_one_fast(); + if (unlikely(!pmd)) { pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); if (pmd) memset(pmd, 0, PAGE_SIZE); @@ -104,18 +91,16 @@ static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addre return pmd; } -static __inline__ void free_pmd_fast(pmd_t *pmd) +static inline void free_pmd_fast(pmd_t *pmd) { - unsigned long color = DCACHE_COLOR((unsigned long)pmd); - preempt_disable(); - *(unsigned long *)pmd = (unsigned long) pte_quicklist[color]; - pte_quicklist[color] = (unsigned long *) pmd; + *(unsigned long *)pmd = (unsigned long) pte_quicklist; + pte_quicklist = (unsigned long *) pmd; pgtable_cache_size++; preempt_enable(); } -static __inline__ void free_pmd_slow(pmd_t *pmd) +static inline void free_pmd_slow(pmd_t *pmd) { free_page((unsigned long)pmd); } @@ -124,48 +109,54 @@ static __inline__ void free_pmd_slow(pmd_t *pmd) #define pmd_populate(MM,PMD,PTE_PAGE) \ pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) -extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); - -static inline struct page * -pte_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - pte_t *pte = pte_alloc_one_kernel(mm, addr); - - if (pte) - return virt_to_page(pte); - - return NULL; -} - -static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) +static inline pte_t *pte_alloc_one_fast(void) { - unsigned long color = VPTE_COLOR(address); unsigned long *ret; preempt_disable(); - if((ret = (unsigned long *)pte_quicklist[color]) != NULL) { - pte_quicklist[color] = (unsigned long *)(*ret); + ret = (unsigned long *) pte_quicklist; + if (likely(ret)) { + pte_quicklist = (unsigned long *)(*ret); ret[0] = 0; pgtable_cache_size--; } preempt_enable(); - return (pte_t *)ret; + + return (pte_t *) ret; +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +{ + pte_t *ptep = pte_alloc_one_fast(); + + if (likely(ptep)) + return ptep; + + return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); } -static __inline__ void free_pte_fast(pte_t *pte) +static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr) { - unsigned long color = DCACHE_COLOR((unsigned long)pte); + pte_t *pte = pte_alloc_one_fast(); + if (likely(pte)) + return virt_to_page(pte); + + return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); +} + +static inline void free_pte_fast(pte_t *pte) +{ 
preempt_disable(); - *(unsigned long *)pte = (unsigned long) pte_quicklist[color]; - pte_quicklist[color] = (unsigned long *) pte; + *(unsigned long *)pte = (unsigned long) pte_quicklist; + pte_quicklist = (unsigned long *) pte; pgtable_cache_size++; preempt_enable(); } -static __inline__ void free_pte_slow(pte_t *pte) +static inline void free_pte_slow(pte_t *pte) { - free_page((unsigned long)pte); + free_page((unsigned long) pte); } static inline void pte_free_kernel(pte_t *pte) -- cgit v1.2.3 From 3c936465249f863f322154ff1aaa628b84ee5750 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:27 -0800 Subject: [SPARC64]: Kill pgtable quicklists and use SLAB. Taking a nod from the powerpc port. With the per-cpu caching of both the page allocator and SLAB, the pgtable quicklist scheme becomes relatively silly and primitive. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 9 +-- include/asm-sparc64/pgalloc.h | 158 ++++++++---------------------------------- include/asm-sparc64/pgtable.h | 7 +- 3 files changed, 29 insertions(+), 145 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 45a9a2cfaf7..f7c0faede8b 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -17,14 +17,7 @@ typedef struct { unsigned long clock_tick; /* %tick's per second */ unsigned long udelay_val; - /* Dcache line 2 */ - unsigned int pgcache_size; - unsigned int __pad1; - unsigned long *pte_cache; - unsigned long *pgd_cache; - unsigned long __pad2; - - /* Dcache line 3, rarely used */ + /* Dcache line 2, rarely used */ unsigned int dcache_size; unsigned int dcache_line_size; unsigned int icache_size; diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h index ecea1bbdc11..12e4a273bd4 100644 --- a/include/asm-sparc64/pgalloc.h +++ b/include/asm-sparc64/pgalloc.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -13,164 +14,59 @@ #include /* Page table allocation/freeing. 
*/ -#ifdef CONFIG_SMP -/* Sliiiicck */ -#define pgt_quicklists local_cpu_data() -#else -extern struct pgtable_cache_struct { - unsigned long *pgd_cache; - unsigned long *pte_cache; - unsigned int pgcache_size; -} pgt_quicklists; -#endif -#define pgd_quicklist (pgt_quicklists.pgd_cache) -#define pte_quicklist (pgt_quicklists.pte_cache) -#define pgtable_cache_size (pgt_quicklists.pgcache_size) +extern kmem_cache_t *pgtable_cache; -static inline void free_pgd_fast(pgd_t *pgd) +static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - preempt_disable(); - *(unsigned long *)pgd = (unsigned long) pgd_quicklist; - pgd_quicklist = (unsigned long *) pgd; - pgtable_cache_size++; - preempt_enable(); + return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); } -static inline pgd_t *get_pgd_fast(void) +static inline void pgd_free(pgd_t *pgd) { - unsigned long *ret; - - preempt_disable(); - if((ret = pgd_quicklist) != NULL) { - pgd_quicklist = (unsigned long *)(*ret); - ret[0] = 0; - pgtable_cache_size--; - preempt_enable(); - } else { - preempt_enable(); - ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT); - if(ret) - memset(ret, 0, PAGE_SIZE); - } - return (pgd_t *)ret; -} - -static inline void free_pgd_slow(pgd_t *pgd) -{ - free_page((unsigned long)pgd); + kmem_cache_free(pgtable_cache, pgd); } #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) -static inline pmd_t *pmd_alloc_one_fast(void) -{ - unsigned long *ret; - - preempt_disable(); - ret = (unsigned long *) pte_quicklist; - if (likely(ret)) { - pte_quicklist = (unsigned long *)(*ret); - ret[0] = 0; - pgtable_cache_size--; - } - preempt_enable(); - - return (pmd_t *) ret; -} - -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - pmd_t *pmd; - - pmd = pmd_alloc_one_fast(); - if (unlikely(!pmd)) { - pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); - if (pmd) - memset(pmd, 0, PAGE_SIZE); - } - return pmd; -} - -static inline void free_pmd_fast(pmd_t *pmd) -{ - preempt_disable(); - *(unsigned long *)pmd = (unsigned long) pte_quicklist; - pte_quicklist = (unsigned long *) pmd; - pgtable_cache_size++; - preempt_enable(); -} - -static inline void free_pmd_slow(pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - -#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) -#define pmd_populate(MM,PMD,PTE_PAGE) \ - pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) - -static inline pte_t *pte_alloc_one_fast(void) +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - unsigned long *ret; - - preempt_disable(); - ret = (unsigned long *) pte_quicklist; - if (likely(ret)) { - pte_quicklist = (unsigned long *)(*ret); - ret[0] = 0; - pgtable_cache_size--; - } - preempt_enable(); - - return (pte_t *) ret; + return kmem_cache_alloc(pgtable_cache, + GFP_KERNEL|__GFP_REPEAT); } -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +static inline void pmd_free(pmd_t *pmd) { - pte_t *ptep = pte_alloc_one_fast(); - - if (likely(ptep)) - return ptep; - - return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + kmem_cache_free(pgtable_cache, pmd); } -static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) { - pte_t *pte = pte_alloc_one_fast(); - - if (likely(pte)) - return virt_to_page(pte); - - return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); + return kmem_cache_alloc(pgtable_cache, + GFP_KERNEL|__GFP_REPEAT); } -static inline 
void free_pte_fast(pte_t *pte) +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) { - preempt_disable(); - *(unsigned long *)pte = (unsigned long) pte_quicklist; - pte_quicklist = (unsigned long *) pte; - pgtable_cache_size++; - preempt_enable(); + return virt_to_page(pte_alloc_one_kernel(mm, address)); } - -static inline void free_pte_slow(pte_t *pte) -{ - free_page((unsigned long) pte); -} - + static inline void pte_free_kernel(pte_t *pte) { - free_pte_fast(pte); + kmem_cache_free(pgtable_cache, pte); } static inline void pte_free(struct page *ptepage) { - free_pte_fast(page_address(ptepage)); + pte_free_kernel(page_address(ptepage)); } - -#define pmd_free(pmd) free_pmd_fast(pmd) -#define pgd_free(pgd) free_pgd_fast(pgd) -#define pgd_alloc(mm) get_pgd_fast() + +#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) +#define pmd_populate(MM,PMD,PTE_PAGE) \ + pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) + +#define check_pgt_cache() do { } while (0) #endif /* _SPARC64_PGALLOC_H */ diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index f3ba1e05819..77ba0b6cc1c 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -432,12 +432,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, unsigned long); #define HAVE_ARCH_FB_UNMAPPED_AREA -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) - -extern void check_pgt_cache(void); +extern void pgtable_cache_init(void); #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From 56fb4df6da76c35dca22036174e2d1edef83ff1f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 23:24:22 -0800 Subject: [SPARC64]: Eliminate all usage of hard-coded trap globals. UltraSPARC has special sets of global registers which are switched to for certain trap types. There is one set for MMU related traps, one set for Interrupt Vector processing, and another set (called the Alternate globals) for all other trap types. For what seems like forever we've hard coded the values in some of these trap registers. Some examples include: 1) Interrupt Vector global %g6 holds current processor's interrupt work struct where received interrupts are managed for IRQ handler dispatch. 2) MMU global %g7 holds the base of the page tables of the currently active address space. 3) Alternate global %g6 held the current_thread_info() value. Such hardcoding has resulted in some serious issues in many areas. There are some code sequences where having another register available would help clean up the implementation. Taking traps such as cross-calls from the OBP firmware requires some tricky code sequences wherein we have to save away and restore all of the special sets of global registers when we enter/exit OBP. We were also using the IMMU TSB register on SMP to hold the per-cpu area base address, which doesn't work any longer now that we actually use the TSB facility of the cpu. The implementation is pretty straightforward. One tricky bit is getting the current processor ID as that is different on different cpu variants. We use a stub with a fancy calling convention which we patch at boot time. The calling convention is that the stub is branched to and the (PC - 4) to return to is in register %g1. The cpu number is left in %g6. This stub can be invoked by using the __GET_CPUID macro. We use an array of per-cpu trap state to store the current thread and physical address of the current address space's page tables. 
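In C terms (again a sketch: the helper name is invented, and the real consumers are the assembler macros added to cpudata.h below), each of the new TRAP_LOAD_* macros amounts to one indexed load from this table:

	/* trap_block[] entries are aligned to 64 bytes, so the assembler can
	 * turn a cpuid into a byte offset with a single shift by
	 * TRAP_BLOCK_SZ_SHIFT.
	 */
	static unsigned long current_pgd_paddr(int cpuid)
	{
		return trap_block[cpuid].pgd_paddr;	/* TRAP_LOAD_PGD_PHYS */
	}
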
The TRAP_LOAD_THREAD_REG loads %g6 with the current thread from this table, it uses __GET_CPUID and also clobbers %g1. TRAP_LOAD_IRQ_WORK is used by the interrupt vector processing to load the current processor's IRQ software state into %g6. It also uses __GET_CPUID and clobbers %g1. Finally, TRAP_LOAD_PGD_PHYS loads the physical address base of the current address space's page tables into %g7, it clobbers %g1 and uses __GET_CPUID. Many refinements are possible, as well as some tuning, with this stuff in place. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 86 ++++++++++++++++++++++++++++++++++++++++++- include/asm-sparc64/system.h | 2 + include/asm-sparc64/ttable.h | 18 ++++----- 3 files changed, 95 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index f7c0faede8b..6c57cbb9a7d 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -1,12 +1,15 @@ /* cpudata.h: Per-cpu parameters. * - * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com) + * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net) */ #ifndef _SPARC64_CPUDATA_H #define _SPARC64_CPUDATA_H +#ifndef __ASSEMBLY__ + #include +#include typedef struct { /* Dcache line 1 */ @@ -32,4 +35,85 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) #define local_cpu_data() __get_cpu_var(__cpu_data) +/* Trap handling code needs to get at a few critical values upon + * trap entry and to process TSB misses. These cannot be in the + * per_cpu() area as we really need to lock them into the TLB and + * thus make them part of the main kernel image. As a result we + * try to make this as small as possible. + * + * This is padded out and aligned to 64-bytes to avoid false sharing + * on SMP. + */ + +/* If you modify the size of this structure, please update + * TRAP_BLOCK_SZ_SHIFT below. + */ +struct thread_info; +struct trap_per_cpu { +/* D-cache line 1 */ + struct thread_info *thread; + unsigned long pgd_paddr; + unsigned long __pad1[2]; + +/* D-cache line 2 */ + unsigned long __pad2[4]; +} __attribute__((aligned(64))); +extern struct trap_per_cpu trap_block[NR_CPUS]; +extern void init_cur_cpu_trap(void); +extern void per_cpu_patch(void); + +#endif /* !(__ASSEMBLY__) */ + +#define TRAP_PER_CPU_THREAD 0x00 +#define TRAP_PER_CPU_PGD_PADDR 0x08 + +#define TRAP_BLOCK_SZ_SHIFT 6 + +/* Clobbers %g1, loads %g6 with local processor's cpuid */ +#define __GET_CPUID \ + ba,pt %xcc, __get_cpu_id; \ + rd %pc, %g1; + +/* Clobbers %g1, current address space PGD phys address into %g7. */ +#define TRAP_LOAD_PGD_PHYS \ + __GET_CPUID \ + sllx %g6, TRAP_BLOCK_SZ_SHIFT, %g6; \ + sethi %hi(trap_block), %g7; \ + or %g7, %lo(trap_block), %g7; \ + add %g7, %g6, %g7; \ + ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7; + +/* Clobbers %g1, loads local processor's IRQ work area into %g6. */ +#define TRAP_LOAD_IRQ_WORK \ + __GET_CPUID \ + sethi %hi(__irq_work), %g1; \ + sllx %g6, 6, %g6; \ + or %g1, %lo(__irq_work), %g1; \ + add %g1, %g6, %g6; + +/* Clobbers %g1, loads %g6 with current thread info pointer. */ +#define TRAP_LOAD_THREAD_REG \ + __GET_CPUID \ + sllx %g6, TRAP_BLOCK_SZ_SHIFT, %g6; \ + sethi %hi(trap_block), %g1; \ + or %g1, %lo(trap_block), %g1; \ + ldx [%g1 + %g6], %g6; + +/* Given the current thread info pointer in %g6, load the per-cpu + * area base of the current processor into %g5. REG1 and REG2 are + * clobbered. 
+ */ +#ifdef CONFIG_SMP +#define LOAD_PER_CPU_BASE(REG1, REG2) \ + ldub [%g6 + TI_CPU], REG1; \ + sethi %hi(__per_cpu_shift), %g5; \ + sethi %hi(__per_cpu_base), REG2; \ + ldx [%g5 + %lo(__per_cpu_shift)], %g5; \ + ldx [REG2 + %lo(__per_cpu_base)], REG2; \ + sllx REG1, %g5, %g5; \ + add %g5, REG2, %g5; +#else +#define LOAD_PER_CPU_BASE(REG1, REG2) +#endif + #endif /* _SPARC64_CPUDATA_H */ diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index af254e58183..26c0807af3e 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -209,6 +209,8 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ + trap_block[current_thread_info()->cpu].thread = \ + task_thread_info(next); \ __asm__ __volatile__( \ "mov %%g4, %%g7\n\t" \ "wrpr %%g0, 0x95, %%pstate\n\t" \ diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 2784f80094c..f557db4faf8 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -109,14 +109,14 @@ nop;nop;nop; #define TRAP_UTRAP(handler,lvl) \ - ldx [%g6 + TI_UTRAPS], %g1; \ - sethi %hi(109f), %g7; \ - brz,pn %g1, utrap; \ - or %g7, %lo(109f), %g7; \ - ba,pt %xcc, utrap; \ -109: ldx [%g1 + handler*8], %g1; \ - ba,pt %xcc, utrap_ill; \ - mov lvl, %o1; + mov handler, %g3; \ + ba,pt %xcc, utrap_trap; \ + mov lvl, %g4; \ + nop; \ + nop; \ + nop; \ + nop; \ + nop; #ifdef CONFIG_SUNOS_EMUL #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sunos_sys_table) @@ -136,8 +136,6 @@ #else #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) #endif -/* FIXME: Write these actually */ -#define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall) #define BREAKPOINT_TRAP TRAP(breakpoint_trap) #define TRAP_IRQ(routine, level) \ -- cgit v1.2.3 From 09f94287f7260e03bbeab497e743691fafcc22c3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:06 -0800 Subject: [SPARC64]: TSB refinements. Move {init_new,destroy}_context() out of line. Do not put huge pages into the TSB, only base page size translations. There are some clever things we could do here, but for now let's be correct instead of fancy. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu_context.h | 32 ++------------------------------ include/asm-sparc64/pgtable.h | 4 ++++ 2 files changed, 6 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 34640a370ab..0dffb4ce8a1 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -19,36 +19,8 @@ extern unsigned long tlb_context_cache; extern unsigned long mmu_context_bmap[]; extern void get_new_mmu_context(struct mm_struct *mm); - -/* Initialize a new mmu context. This is invoked when a new - * address space instance (unique or shared) is instantiated. - * This just needs to set mm->context to an invalid context. - */ -#define init_new_context(__tsk, __mm) \ -({ unsigned long __pg = get_zeroed_page(GFP_KERNEL); \ - (__mm)->context.sparc64_ctx_val = 0UL; \ - (__mm)->context.sparc64_tsb = \ - (unsigned long *) __pg; \ - (__pg ? 0 : -ENOMEM); \ -}) - - -/* Destroy a dead context. This occurs when mmput drops the - * mm_users count to zero, the mmaps have been released, and - * all the page tables have been flushed. 
Our job is to destroy - * any remaining processor-specific state, and in the sparc64 - * case this just means freeing up the mmu context ID held by - * this task if valid. - */ -#define destroy_context(__mm) \ -do { free_page((unsigned long)(__mm)->context.sparc64_tsb); \ - spin_lock(&ctx_alloc_lock); \ - if (CTX_VALID((__mm)->context)) { \ - unsigned long nr = CTX_NRBITS((__mm)->context); \ - mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \ - } \ - spin_unlock(&ctx_alloc_lock); \ -} while(0) +extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +extern void destroy_context(struct mm_struct *mm); extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb); diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 77ba0b6cc1c..2b2ecd6104d 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -116,6 +116,10 @@ #define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */ #define _PAGE_G _AC(0x0000000000000001,UL) /* Global */ +#define _PAGE_ALL_SZ_BITS \ + (_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \ + _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB) + /* Here are the SpitFire software bits we use in the TTE's. * * WARNING: If you are going to try and start using some -- cgit v1.2.3 From 98c5584cfc47932c4f3ccf5eee2e0bae1447b85e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:20 -0800 Subject: [SPARC64]: Add infrastructure for dynamic TSB sizing. This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch() and the former is an inline function that picks out the bits from the mm_struct and passes them into the assembler code as arguments. setup_tsb_parms() computes the locked TLB entry to map the TSB. Later when we support using the physical address quad load instructions of Cheetah+ and later, we'll simply use the physical address for the TSB register value and set the map virtual and PTE both to zero. Signed-off-by: David S. Miller
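Before the diff, it helps to see what the TSB size actually controls. A TSB is a direct-mapped, power-of-two sized table of 16-byte tag/PTE slots, indexed by hashing the faulting virtual address. The following user-space sketch models that indexing, consistent with the struct tsb added in this patch; PAGE_SHIFT of 13 matches sparc64's 8K base pages, and the values in main() are made up.

    #include <stdio.h>

    #define PAGE_SHIFT	13	/* sparc64 8K base pages */

    struct tsb {
    	unsigned long tag;
    	unsigned long pte;
    } __attribute__((aligned(16)));

    /* nentries must be a power of two, so the mask acts as the hash. */
    static struct tsb *tsb_slot(struct tsb *tsb, unsigned long nentries,
    			    unsigned long vaddr)
    {
    	return &tsb[(vaddr >> PAGE_SHIFT) & (nentries - 1UL)];
    }

    int main(void)
    {
    	static struct tsb tsb[1024];	/* big enough for both sizes below */
    	unsigned long va = 0x70000000UL;

    	/* Addresses 512 pages apart collide in a 512-entry TSB... */
    	printf("collide@512: %d\n", tsb_slot(tsb, 512, va) ==
    	       tsb_slot(tsb, 512, va + (512UL << PAGE_SHIFT)));
    	/* ...but land in different slots once the table is doubled. */
    	printf("collide@1024: %d\n", tsb_slot(tsb, 1024, va) ==
    	       tsb_slot(tsb, 1024, va + (512UL << PAGE_SHIFT)));
    	return 0;
    }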
--- include/asm-sparc64/mmu.h | 13 ++++++++++++- include/asm-sparc64/mmu_context.h | 15 +++++++++++---- include/asm-sparc64/tsb.h | 2 +- 3 files changed, 24 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 36384cf7faa..2effeba2476 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -90,9 +90,20 @@ #ifndef __ASSEMBLY__ +#define TSB_ENTRY_ALIGNMENT 16 + +struct tsb { + unsigned long tag; + unsigned long pte; +} __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); + typedef struct { unsigned long sparc64_ctx_val; - unsigned long *sparc64_tsb; + struct tsb *tsb; + unsigned long tsb_nentries; + unsigned long tsb_reg_val; + unsigned long tsb_map_vaddr; + unsigned long tsb_map_pte; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 0dffb4ce8a1..0a950f151d2 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -22,7 +22,15 @@ extern void get_new_mmu_context(struct mm_struct *mm); extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); extern void destroy_context(struct mm_struct *mm); -extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb); +extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg, + unsigned long tsb_vaddr, unsigned long tsb_pte); + +static inline void tsb_context_switch(struct mm_struct *mm) +{ + __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val, + mm->context.tsb_map_vaddr, + mm->context.tsb_map_pte); +} /* Set MMU context in the actual hardware. */ #define load_secondary_context(__mm) \ @@ -52,8 +60,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str if (!ctx_valid || (old_mm != mm)) { load_secondary_context(mm); - tsb_context_switch(__pa(mm->pgd), - mm->context.sparc64_tsb); + tsb_context_switch(mm); } /* Even if (mm == old_mm) we _must_ check @@ -91,7 +98,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); - tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb); + tsb_context_switch(mm); } #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 03d272e0e47..1f93b7d8cdb 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -19,7 +19,7 @@ * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN * retry * - + * * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu * register which is: -- cgit v1.2.3 From bd40791e1d289d807b8580abe1f117e9c62894e4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:38 -0800 Subject: [SPARC64]: Dynamically grow TSB in response to RSS growth. As the RSS grows, grow the TSB in order to reduce the likelihood of hash collisions and thus poor hit rates in the TSB. This definitely needs some serious tuning. Signed-off-by: David S. Miller
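The grow policy itself lives in the arch code, not in these headers. The sketch below is a rough model of the idea only, not the kernel's actual heuristic (the message above says the tuning is still open): grow to the next power of two whenever the resident set would overfill the current table.

    #include <stdio.h>

    /* Illustrative policy: given the current number of TSB entries and
     * the RSS in pages, double the table until the RSS fits.  The
     * 512-entry floor and the "one page per slot" trigger are
     * assumptions for this sketch, not values taken from the kernel. */
    static unsigned long tsb_size_for_rss(unsigned long cur_nentries,
    				      unsigned long rss_pages)
    {
    	unsigned long nentries = cur_nentries ? cur_nentries : 512UL;

    	while (rss_pages > nentries)
    		nentries <<= 1;	/* more slots, fewer hash collisions */
    	return nentries;
    }

    int main(void)
    {
    	printf("%lu\n", tsb_size_for_rss(512, 100));	/* stays at 512 */
    	printf("%lu\n", tsb_size_for_rss(512, 5000));	/* grows to 8192 */
    	return 0;
    }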
--- include/asm-sparc64/mmu.h | 1 + include/asm-sparc64/mmu_context.h | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 2effeba2476..76008ff6a90 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -100,6 +100,7 @@ struct tsb { typedef struct { unsigned long sparc64_ctx_val; struct tsb *tsb; + unsigned long tsb_rss_limit; unsigned long tsb_nentries; unsigned long tsb_reg_val; unsigned long tsb_map_vaddr; diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 0a950f151d2..1d232678821 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -32,6 +32,13 @@ static inline void tsb_context_switch(struct mm_struct *mm) mm->context.tsb_map_pte); } +extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags); +#ifdef CONFIG_SMP +extern void smp_tsb_sync(struct mm_struct *mm); +#else +#define smp_tsb_sync(__mm) do { } while (0) +#endif + /* Set MMU context in the actual hardware. */ #define load_secondary_context(__mm) \ __asm__ __volatile__("stxa %0, [%1] %2\n\t" \ -- cgit v1.2.3 From b70c0fa1613c4f69b4a340a0e2bee387560ebbb1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:32:04 -0800 Subject: [SPARC64]: Preload TSB entries from update_mmu_cache(). Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 76008ff6a90..18f98edfbcd 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -97,6 +97,8 @@ struct tsb { unsigned long pte; } __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); +extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte); + typedef struct { unsigned long sparc64_ctx_val; struct tsb *tsb; -- cgit v1.2.3 From 4753eb2ac7022b999e5e484f1a5dc001dba22bd3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:32:44 -0800 Subject: [SPARC64]: Fix incorrect TSB lock bit handling. The TSB_LOCK_BIT define is actually a special value shifted down by 32 bits for the assembler code macros. In C code, this isn't what we want. Signed-off-by: David S. Miller --- include/asm-sparc64/tsb.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 1f93b7d8cdb..09ab3aaa8d2 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -47,13 +47,14 @@ * possible solution is to use RCU for the freeing of the TSB. */ -#define TSB_TAG_LOCK (1 << (47 - 32)) +#define TSB_TAG_LOCK_BIT 47 +#define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32)) #define TSB_MEMBAR membar #StoreStore #define TSB_LOCK_TAG(TSB, REG1, REG2) \ 99: lduwa [TSB] ASI_N, REG1; \ - sethi %hi(TSB_TAG_LOCK), REG2;\ + sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ andcc REG1, REG2, %g0; \ bne,pn %icc, 99b; \ nop; \ -- cgit v1.2.3 From a8b900d801697609d1b56cc9c110148c64678068 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:37 -0800 Subject: [SPARC64]: Kill sole argument passed to setup_tba(). No longer used, and move extern declaration to a header file. Signed-off-by: David S. Miller
--- include/asm-sparc64/cpudata.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 6c57cbb9a7d..16d62891383 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -61,6 +61,7 @@ struct trap_per_cpu { extern struct trap_per_cpu trap_block[NR_CPUS]; extern void init_cur_cpu_trap(void); extern void per_cpu_patch(void); +extern void setup_tba(void); #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From 2f7ee7c63f08b7f883b710a29d91c1891b81b8e1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:49 -0800 Subject: [SPARC64]: Increase swapper_tsb size to 32K. Signed-off-by: David S. Miller --- include/asm-sparc64/tsb.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 09ab3aaa8d2..1addd91d720 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -143,6 +143,14 @@ add REG1, (3 * 8), REG1; \ 99: + /* We use a 32K TSB for the whole kernel; this allows us to + * handle about 16MB of modules and vmalloc mappings without + * incurring many hash conflicts. + */ +#define KERNEL_TSB_SIZE_BYTES (32 * 1024) +#define KERNEL_TSB_NENTRIES \ + (KERNEL_TSB_SIZE_BYTES / 16) + /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries * and the found TTE will be left in REG1. REG3 and REG4 must @@ -150,12 +158,11 @@ * * VADDR and TAG will be preserved and not clobbered by this macro. */ - /* XXX non-8K base page size support... */ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ sethi %hi(swapper_tsb), REG1; \ or REG1, %lo(swapper_tsb), REG1; \ - srlx VADDR, 13, REG2; \ - and REG2, (512 - 1), REG2; \ + srlx VADDR, PAGE_SHIFT, REG2; \ + and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ sllx REG2, 4, REG2; \ add REG1, REG2, REG2; \ ldda [REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \ -- cgit v1.2.3 From 86b818687d4894063ecd1190e54717a0cce8c009 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:34:51 -0800 Subject: [SPARC64]: Fix race in LOAD_PER_CPU_BASE() Since we use %g5 itself as a temporary, it can get clobbered if we take an interrupt mid-stream and thus end up with the final %g5 value too early as a result of rtrap processing. Set %g5 at the very end, atomically, to avoid this problem. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 16d62891383..f83768883e9 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -101,20 +101,25 @@ extern void setup_tba(void); ldx [%g1 + %g6], %g6; /* Given the current thread info pointer in %g6, load the per-cpu - * area base of the current processor into %g5. REG1 and REG2 are + * area base of the current processor into %g5. REG1, REG2, and REG3 are * clobbered. + * + * You absolutely cannot use %g5 as a temporary in this code. The + * reason is that traps can happen during execution, and return from + * trap will load the fully resolved %g5 per-cpu base. This can corrupt + * the calculations done by the macro mid-stream.
*/ #ifdef CONFIG_SMP -#define LOAD_PER_CPU_BASE(REG1, REG2) \ +#define LOAD_PER_CPU_BASE(REG1, REG2, REG3) \ ldub [%g6 + TI_CPU], REG1; \ - sethi %hi(__per_cpu_shift), %g5; \ + sethi %hi(__per_cpu_shift), REG3; \ sethi %hi(__per_cpu_base), REG2; \ - ldx [%g5 + %lo(__per_cpu_shift)], %g5; \ + ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ ldx [REG2 + %lo(__per_cpu_base)], REG2; \ - sllx REG1, %g5, %g5; \ - add %g5, REG2, %g5; + sllx REG1, REG3, REG3; \ + add REG3, REG2, %g5; #else -#define LOAD_PER_CPU_BASE(REG1, REG2) +#define LOAD_PER_CPU_BASE(REG1, REG2, REG3) #endif #endif /* _SPARC64_CPUDATA_H */ -- cgit v1.2.3 From b0fd4e49aea8a460afab7bc67cd618e2d19291d4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 23:13:29 -0800 Subject: [SPARC64]: Kill out-of-date commentary in asm-sparc64/tsb.h Signed-off-by: David S. Miller --- include/asm-sparc64/tsb.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 1addd91d720..f384565212f 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -37,14 +37,6 @@ * choose to use bit 47 in the tag. Also, since we never map anything * at page zero in context zero, we use zero as an invalid tag entry. * When the lock bit is set, this forces a tag comparison failure. - * - * Currently, we allocate an 8K TSB per-process and we use it for both - * I-TLB and D-TLB misses. Perhaps at some point we'll add code that - * monitors the number of active pages in the process as we get - * major/minor faults, and grow the TSB in response. The only trick - * in implementing that is synchronizing the freeing of the old TSB - * wrt. parallel TSB updates occuring on other processors. On - * possible solution is to use RCU for the freeing of the TSB. */ #define TSB_TAG_LOCK_BIT 47 -- cgit v1.2.3 From 517af33237ecfc3c8a93b335365fa61e741ceca4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Feb 2006 15:55:21 -0800 Subject: [SPARC64]: Access TSB with physical addresses when possible. This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 3 +- include/asm-sparc64/tsb.h | 94 ++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 91 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 18f98edfbcd..55e622711b9 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -97,7 +97,8 @@ struct tsb { unsigned long pte; } __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); -extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte); +extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte); +extern void tsb_flush(unsigned long ent, unsigned long tag); typedef struct { unsigned long sparc64_ctx_val; diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index f384565212f..44709cde561 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -44,7 +44,89 @@ #define TSB_MEMBAR membar #StoreStore +/* Some cpus support physical address quad loads. 
We want to use + * those if possible so we don't need to hard-lock the TSB mapping + * into the TLB. We encode some instruction patching in order to + * support this. + * + * The kernel TSB is locked into the TLB by virtue of being in the + * kernel image, so we don't play these games for swapper_tsb access. + */ +#ifndef __ASSEMBLY__ +struct tsb_phys_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; +#endif +#define TSB_LOAD_QUAD(TSB, REG) \ +661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \ + .section .tsb_phys_patch, "ax"; \ + .word 661b; \ + ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \ + .previous + +#define TSB_LOAD_TAG_HIGH(TSB, REG) \ +661: lduwa [TSB] ASI_N, REG; \ + .section .tsb_phys_patch, "ax"; \ + .word 661b; \ + lduwa [TSB] ASI_PHYS_USE_EC, REG; \ + .previous + +#define TSB_LOAD_TAG(TSB, REG) \ +661: ldxa [TSB] ASI_N, REG; \ + .section .tsb_phys_patch, "ax"; \ + .word 661b; \ + ldxa [TSB] ASI_PHYS_USE_EC, REG; \ + .previous + +#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \ +661: casa [TSB] ASI_N, REG1, REG2; \ + .section .tsb_phys_patch, "ax"; \ + .word 661b; \ + casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \ + .previous + +#define TSB_CAS_TAG(TSB, REG1, REG2) \ +661: casxa [TSB] ASI_N, REG1, REG2; \ + .section .tsb_phys_patch, "ax"; \ + .word 661b; \ + casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \ + .previous + +#define TSB_STORE(ADDR, VAL) \ +661: stxa VAL, [ADDR] ASI_N; \ + .section .tsb_phys_patch, "ax"; \ + .word 661b; \ + stxa VAL, [ADDR] ASI_PHYS_USE_EC; \ + .previous + #define TSB_LOCK_TAG(TSB, REG1, REG2) \ +99: TSB_LOAD_TAG_HIGH(TSB, REG1); \ + sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ + andcc REG1, REG2, %g0; \ + bne,pn %icc, 99b; \ + nop; \ + TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \ + cmp REG1, REG2; \ + bne,pn %icc, 99b; \ + nop; \ + TSB_MEMBAR + +#define TSB_WRITE(TSB, TTE, TAG) \ + add TSB, 0x8, TSB; \ + TSB_STORE(TSB, TTE); \ + sub TSB, 0x8, TSB; \ + TSB_MEMBAR; \ + TSB_STORE(TSB, TAG); + +#define KTSB_LOAD_QUAD(TSB, REG) \ + ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; + +#define KTSB_STORE(ADDR, VAL) \ + stxa VAL, [ADDR] ASI_N; + +#define KTSB_LOCK_TAG(TSB, REG1, REG2) \ 99: lduwa [TSB] ASI_N, REG1; \ sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\ andcc REG1, REG2, %g0; \ @@ -56,10 +138,12 @@ nop; \ TSB_MEMBAR -#define TSB_WRITE(TSB, TTE, TAG) \ - stx TTE, [TSB + 0x08]; \ - TSB_MEMBAR; \ - stx TAG, [TSB + 0x00]; +#define KTSB_WRITE(TSB, TTE, TAG) \ + add TSB, 0x8, TSB; \ + stxa TTE, [TSB] ASI_N; \ + sub TSB, 0x8, TSB; \ + TSB_MEMBAR; \ + stxa TAG, [TSB] ASI_N; /* Do a kernel page table walk. Leaves physical PTE pointer in * REG1. Jumps to FAIL_LABEL on early page table walk termination. @@ -157,7 +241,7 @@ and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ sllx REG2, 4, REG2; \ add REG1, REG2, REG2; \ - ldda [REG2] ASI_NUCLEUS_QUAD_LDD, REG3; \ + KTSB_LOAD_QUAD(REG2, REG3); \ cmp REG3, TAG; \ be,a,pt %xcc, OK_LABEL; \ mov REG4, REG1; -- cgit v1.2.3 From 7bec08e38a7d0f088994f6eec9b6374652ea71fb Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 01:20:18 -0800 Subject: [SPARC64]: Correctable ECC errors cannot occur at trap level > 0. They are disrupting, which by the sparc v9 definition means they can only occur when interrupts are enabled in the %pstate register. This never occurs in any of the trap handling code running at trap levels > 0. So just mark it as an unexpected trap. This allows us to kill off the cee_stuff member of struct thread_info. Signed-off-by: David S. Miller
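Stepping back to the .tsb_phys_patch machinery introduced above: the section merely accumulates (location, replacement instruction) pairs, and a boot-time pass walks the table rewriting each site. The user-space model below is only a sketch of that walk; the kernel records each site as a 32-bit .word and must also flush the rewritten instructions from the I-cache, and the pointer-based entries and fake instruction words here exist purely to keep the model runnable.

    #include <stdio.h>

    /* Models struct tsb_phys_patch_entry: where to patch, and the
     * instruction to install there. */
    struct insn_patch_entry {
    	unsigned int	*addr;	/* the kernel stores a 32-bit address here */
    	unsigned int	insn;
    };

    /* Model of the boot-time pass: replace each recorded virtual-ASI
     * access with its physical-ASI variant. */
    static void apply_patches(struct insn_patch_entry *start,
    			  struct insn_patch_entry *end)
    {
    	struct insn_patch_entry *p;

    	for (p = start; p < end; p++)
    		*p->addr = p->insn;	/* real code must flush the I-cache too */
    }

    int main(void)
    {
    	unsigned int text[2] = { 0x11111111, 0x22222222 };	/* fake insns */
    	struct insn_patch_entry patches[] = {
    		{ &text[0], 0xaaaaaaaa },
    		{ &text[1], 0xbbbbbbbb },
    	};

    	apply_patches(patches, patches + 2);
    	printf("%08x %08x\n", text[0], text[1]);
    	return 0;
    }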
--- include/asm-sparc64/thread_info.h | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index ac9d068aab4..2ebf7f27bf9 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -64,8 +64,6 @@ struct thread_info { __u64 kernel_cntd0, kernel_cntd1; __u64 pcr_reg; - __u64 cee_stuff; - struct restart_block restart_block; struct pt_regs *kern_una_regs; @@ -104,10 +102,9 @@ struct thread_info { #define TI_KERN_CNTD0 0x00000480 #define TI_KERN_CNTD1 0x00000488 #define TI_PCR 0x00000490 -#define TI_CEE_STUFF 0x00000498 -#define TI_RESTART_BLOCK 0x000004a0 -#define TI_KUNA_REGS 0x000004c8 -#define TI_KUNA_INSN 0x000004d0 +#define TI_RESTART_BLOCK 0x00000498 +#define TI_KUNA_REGS 0x000004c0 +#define TI_KUNA_INSN 0x000004c8 #define TI_FPREGS 0x00000500 /* We embed this in the uppermost byte of thread_info->flags */ -- cgit v1.2.3 From 92704a1c63c3b481870d02636d0b5a70c7e21cd1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 23:27:19 -0800 Subject: [SPARC64]: Refine code sequences to get the cpu id. On uniprocessor, it's always zero, so optimize for that. On SMP, the jmpl to the stub kills the return address stack in the cpu branch prediction logic, so expand the code sequence inline and use a code patching section to fix things up. This also allows better and explicit register selection, which will be taken advantage of in a future changeset. The hard_smp_processor_id() function is big, so do not inline it. Fix up tests for Jalapeno to also test for Serrano chips. These tests want "jbus Ultra-IIIi" cases to match, so that is what we should test for. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 89 +++++++++++++++++++++++++++++++++---------- include/asm-sparc64/head.h | 1 + include/asm-sparc64/smp.h | 28 +------------- 3 files changed, 71 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index f83768883e9..da54b4f3540 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -60,9 +60,18 @@ struct trap_per_cpu { } __attribute__((aligned(64))); extern struct trap_per_cpu trap_block[NR_CPUS]; extern void init_cur_cpu_trap(void); -extern void per_cpu_patch(void); extern void setup_tba(void); +#ifdef CONFIG_SMP +struct cpuid_patch_entry { + unsigned int addr; + unsigned int cheetah_safari[4]; + unsigned int cheetah_jbus[4]; + unsigned int starfire[4]; +}; +extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; +#endif + #endif /* !(__ASSEMBLY__) */ #define TRAP_PER_CPU_THREAD 0x00 @@ -70,35 +79,58 @@ extern void setup_tba(void); #define TRAP_BLOCK_SZ_SHIFT 6 -/* Clobbers %g1, loads %g6 with local processor's cpuid */ -#define __GET_CPUID \ - ba,pt %xcc, __get_cpu_id; \ - rd %pc, %g1; +#ifdef CONFIG_SMP + +#define __GET_CPUID(REG) \ + /* Spitfire implementation (default). */ \ +661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ + srlx REG, 17, REG; \ + and REG, 0x1f, REG; \ + nop; \ + .section .cpuid_patch, "ax"; \ + /* Instruction location. */ \ + .word 661b; \ + /* Cheetah Safari implementation. */ \ + ldxa [%g0] ASI_SAFARI_CONFIG, REG; \ + srlx REG, 17, REG; \ + and REG, 0x3ff, REG; \ + nop; \ + /* Cheetah JBUS implementation. */ \ + ldxa [%g0] ASI_JBUS_CONFIG, REG; \ + srlx REG, 17, REG; \ + and REG, 0x1f, REG; \ + nop; \ + /* Starfire implementation.
*/ \ + sethi %hi(0x1fff40000d0 >> 9), REG; \ + sllx REG, 9, REG; \ + or REG, 0xd0, REG; \ + lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ + .previous; /* Clobbers %g1, current address space PGD phys address into %g7. */ #define TRAP_LOAD_PGD_PHYS \ - __GET_CPUID \ - sllx %g6, TRAP_BLOCK_SZ_SHIFT, %g6; \ + __GET_CPUID(%g1) \ sethi %hi(trap_block), %g7; \ + sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1; \ or %g7, %lo(trap_block), %g7; \ - add %g7, %g6, %g7; \ + add %g7, %g1, %g7; \ ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7; /* Clobbers %g1, loads local processor's IRQ work area into %g6. */ #define TRAP_LOAD_IRQ_WORK \ - __GET_CPUID \ - sethi %hi(__irq_work), %g1; \ - sllx %g6, 6, %g6; \ - or %g1, %lo(__irq_work), %g1; \ - add %g1, %g6, %g6; + __GET_CPUID(%g1) \ + sethi %hi(__irq_work), %g6; \ + sllx %g1, 6, %g1; \ + or %g6, %lo(__irq_work), %g6; \ + add %g6, %g1, %g6; /* Clobbers %g1, loads %g6 with current thread info pointer. */ #define TRAP_LOAD_THREAD_REG \ - __GET_CPUID \ - sllx %g6, TRAP_BLOCK_SZ_SHIFT, %g6; \ - sethi %hi(trap_block), %g1; \ - or %g1, %lo(trap_block), %g1; \ - ldx [%g1 + %g6], %g6; + __GET_CPUID(%g1) \ + sethi %hi(trap_block), %g6; \ + sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1; \ + or %g6, %lo(trap_block), %g6; \ + ldx [%g6 + %g1], %g6; /* Given the current thread info pointer in %g6, load the per-cpu * area base of the current processor into %g5. REG1, REG2, and REG3 are @@ -109,7 +141,6 @@ extern void setup_tba(void); * trap will load the fully resolved %g5 per-cpu base. This can corrupt * the calculations done by the macro mid-stream. */ -#ifdef CONFIG_SMP #define LOAD_PER_CPU_BASE(REG1, REG2, REG3) \ ldub [%g6 + TI_CPU], REG1; \ sethi %hi(__per_cpu_shift), REG3; \ @@ -118,8 +149,26 @@ extern void setup_tba(void); ldx [REG2 + %lo(__per_cpu_base)], REG2; \ sllx REG1, REG3, REG3; \ add REG3, REG2, %g5; + #else + +/* Uniprocessor versions, we know the cpuid is zero. */ +#define TRAP_LOAD_PGD_PHYS \ + sethi %hi(trap_block), %g7; \ + or %g7, %lo(trap_block), %g7; \ + ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7; + +#define TRAP_LOAD_IRQ_WORK \ + sethi %hi(__irq_work), %g6; \ + or %g6, %lo(__irq_work), %g6; + +#define TRAP_LOAD_THREAD_REG \ + sethi %hi(trap_block), %g6; \ + ldx [%g6 + %lo(trap_block)], %g6; + +/* No per-cpu areas on uniprocessor, so no need to load %g5. */ #define LOAD_PER_CPU_BASE(REG1, REG2, REG3) -#endif + +#endif /* !(CONFIG_SMP) */ #endif /* _SPARC64_CPUDATA_H */ diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index 0abd3a674e8..731c842f3d1 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -10,6 +10,7 @@ #define __CHEETAH_ID 0x003e0014 #define __JALAPENO_ID 0x003e0016 +#define __SERRANO_ID 0x003e0022 #define CHEETAH_MANUF 0x003e #define CHEETAH_IMPL 0x0014 /* Ultra-III */ diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 473edb2603e..ad1d35a7d13 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -37,33 +37,7 @@ extern cpumask_t phys_cpu_present_map; * General functions that each host system must provide. 
*/ -static __inline__ int hard_smp_processor_id(void) -{ - if (tlb_type == cheetah || tlb_type == cheetah_plus) { - unsigned long cfg, ver; - __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); - if ((ver >> 32) == 0x003e0016) { - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (cfg) - : "i" (ASI_JBUS_CONFIG)); - return ((cfg >> 17) & 0x1f); - } else { - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (cfg) - : "i" (ASI_SAFARI_CONFIG)); - return ((cfg >> 17) & 0x3ff); - } - } else if (this_is_starfire != 0) { - return starfire_hard_smp_processor_id(); - } else { - unsigned long upaconfig; - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (upaconfig) - : "i" (ASI_UPA_CONFIG)); - return ((upaconfig >> 17) & 0x1f); - } -} - +extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) extern void smp_setup_cpu_possible_map(void); -- cgit v1.2.3 From ffe483d55229fadbaf4cc7316d47024a24ecd1a2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 21:55:10 -0800 Subject: [SPARC64]: Add explicit register args to trap state loading macros. This, as well as making the code cleaner, allows a simplification in the TSB miss handling path. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 88 +++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index da54b4f3540..c15514f82c3 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -107,67 +107,67 @@ extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ .previous; -/* Clobbers %g1, current address space PGD phys address into %g7. */ -#define TRAP_LOAD_PGD_PHYS \ - __GET_CPUID(%g1) \ - sethi %hi(trap_block), %g7; \ - sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1; \ - or %g7, %lo(trap_block), %g7; \ - add %g7, %g1, %g7; \ - ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7; - -/* Clobbers %g1, loads local processor's IRQ work area into %g6. */ -#define TRAP_LOAD_IRQ_WORK \ - __GET_CPUID(%g1) \ - sethi %hi(__irq_work), %g6; \ - sllx %g1, 6, %g1; \ - or %g6, %lo(__irq_work), %g6; \ - add %g6, %g1, %g6; - -/* Clobbers %g1, loads %g6 with current thread info pointer. */ -#define TRAP_LOAD_THREAD_REG \ - __GET_CPUID(%g1) \ - sethi %hi(trap_block), %g6; \ - sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1; \ - or %g6, %lo(trap_block), %g6; \ - ldx [%g6 + %g1], %g6; - -/* Given the current thread info pointer in %g6, load the per-cpu - * area base of the current processor into %g5. REG1, REG2, and REG3 are +/* Clobbers TMP, current address space PGD phys address into DEST. */ +#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ + __GET_CPUID(TMP) \ + sethi %hi(trap_block), DEST; \ + sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ + or DEST, %lo(trap_block), DEST; \ + add DEST, TMP, DEST; \ + ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; + +/* Clobbers TMP, loads local processor's IRQ work area into DEST. */ +#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ + __GET_CPUID(TMP) \ + sethi %hi(__irq_work), DEST; \ + sllx TMP, 6, TMP; \ + or DEST, %lo(__irq_work), DEST; \ + add DEST, TMP, DEST; + +/* Clobbers TMP, loads DEST with current thread info pointer. 
*/ +#define TRAP_LOAD_THREAD_REG(DEST, TMP) \ + __GET_CPUID(TMP) \ + sethi %hi(trap_block), DEST; \ + sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ + or DEST, %lo(trap_block), DEST; \ + ldx [DEST + TMP], DEST; + +/* Given the current thread info pointer in THR, load the per-cpu + * area base of the current processor into DEST. REG1, REG2, and REG3 are * clobbered. * - * You absolutely cannot use DEST as a temporary in this code. The + * You absolutely cannot use DEST as a temporary in this code. The * reason is that traps can happen during execution, and return from - * trap will load the fully resolved DEST per-cpu base. This can corrupt + * trap will load the fully resolved DEST per-cpu base. This can corrupt * the calculations done by the macro mid-stream. */ -#define LOAD_PER_CPU_BASE(REG1, REG2, REG3) \ - ldub [%g6 + TI_CPU], REG1; \ +#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ + ldub [THR + TI_CPU], REG1; \ sethi %hi(__per_cpu_shift), REG3; \ sethi %hi(__per_cpu_base), REG2; \ ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ ldx [REG2 + %lo(__per_cpu_base)], REG2; \ sllx REG1, REG3, REG3; \ - add REG3, REG2, %g5. + add REG3, REG2, DEST; #else /* Uniprocessor versions, we know the cpuid is zero. */ -#define TRAP_LOAD_PGD_PHYS \ - sethi %hi(trap_block), %g7; \ - or %g7, %lo(trap_block), %g7; \ - ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7; +#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ + sethi %hi(trap_block), DEST; \ + or DEST, %lo(trap_block), DEST; \ + ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; -#define TRAP_LOAD_IRQ_WORK \ - sethi %hi(__irq_work), %g6; \ - or %g6, %lo(__irq_work), %g6; +#define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ + sethi %hi(__irq_work), DEST; \ + or DEST, %lo(__irq_work), DEST; -#define TRAP_LOAD_THREAD_REG \ - sethi %hi(trap_block), %g6; \ - ldx [%g6 + %lo(trap_block)], %g6; +#define TRAP_LOAD_THREAD_REG(DEST, TMP) \ + sethi %hi(trap_block), DEST; \ + ldx [DEST + %lo(trap_block)], DEST; -/* No per-cpu areas on uniprocessor, so no need to load %g5. */ -#define LOAD_PER_CPU_BASE(REG1, REG2, REG3) +/* No per-cpu areas on uniprocessor, so no need to load DEST. */ +#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) #endif /* !(CONFIG_SMP) */ #endif /* _SPARC64_CPUDATA_H */ diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index 0abd3a674e8..731c842f3d1 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -10,6 +10,7 @@ #define __CHEETAH_ID 0x003e0014 #define __JALAPENO_ID 0x003e0016 +#define __SERRANO_ID 0x003e0022 #define CHEETAH_MANUF 0x003e #define CHEETAH_IMPL 0x0014 /* Ultra-III */ diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 473edb2603e..ad1d35a7d13 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -37,33 +37,7 @@ extern cpumask_t phys_cpu_present_map; * General functions that each host system must provide.
Miller --- include/asm-sparc64/ttable.h | 234 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 233 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index f557db4faf8..f912f52c0c7 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -93,7 +93,7 @@ #define SYSCALL_TRAP(routine, systbl) \ sethi %hi(109f), %g7; \ - ba,pt %xcc, scetrap; \ + ba,pt %xcc, etrap; \ 109: or %g7, %lo(109b), %g7; \ sethi %hi(systbl), %l7; \ ba,pt %xcc, routine; \ @@ -219,6 +219,31 @@ saved; retry; nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; nop; nop; nop; +#define SPILL_0_NORMAL_ETRAP \ +etrap_kernel_spill: \ + stx %l0, [%sp + STACK_BIAS + 0x00]; \ + stx %l1, [%sp + STACK_BIAS + 0x08]; \ + stx %l2, [%sp + STACK_BIAS + 0x10]; \ + stx %l3, [%sp + STACK_BIAS + 0x18]; \ + stx %l4, [%sp + STACK_BIAS + 0x20]; \ + stx %l5, [%sp + STACK_BIAS + 0x28]; \ + stx %l6, [%sp + STACK_BIAS + 0x30]; \ + stx %l7, [%sp + STACK_BIAS + 0x38]; \ + stx %i0, [%sp + STACK_BIAS + 0x40]; \ + stx %i1, [%sp + STACK_BIAS + 0x48]; \ + stx %i2, [%sp + STACK_BIAS + 0x50]; \ + stx %i3, [%sp + STACK_BIAS + 0x58]; \ + stx %i4, [%sp + STACK_BIAS + 0x60]; \ + stx %i5, [%sp + STACK_BIAS + 0x68]; \ + stx %i6, [%sp + STACK_BIAS + 0x70]; \ + stx %i7, [%sp + STACK_BIAS + 0x78]; \ + saved; \ + sub %g1, 2, %g1; \ + ba,pt %xcc, etrap_save; \ + wrpr %g1, %cwp; \ + nop; nop; nop; nop; nop; nop; nop; nop; \ + nop; nop; nop; nop; + /* Normal 64bit spill */ #define SPILL_1_GENERIC(ASI) \ add %sp, STACK_BIAS + 0x00, %g1; \ @@ -252,6 +277,67 @@ b,a,pt %xcc, spill_fixup_mna; \ b,a,pt %xcc, spill_fixup; +#define SPILL_1_GENERIC_ETRAP \ +etrap_user_spill_64bit: \ + stxa %l0, [%sp + STACK_BIAS + 0x00] %asi; \ + stxa %l1, [%sp + STACK_BIAS + 0x08] %asi; \ + stxa %l2, [%sp + STACK_BIAS + 0x10] %asi; \ + stxa %l3, [%sp + STACK_BIAS + 0x18] %asi; \ + stxa %l4, [%sp + STACK_BIAS + 0x20] %asi; \ + stxa %l5, [%sp + STACK_BIAS + 0x28] %asi; \ + stxa %l6, [%sp + STACK_BIAS + 0x30] %asi; \ + stxa %l7, [%sp + STACK_BIAS + 0x38] %asi; \ + stxa %i0, [%sp + STACK_BIAS + 0x40] %asi; \ + stxa %i1, [%sp + STACK_BIAS + 0x48] %asi; \ + stxa %i2, [%sp + STACK_BIAS + 0x50] %asi; \ + stxa %i3, [%sp + STACK_BIAS + 0x58] %asi; \ + stxa %i4, [%sp + STACK_BIAS + 0x60] %asi; \ + stxa %i5, [%sp + STACK_BIAS + 0x68] %asi; \ + stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \ + stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \ + saved; \ + sub %g1, 2, %g1; \ + ba,pt %xcc, etrap_save; \ + wrpr %g1, %cwp; \ + nop; nop; nop; nop; nop; \ + nop; nop; nop; nop; \ + ba,a,pt %xcc, etrap_spill_fixup_64bit; \ + ba,a,pt %xcc, etrap_spill_fixup_64bit; \ + ba,a,pt %xcc, etrap_spill_fixup_64bit; + +#define SPILL_1_GENERIC_ETRAP_FIXUP \ +etrap_spill_fixup_64bit: \ + ldub [%g6 + TI_WSAVED], %g1; \ + sll %g1, 3, %g3; \ + add %g6, %g3, %g3; \ + stx %sp, [%g3 + TI_RWIN_SPTRS]; \ + sll %g1, 7, %g3; \ + add %g6, %g3, %g3; \ + stx %l0, [%g3 + TI_REG_WINDOW + 0x00]; \ + stx %l1, [%g3 + TI_REG_WINDOW + 0x08]; \ + stx %l2, [%g3 + TI_REG_WINDOW + 0x10]; \ + stx %l3, [%g3 + TI_REG_WINDOW + 0x18]; \ + stx %l4, [%g3 + TI_REG_WINDOW + 0x20]; \ + stx %l5, [%g3 + TI_REG_WINDOW + 0x28]; \ + stx %l6, [%g3 + TI_REG_WINDOW + 0x30]; \ + stx %l7, [%g3 + TI_REG_WINDOW + 0x38]; \ + stx %i0, [%g3 + TI_REG_WINDOW + 0x40]; \ + stx %i1, [%g3 + TI_REG_WINDOW + 0x48]; \ + stx %i2, [%g3 + TI_REG_WINDOW + 0x50]; \ + stx %i3, [%g3 + TI_REG_WINDOW + 0x58]; \ + stx %i4, [%g3 + TI_REG_WINDOW + 0x60]; \ + stx %i5, [%g3 + TI_REG_WINDOW + 0x68]; 
\ + stx %i6, [%g3 + TI_REG_WINDOW + 0x70]; \ + stx %i7, [%g3 + TI_REG_WINDOW + 0x78]; \ + add %g1, 1, %g1; \ + stb %g1, [%g6 + TI_WSAVED]; \ + saved; \ + rdpr %cwp, %g1; \ + sub %g1, 2, %g1; \ + ba,pt %xcc, etrap_save; \ + wrpr %g1, %cwp; \ + nop; nop; nop + /* Normal 32bit spill */ #define SPILL_2_GENERIC(ASI) \ srl %sp, 0, %sp; \ @@ -285,6 +371,68 @@ b,a,pt %xcc, spill_fixup_mna; \ b,a,pt %xcc, spill_fixup; +#define SPILL_2_GENERIC_ETRAP \ +etrap_user_spill_32bit: \ + srl %sp, 0, %sp; \ + stwa %l0, [%sp + 0x00] %asi; \ + stwa %l1, [%sp + 0x04] %asi; \ + stwa %l2, [%sp + 0x08] %asi; \ + stwa %l3, [%sp + 0x0c] %asi; \ + stwa %l4, [%sp + 0x10] %asi; \ + stwa %l5, [%sp + 0x14] %asi; \ + stwa %l6, [%sp + 0x18] %asi; \ + stwa %l7, [%sp + 0x1c] %asi; \ + stwa %i0, [%sp + 0x20] %asi; \ + stwa %i1, [%sp + 0x24] %asi; \ + stwa %i2, [%sp + 0x28] %asi; \ + stwa %i3, [%sp + 0x2c] %asi; \ + stwa %i4, [%sp + 0x30] %asi; \ + stwa %i5, [%sp + 0x34] %asi; \ + stwa %i6, [%sp + 0x38] %asi; \ + stwa %i7, [%sp + 0x3c] %asi; \ + saved; \ + sub %g1, 2, %g1; \ + ba,pt %xcc, etrap_save; \ + wrpr %g1, %cwp; \ + nop; nop; nop; nop; \ + nop; nop; nop; nop; \ + ba,a,pt %xcc, etrap_spill_fixup_32bit; \ + ba,a,pt %xcc, etrap_spill_fixup_32bit; \ + ba,a,pt %xcc, etrap_spill_fixup_32bit; + +#define SPILL_2_GENERIC_ETRAP_FIXUP \ +etrap_spill_fixup_32bit: \ + ldub [%g6 + TI_WSAVED], %g1; \ + sll %g1, 3, %g3; \ + add %g6, %g3, %g3; \ + stx %sp, [%g3 + TI_RWIN_SPTRS]; \ + sll %g1, 7, %g3; \ + add %g6, %g3, %g3; \ + stw %l0, [%g3 + TI_REG_WINDOW + 0x00]; \ + stw %l1, [%g3 + TI_REG_WINDOW + 0x04]; \ + stw %l2, [%g3 + TI_REG_WINDOW + 0x08]; \ + stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]; \ + stw %l4, [%g3 + TI_REG_WINDOW + 0x10]; \ + stw %l5, [%g3 + TI_REG_WINDOW + 0x14]; \ + stw %l6, [%g3 + TI_REG_WINDOW + 0x18]; \ + stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]; \ + stw %i0, [%g3 + TI_REG_WINDOW + 0x20]; \ + stw %i1, [%g3 + TI_REG_WINDOW + 0x24]; \ + stw %i2, [%g3 + TI_REG_WINDOW + 0x28]; \ + stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]; \ + stw %i4, [%g3 + TI_REG_WINDOW + 0x30]; \ + stw %i5, [%g3 + TI_REG_WINDOW + 0x34]; \ + stw %i6, [%g3 + TI_REG_WINDOW + 0x38]; \ + stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]; \ + add %g1, 1, %g1; \ + stb %g1, [%g6 + TI_WSAVED]; \ + saved; \ + rdpr %cwp, %g1; \ + sub %g1, 2, %g1; \ + ba,pt %xcc, etrap_save; \ + wrpr %g1, %cwp; \ + nop; nop; nop + #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP) #define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP) #define SPILL_3_NORMAL SPILL_0_NORMAL @@ -323,6 +471,35 @@ restored; retry; nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; nop; nop; nop; +#define FILL_0_NORMAL_RTRAP \ +kern_rtt_fill: \ + rdpr %cwp, %g1; \ + sub %g1, 1, %g1; \ + wrpr %g1, %cwp; \ + ldx [%sp + STACK_BIAS + 0x00], %l0; \ + ldx [%sp + STACK_BIAS + 0x08], %l1; \ + ldx [%sp + STACK_BIAS + 0x10], %l2; \ + ldx [%sp + STACK_BIAS + 0x18], %l3; \ + ldx [%sp + STACK_BIAS + 0x20], %l4; \ + ldx [%sp + STACK_BIAS + 0x28], %l5; \ + ldx [%sp + STACK_BIAS + 0x30], %l6; \ + ldx [%sp + STACK_BIAS + 0x38], %l7; \ + ldx [%sp + STACK_BIAS + 0x40], %i0; \ + ldx [%sp + STACK_BIAS + 0x48], %i1; \ + ldx [%sp + STACK_BIAS + 0x50], %i2; \ + ldx [%sp + STACK_BIAS + 0x58], %i3; \ + ldx [%sp + STACK_BIAS + 0x60], %i4; \ + ldx [%sp + STACK_BIAS + 0x68], %i5; \ + ldx [%sp + STACK_BIAS + 0x70], %i6; \ + ldx [%sp + STACK_BIAS + 0x78], %i7; \ + restored; \ + add %g1, 1, %g1; \ + ba,pt %xcc, kern_rtt_restore; \ + wrpr %g1, %cwp; \ + nop; nop; nop; nop; nop; \ + nop; nop; nop; nop; + + /* Normal 64bit fill */ #define FILL_1_GENERIC(ASI) 
\ add %sp, STACK_BIAS + 0x00, %g1; \ @@ -354,6 +531,33 @@ b,a,pt %xcc, fill_fixup_mna; \ b,a,pt %xcc, fill_fixup; +#define FILL_1_GENERIC_RTRAP \ +user_rtt_fill_64bit: \ + ldxa [%sp + STACK_BIAS + 0x00] %asi, %l0; \ + ldxa [%sp + STACK_BIAS + 0x08] %asi, %l1; \ + ldxa [%sp + STACK_BIAS + 0x10] %asi, %l2; \ + ldxa [%sp + STACK_BIAS + 0x18] %asi, %l3; \ + ldxa [%sp + STACK_BIAS + 0x20] %asi, %l4; \ + ldxa [%sp + STACK_BIAS + 0x28] %asi, %l5; \ + ldxa [%sp + STACK_BIAS + 0x30] %asi, %l6; \ + ldxa [%sp + STACK_BIAS + 0x38] %asi, %l7; \ + ldxa [%sp + STACK_BIAS + 0x40] %asi, %i0; \ + ldxa [%sp + STACK_BIAS + 0x48] %asi, %i1; \ + ldxa [%sp + STACK_BIAS + 0x50] %asi, %i2; \ + ldxa [%sp + STACK_BIAS + 0x58] %asi, %i3; \ + ldxa [%sp + STACK_BIAS + 0x60] %asi, %i4; \ + ldxa [%sp + STACK_BIAS + 0x68] %asi, %i5; \ + ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \ + ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \ + ba,pt %xcc, user_rtt_pre_restore; \ + restored; \ + nop; nop; nop; nop; nop; nop; \ + nop; nop; nop; nop; nop; \ + ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup; + + /* Normal 32bit fill */ #define FILL_2_GENERIC(ASI) \ srl %sp, 0, %sp; \ @@ -385,6 +589,34 @@ b,a,pt %xcc, fill_fixup_mna; \ b,a,pt %xcc, fill_fixup; +#define FILL_2_GENERIC_RTRAP \ +user_rtt_fill_32bit: \ + srl %sp, 0, %sp; \ + lduwa [%sp + 0x00] %asi, %l0; \ + lduwa [%sp + 0x04] %asi, %l1; \ + lduwa [%sp + 0x08] %asi, %l2; \ + lduwa [%sp + 0x0c] %asi, %l3; \ + lduwa [%sp + 0x10] %asi, %l4; \ + lduwa [%sp + 0x14] %asi, %l5; \ + lduwa [%sp + 0x18] %asi, %l6; \ + lduwa [%sp + 0x1c] %asi, %l7; \ + lduwa [%sp + 0x20] %asi, %i0; \ + lduwa [%sp + 0x24] %asi, %i1; \ + lduwa [%sp + 0x28] %asi, %i2; \ + lduwa [%sp + 0x2c] %asi, %i3; \ + lduwa [%sp + 0x30] %asi, %i4; \ + lduwa [%sp + 0x34] %asi, %i5; \ + lduwa [%sp + 0x38] %asi, %i6; \ + lduwa [%sp + 0x3c] %asi, %i7; \ + ba,pt %xcc, user_rtt_pre_restore; \ + restored; \ + nop; nop; nop; nop; nop; \ + nop; nop; nop; nop; nop; \ + ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup; + + #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP) #define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP) #define FILL_3_NORMAL FILL_0_NORMAL -- cgit v1.2.3 From 766f861fbbd968a1850295ed6dec4504b4500dcc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:01:45 -0800 Subject: [SPARC64]: SUN4V hypervisor interface defines. Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 2072 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 2072 insertions(+) create mode 100644 include/asm-sparc64/hypervisor.h (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h new file mode 100644 index 00000000000..9c8e453abe9 --- /dev/null +++ b/include/asm-sparc64/hypervisor.h @@ -0,0 +1,2072 @@ +#ifndef _SPARC64_HYPERVISOR_H +#define _SPARC64_HYPERVISOR_H + +/* Sun4v hypervisor interfaces and defines. + * + * Hypervisor calls are made via traps to software traps number 0x80 + * and above. Registers %o0 to %o5 serve as argument, status, and + * return value registers. + * + * There are two kinds of these traps. First there are the normal + * "fast traps" which use software trap 0x80 and encode the function + * to invoke by number in register %o5. 
Argument and return value + * handling is as follows: + * + * ----------------------------------------------- + * | %o5 | function number | undefined | + * | %o0 | argument 0 | return status | + * | %o1 | argument 1 | return value 1 | + * | %o2 | argument 2 | return value 2 | + * | %o3 | argument 3 | return value 3 | + * | %o4 | argument 4 | return value 4 | + * ----------------------------------------------- + * + * The second type are "hyper-fast traps" which encode the function + * number in the software trap number itself. So these use trap + * numbers > 0x80. The register usage for hyper-fast traps is as + * follows: + * + * ----------------------------------------------- + * | %o0 | argument 0 | return status | + * | %o1 | argument 1 | return value 1 | + * | %o2 | argument 2 | return value 2 | + * | %o3 | argument 3 | return value 3 | + * | %o4 | argument 4 | return value 4 | + * ----------------------------------------------- + * + * Registers providing explicit arguments to the hypervisor calls + * are volatile across the call. Upon return their values are + * undefined unless explicitly specified as containing a particular + * return value by the specific call. The return status is always + * returned in register %o0, zero indicates a successful execution of + * the hypervisor call and other values indicate an error status as + * defined below. So, for example, if a hyper-fast trap takes + * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across + * the call and %o3, %o4, and %o5 would be preserved. + * + * If the hypervisor trap is invalid, or the fast trap function number + * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits + * of the argument and return values are significant. + */ + +/* Trap numbers. */ +#define HV_FAST_TRAP 0x80 +#define HV_MMU_MAP_ADDR_TRAP 0x83 +#define HV_MMU_UNMAP_ADDR_TRAP 0x84 +#define HV_TTRACE_ADDENTRY_TRAP 0x85 +#define HV_CORE_TRAP 0xff + +/* Error codes. */ +#define HV_EOK 0 /* Successful return */ +#define HV_ENOCPU 1 /* Invalid CPU id */ +#define HV_ENORADDR 2 /* Invalid real address */ +#define HV_ENOINTR 3 /* Invalid interrupt id */ +#define HV_EBADPGSZ 4 /* Invalid pagesize encoding */ +#define HV_EBADTSB 5 /* Invalid TSB description */ +#define HV_EINVAL 6 /* Invalid argument */ +#define HV_EBADTRAP 7 /* Invalid function number */ +#define HV_EBADALIGN 8 /* Invalid address alignment */ +#define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */ +#define HV_ENOACCESS 10 /* No access to resource */ +#define HV_EIO 11 /* I/O error */ +#define HV_ECPUERROR 12 /* CPU in error state */ +#define HV_ENOTSUPPORTED 13 /* Function not supported */ +#define HV_ENOMAP 14 /* No mapping found */ +#define HV_ETOOMANY 15 /* Too many items specified */ + +/* mach_exit() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_MACH_EXIT + * ARG0: exit code + * ERRORS: This service does not return. + * + * Stop all CPUs in the virtual domain and place them into the stopped + * state. The 64-bit exit code may be passed to a service entity as + * the domain's exit status. On systems without a service entity, the + * domain will undergo a reset, and the boot firmware will be + * reloaded. + * + * This function will never return to the guest that invokes it. + * + * Note: By convention an exit code of zero denotes a successful exit by + * the guest code. A non-zero exit code denotes a guest specific + * error indication. + * + */ +#define HV_FAST_MACH_EXIT 0x00 + +/* Domain services. 
*/ + +/* mach_desc() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_MACH_DESC + * ARG0: buffer + * ARG1: length + * RET0: status + * RET1: length + * ERRORS: HV_EBADALIGN Buffer is badly aligned + * HV_ENORADDR Buffer is at an illegal real address. + * HV_EINVAL Buffer length is too small for complete + * machine description. + * + * Copy the most current machine description into the buffer indicated + * by the real address in ARG0. The buffer provided must be 16 byte + * aligned. Upon success or HV_EINVAL, this service returns the + * actual size of the machine description in the RET1 return value. + * + * Note: A method of determining the appropriate buffer size for the + * machine description is to first call this service with a buffer + * length of 0 bytes. + */ +#define HV_FAST_MACH_DESC 0x01 + +/* mach_sir() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_MACH_SIR + * ERRORS: This service does not return. + * + * Perform a software initiated reset of the virtual machine domain. + * All CPUs are captured as soon as possible, all hardware devices are + * returned to the entry default state, and the domain is restarted at + * the SIR (trap type 0x04) real trap table (RTBA) entry point on one + * of the CPUs. The single CPU restarted is selected as determined by + * platform specific policy. Memory is preserved across this + * operation. + */ +#define HV_FAST_MACH_SIR 0x02 + +/* mach_set_soft_state() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE + * ARG0: software state + * ARG1: software state description pointer + * RET0: status + * ERRORS: EINVAL software state not valid or software state + * description is not NULL terminated + * ENORADDR software state description pointer is not a + * valid real address + * EBADALIGN software state description is not correctly + * aligned + * + * This allows the guest to report its soft state to the hypervisor. There + * are two primary components to this state. The first part states whether + * the guest software is running or not. The second contains optional + * details specific to the software. + * + * The software state argument is defined below in HV_SOFT_STATE_*, and + * indicates whether the guest is operating normally or in a transitional + * state. + * + * The software state description argument is a real address of a data buffer + * of size 32 bytes aligned on a 32-byte boundary. It is treated as a NULL + * terminated 7-bit ASCII string of up to 31 characters not including the + * NULL termination. + */ +#define HV_FAST_MACH_SET_SOFT_STATE 0x03 +#define HV_SOFT_STATE_NORMAL 0x01 +#define HV_SOFT_STATE_TRANSITION 0x02 + +/* mach_get_soft_state() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE + * ARG0: software state description pointer + * RET0: status + * RET1: software state + * ERRORS: ENORADDR software state description pointer is not a + * valid real address + * EBADALIGN software state description is not correctly + * aligned + * + * Retrieve the current value of the guest's software state. The rules + * for the software state pointer are the same as for mach_set_soft_state() + * above. + */ +#define HV_FAST_MACH_GET_SOFT_STATE 0x04 + +/* CPU services. + * + * CPUs represent devices that can execute software threads. A single + * chip that contains multiple cores or strands is represented as + * multiple CPUs with unique CPU identifiers. CPUs are exported to + * OBP via the machine description (and to the OS via the OBP device + * tree).
CPUs are always in one of three states: stopped, running, + * or error. + * + * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a + * CPU within a logical domain. Operations that are to be performed + * on multiple CPUs specify them via a CPU list. A CPU list is an + * array in real memory, of which each 16-bit word is a CPU ID. CPU + * lists are passed through the API as two arguments. The first is + * the number of entries (16-bit words) in the CPU list, and the + * second is the (real address) pointer to the CPU ID list. + */ + +/* cpu_start() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_START + * ARG0: CPU ID + * ARG1: PC + * ARG2: RTBA + * ARG3: target ARG0 + * RET0: status + * ERRORS: ENOCPU Invalid CPU ID + * EINVAL Target CPU ID is not in the stopped state + * ENORADDR Invalid PC or RTBA real address + * EBADALIGN Unaligned PC or unaligned RTBA + * EWOULDBLOCK Starting resources are not available + * + * Start CPU with given CPU ID with PC in %pc and with a real trap + * base address value of RTBA. The indicated CPU must be in the + * stopped state. The supplied RTBA must be aligned on a 256 byte + * boundary. On successful completion, the specified CPU will be in + * the running state and will be supplied with "target ARG0" in %o0 + * and RTBA in %tba. + */ +#define HV_FAST_CPU_START 0x10 + +/* cpu_stop() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_STOP + * ARG0: CPU ID + * RET0: status + * ERRORS: ENOCPU Invalid CPU ID + * EINVAL Target CPU ID is the current cpu + * EINVAL Target CPU ID is not in the running state + * EWOULDBLOCK Stopping resources are not available + * ENOTSUPPORTED Not supported on this platform + * + * The specified CPU is stopped. The indicated CPU must be in the + * running state. On completion, it will be in the stopped state. It + * is not legal to stop the current CPU. + * + * Note: As this service cannot be used to stop the current cpu, this service + * may not be used to stop the last running CPU in a domain. To stop + * and exit a running domain, a guest must use the mach_exit() service. + */ +#define HV_FAST_CPU_STOP 0x11 + +/* cpu_yield() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_YIELD + * RET0: status + * ERRORS: No possible error. + * + * Suspend execution on the current CPU. Execution will resume when + * an interrupt (device, %stick_compare, or cross-call) is targeted to + * the CPU. On some CPUs, this API may be used by the hypervisor to + * save power by disabling hardware strands. + */ +#define HV_FAST_CPU_YIELD 0x12 + + +/* cpu_qconf() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_QCONF + * ARG0: queue + * ARG1: base real address + * ARG2: number of entries + * RET0: status + * ERRORS: ENORADDR Invalid base real address + * EINVAL Invalid queue or number of entries is less + * than 2 or too large. + * EBADALIGN Base real address is not correctly aligned + * for size. + * + * Configure the given queue to be placed at the given base real + * address, with the given number of entries. The number of entries + * must be a power of 2. The base real address must be aligned + * exactly to match the queue size. Each queue entry is 64 bytes + * long, so for example a 32 entry queue must be aligned on a 2048 + * byte real address boundary. + * + * The specified queue is unconfigured if the number of entries is given as zero.
+ * For the current version of this API service, the argument queue is defined + * as follows: + * queue description + * ----- ------------------------- + * 0x3c cpu mondo queue + * 0x3d device mondo queue + * 0x3e resumable error queue + * 0x3f non-resumable error queue + * + * Note: The maximum number of entries for each queue for a specific cpu may + * be determined from the machine description. + */ +#define HV_FAST_CPU_QCONF 0x14 +#define HV_CPU_QUEUE_CPU_MONDO 0x3c +#define HV_CPU_QUEUE_DEVICE_MONDO 0x3d +#define HV_CPU_QUEUE_RES_ERROR 0x3e +#define HV_CPU_QUEUE_NONRES_ERROR 0x3f + +/* cpu_qinfo() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_QINFO + * ARG0: queue + * RET0: status + * RET1: base real address + * RET2: number of entries + * ERRORS: EINVAL Invalid queue + * + * Return the configuration info for the given queue. The base real + * address and number of entries of the defined queue are returned. + * The queue argument values are the same as for cpu_qconf() above. + * + * If the specified queue is a valid queue number, but no queue has + * been defined, the number of entries will be set to zero and the + * base real address returned is undefined. + */ +#define HV_FAST_CPU_QINFO 0x15 + +/* cpu_mondo_send() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_MONDO_SEND + * ARG0-1: CPU list + * ARG2: data real address + * RET0: status + * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list + * is not 2-byte aligned. + * ENORADDR Invalid data mondo address, or invalid cpu list + * address. + * ENOCPU Invalid cpu in CPU list + * EWOULDBLOCK Some or all of the listed CPUs did not receive + * the mondo + * EINVAL CPU list includes caller's CPU ID + * + * Send a mondo interrupt to the CPUs in the given CPU list with the + * 64-bytes at the given data real address. The data must be 64-byte + * aligned. The mondo data will be delivered to the cpu_mondo queues + * of the recipient CPUs. + * + * In all cases, error or not, the CPUs in the CPU list to which the + * mondo has been successfully delivered will be indicated by having + * their entry in CPU list updated with the value 0xffff. + */ +#define HV_FAST_CPU_MONDO_SEND 0x42 + +/* cpu_myid() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_MYID + * RET0: status + * RET1: CPU ID + * ERRORS: No errors defined. + * + * Return the hypervisor ID handle for the current CPU. Used by a + * virtual CPU to discover its own identity. + */ +#define HV_FAST_CPU_MYID 0x16 + +/* cpu_state() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_STATE + * ARG0: CPU ID + * RET0: status + * RET1: state + * ERRORS: ENOCPU Invalid CPU ID + * + * Retrieve the current state of the CPU with the given CPU ID. + */ +#define HV_FAST_CPU_STATE 0x17 +#define HV_CPU_STATE_STOPPED 0x01 +#define HV_CPU_STATE_RUNNING 0x02 +#define HV_CPU_STATE_ERROR 0x03 + +/* cpu_set_rtba() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_SET_RTBA + * ARG0: RTBA + * RET0: status + * RET1: previous RTBA + * ERRORS: ENORADDR Invalid RTBA real address + * EBADALIGN RTBA is incorrectly aligned for a trap table + * + * Set the real trap base address of the local cpu to the given RTBA. + * The supplied RTBA must be aligned on a 256 byte boundary. Upon + * success the previous value of the RTBA is returned in RET1. + * + * Note: This service does not affect %tba + */ +#define HV_FAST_CPU_SET_RTBA 0x18 + +/* cpu_get_rtba() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_GET_RTBA + * RET0: status + * RET1: current RTBA + * ERRORS: No possible error.
+ *
+ * Returns the current value of RTBA in RET1.
+ */
+#define HV_FAST_CPU_GET_RTBA 0x19
+
+/* MMU services.
+ *
+ * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
+ */
+#ifndef __ASSEMBLY__
+struct hv_tsb_descr {
+	unsigned short	pgsz_idx;
+	unsigned short	assoc;
+	unsigned int	num_ttes;	/* in TTEs */
+	unsigned int	ctx_idx;
+	unsigned int	pgsz_mask;
+	unsigned long	tsb_base;
+	unsigned long	resv;
+};
+#endif
+#define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00
+#define HV_TSB_DESCR_ASSOC_OFFSET 0x02
+#define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04
+#define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08
+#define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c
+#define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10
+#define HV_TSB_DESCR_RESV_OFFSET 0x18
+
+/* Page size bitmask. */
+#define HV_PGSZ_MASK_8K (1 << 0)
+#define HV_PGSZ_MASK_64K (1 << 1)
+#define HV_PGSZ_MASK_512K (1 << 2)
+#define HV_PGSZ_MASK_4MB (1 << 3)
+#define HV_PGSZ_MASK_32MB (1 << 4)
+#define HV_PGSZ_MASK_256MB (1 << 5)
+#define HV_PGSZ_MASK_2GB (1 << 6)
+#define HV_PGSZ_MASK_16GB (1 << 7)
+
+/* Page size index. The value given in the TSB descriptor must correspond
+ * to the smallest page size specified in the pgsz_mask page size bitmask.
+ */
+#define HV_PGSZ_IDX_8K 0
+#define HV_PGSZ_IDX_64K 1
+#define HV_PGSZ_IDX_512K 2
+#define HV_PGSZ_IDX_4MB 3
+#define HV_PGSZ_IDX_32MB 4
+#define HV_PGSZ_IDX_256MB 5
+#define HV_PGSZ_IDX_2GB 6
+#define HV_PGSZ_IDX_16GB 7
+
+/* MMU fault status area.
+ *
+ * MMU related faults have their status and fault address information
+ * placed into a memory region made available by privileged code. Each
+ * virtual processor must make a mmu_fault_area_conf() call to tell the
+ * hypervisor where that processor's fault status should be stored.
+ *
+ * The fault status block is a multiple of 64 bytes and must be aligned
+ * on a 64-byte boundary.
+ */
+#ifndef __ASSEMBLY__
+struct hv_fault_status {
+	unsigned long	i_fault_type;
+	unsigned long	i_fault_addr;
+	unsigned long	i_fault_ctx;
+	unsigned long	i_reserved[5];
+	unsigned long	d_fault_type;
+	unsigned long	d_fault_addr;
+	unsigned long	d_fault_ctx;
+	unsigned long	d_reserved[5];
+};
+#endif
+#define HV_FAULT_I_TYPE_OFFSET 0x00
+#define HV_FAULT_I_ADDR_OFFSET 0x08
+#define HV_FAULT_I_CTX_OFFSET 0x10
+#define HV_FAULT_D_TYPE_OFFSET 0x40
+#define HV_FAULT_D_ADDR_OFFSET 0x48
+#define HV_FAULT_D_CTX_OFFSET 0x50
+
+#define HV_FAULT_TYPE_FAST_MISS 1
+#define HV_FAULT_TYPE_FAST_PROT 2
+#define HV_FAULT_TYPE_MMU_MISS 3
+#define HV_FAULT_TYPE_INV_RA 4
+#define HV_FAULT_TYPE_PRIV_VIOL 5
+#define HV_FAULT_TYPE_PROT_VIOL 6
+#define HV_FAULT_TYPE_NFO 7
+#define HV_FAULT_TYPE_NFO_SEFF 8
+#define HV_FAULT_TYPE_INV_VA 9
+#define HV_FAULT_TYPE_INV_ASI 10
+#define HV_FAULT_TYPE_NC_ATOMIC 11
+#define HV_FAULT_TYPE_PRIV_ACT 12
+#define HV_FAULT_TYPE_RESV1 13
+#define HV_FAULT_TYPE_UNALIGNED 14
+#define HV_FAULT_TYPE_INV_PGSZ 15
+/* Values 16 --> -2 are reserved. */
+#define HV_FAULT_TYPE_MULTIPLE -1
+
+/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
+ * and mmu_{map,unmap}_perm_addr().
+ */
+#define HV_MMU_DMMU 0x01
+#define HV_MMU_IMMU 0x02
+#define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU)
+
+/* mmu_map_addr()
+ * TRAP: HV_MMU_MAP_ADDR_TRAP
+ * ARG0: virtual address
+ * ARG1: mmu context
+ * ARG2: TTE
+ * ARG3: flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
+ * EBADPGSZ Invalid page size value
+ * ENORADDR Invalid real address in TTE
+ *
+ * Create a non-permanent mapping using the given TTE, virtual
+ * address, and mmu context. The flags argument determines which
+ * (data, or instruction, or both) TLB the mapping gets loaded into.
+ *
+ * The behavior is undefined if the valid bit is clear in the TTE.
+ *
+ * Note: This API call is for privileged code to specify temporary translation
+ *       mappings without the need to create and manage a TSB.
+ */
+
+/* mmu_unmap_addr()
+ * TRAP: HV_MMU_UNMAP_ADDR_TRAP
+ * ARG0: virtual address
+ * ARG1: mmu context
+ * ARG2: flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
+ *
+ * Demaps the given virtual address in the given mmu context on this
+ * CPU. This function is intended to be used to demap pages mapped
+ * with mmu_map_addr. This service is equivalent to invoking
+ * mmu_demap_page() with only the current CPU in the CPU list. The
+ * flags argument determines which (data, or instruction, or both) TLB
+ * the mapping gets unmapped from.
+ *
+ * Attempting to perform an unmap operation for a previously defined
+ * permanent mapping will have undefined results.
+ */
+
+/* mmu_tsb_ctx0()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTX0
+ * ARG0: number of TSB descriptions
+ * ARG1: TSB descriptions pointer
+ * RET0: status
+ * ERRORS: ENORADDR Invalid TSB descriptions pointer or
+ * TSB base within a descriptor
+ * EBADALIGN TSB descriptions pointer is not aligned
+ * to an 8-byte boundary, or TSB base
+ * within a descriptor is not aligned for
+ * the given TSB size
+ * EBADPGSZ Invalid page size in a TSB descriptor
+ * EBADTSB Invalid associativity or size in a TSB
+ * descriptor
+ * EINVAL Invalid number of TSB descriptions, or
+ * invalid context index in a TSB
+ * descriptor, or index page size not
+ * equal to smallest page size in page
+ * size bitmask field.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * context zero. The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: The maximum number of TSBs available to a virtual CPU is given by the
+ *       mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
+ *       machine description.
+ */
+#define HV_FAST_MMU_TSB_CTX0 0x20
+
+/* mmu_tsb_ctxnon0()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTXNON0
+ * ARG0: number of TSB descriptions
+ * ARG1: TSB descriptions pointer
+ * RET0: status
+ * ERRORS: Same as for mmu_tsb_ctx0() above.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * non-zero contexts. The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: A maximum of 16 TSBs may be specified in the TSB description list.
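+ *
+ * A minimal sketch only (again assuming the hypothetical
+ * sun4v_fast_trap() helper from the cpu_qconf() example; each TSB
+ * entry is 16 bytes, so the 512-entry TSB below is size-aligned by
+ * construction):
+ *
+ *	static unsigned char tsb[512 * 16]
+ *		__attribute__((__aligned__(512 * 16)));
+ *	static struct hv_tsb_descr desc = {
+ *		.pgsz_idx	= HV_PGSZ_IDX_8K,
+ *		.assoc		= 1,
+ *		.num_ttes	= 512,
+ *		.ctx_idx	= 0,
+ *		.pgsz_mask	= HV_PGSZ_MASK_8K,
+ *	};
+ *
+ *	desc.tsb_base = __pa(tsb);
+ *	status = sun4v_fast_trap(HV_FAST_MMU_TSB_CTXNON0, 1, __pa(&desc));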
+ */
+#define HV_FAST_MMU_TSB_CTXNON0 0x21
+
+/* mmu_demap_page()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_DEMAP_PAGE
+ * ARG0: reserved, must be zero
+ * ARG1: reserved, must be zero
+ * ARG2: virtual address
+ * ARG3: mmu context
+ * ARG4: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid virtual address, context, or
+ * flags value
+ * ENOTSUPPORTED ARG0 or ARG1 is non-zero
+ *
+ * Demaps any page mapping of the given virtual address in the given
+ * mmu context for the current virtual CPU. Any virtually tagged
+ * caches are guaranteed to be kept consistent. The flags argument
+ * determines which TLB (instruction, or data, or both) participate in
+ * the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_PAGE 0x22
+
+/* mmu_demap_ctx()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_DEMAP_CTX
+ * ARG0: reserved, must be zero
+ * ARG1: reserved, must be zero
+ * ARG2: mmu context
+ * ARG3: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid context or flags value
+ * ENOTSUPPORTED ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the given context for the current virtual CPU. Any virtually
+ * tagged caches are guaranteed to be kept consistent. The flags
+ * argument determines which TLB (instruction, or data, or both)
+ * participate in the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_CTX 0x23
+
+/* mmu_demap_all()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_DEMAP_ALL
+ * ARG0: reserved, must be zero
+ * ARG1: reserved, must be zero
+ * ARG2: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid flags value
+ * ENOTSUPPORTED ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the current virtual CPU. Any virtually tagged caches are
+ * guaranteed to be kept consistent. The flags argument determines
+ * which TLB (instruction, or data, or both) participate in the
+ * operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_ALL 0x24
+
+/* mmu_map_perm_addr()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
+ * ARG0: virtual address
+ * ARG1: reserved, must be zero
+ * ARG2: TTE
+ * ARG3: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid virtual address or flags value
+ * EBADPGSZ Invalid page size value
+ * ENORADDR Invalid real address in TTE
+ * ETOOMANY Too many mappings (max of 8 reached)
+ *
+ * Create a permanent mapping using the given TTE and virtual address
+ * for context 0 on the calling virtual CPU. A maximum of 8 such
+ * permanent mappings may be specified by privileged code. Mappings
+ * may be removed with mmu_unmap_perm_addr().
+ *
+ * The behavior is undefined if a TTE with the valid bit clear is given.
+ *
+ * Note: This call is used to specify address space mappings for which
+ *       privileged code does not expect to receive misses. For example,
+ *       this mechanism can be used to map kernel nucleus code and data.
+ */
+#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
+
+/* mmu_fault_area_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
+ * ARG0: real address
+ * RET0: status
+ * RET1: previous mmu fault area real address
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Invalid alignment for fault area
+ *
+ * Configure the MMU fault status area for the calling CPU.
+ * A 64-byte aligned real address specifies where MMU fault status
+ * information is placed. The return value is the previously specified
+ * area, or 0 for the first invocation. Specifying a fault area at
+ * real address 0 is not allowed.
+ */
+#define HV_FAST_MMU_FAULT_AREA_CONF 0x26
+
+/* mmu_enable()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_ENABLE
+ * ARG0: enable flag
+ * ARG1: return target address
+ * RET0: status
+ * ERRORS: ENORADDR Invalid real address when disabling
+ * translation.
+ * EBADALIGN The return target address is not
+ * aligned to an instruction.
+ * EINVAL The enable flag requests the current
+ * operating mode (e.g. disable if already
+ * disabled)
+ *
+ * Enable or disable virtual address translation for the calling CPU
+ * within the virtual machine domain. If the enable flag is zero,
+ * translation is disabled; any non-zero value will enable
+ * translation.
+ *
+ * When this function returns, the newly selected translation mode
+ * will be active. If the mmu is being enabled, then the return
+ * target address is a virtual address, else it is a real address.
+ *
+ * Upon successful completion, control will be returned to the given
+ * return target address (ie. the cpu will jump to that address). On
+ * failure, the previous mmu mode remains and the trap simply returns
+ * as normal with the appropriate error code in RET0.
+ */
+#define HV_FAST_MMU_ENABLE 0x27
+
+/* mmu_unmap_perm_addr()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR
+ * ARG0: virtual address
+ * ARG1: reserved, must be zero
+ * ARG2: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid virtual address or flags value
+ * ENOMAP Specified mapping was not found
+ *
+ * Demaps any permanent page mapping (established via
+ * mmu_map_perm_addr()) at the given virtual address for context 0 on
+ * the current virtual CPU. Any virtually tagged caches are guaranteed
+ * to be kept consistent.
+ */
+#define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28
+
+/* mmu_tsb_ctx0_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO
+ * ARG0: max TSBs
+ * ARG1: buffer pointer
+ * RET0: status
+ * RET1: number of TSBs
+ * ERRORS: EINVAL Supplied buffer is too small
+ * EBADALIGN The buffer pointer is badly aligned
+ * ENORADDR Invalid real address for buffer pointer
+ *
+ * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
+ * into the provided buffer. The size of the buffer is given in ARG0
+ * (max TSBs) in terms of the number of TSB description entries.
+ *
+ * Upon return, RET1 always contains the number of TSB descriptions
+ * previously configured. If zero TSBs were configured, EOK is
+ * returned with RET1 containing 0.
+ */
+#define HV_FAST_MMU_TSB_CTX0_INFO 0x29
+
+/* mmu_tsb_ctxnon0_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO
+ * ARG0: max TSBs
+ * ARG1: buffer pointer
+ * RET0: status
+ * RET1: number of TSBs
+ * ERRORS: EINVAL Supplied buffer is too small
+ * EBADALIGN The buffer pointer is badly aligned
+ * ENORADDR Invalid real address for buffer pointer
+ *
+ * Return the TSB configuration as previously defined by
+ * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer
+ * is given in ARG0 (max TSBs) in terms of the number of TSB
+ * description entries.
+ *
+ * Upon return, RET1 always contains the number of TSB descriptions
+ * previously configured. If zero TSBs were configured, EOK is
+ * returned with RET1 containing 0.
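+ *
+ * As a sketch only (not part of the API text; sun4v_fast_trap_ret1()
+ * is a hypothetical variant of the earlier helper that hands RET1
+ * back through its final pointer argument):
+ *
+ *	struct hv_tsb_descr info[16];
+ *	unsigned long num_tsbs;
+ *
+ *	status = sun4v_fast_trap_ret1(HV_FAST_MMU_TSB_CTXNON0_INFO,
+ *				      16, __pa(info), &num_tsbs);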
+ */
+#define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a
+
+/* mmu_fault_area_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO
+ * RET0: status
+ * RET1: fault area real address
+ * ERRORS: No errors defined.
+ *
+ * Return the currently defined MMU fault status area for the current
+ * CPU. The real address of the fault status area is returned in
+ * RET1, or 0 is returned in RET1 if no fault status area is defined.
+ *
+ * Note: mmu_fault_area_conf() may be called with the return value (RET1)
+ *       from this service if there is a need to save and restore the fault
+ *       area for a cpu.
+ */
+#define HV_FAST_MMU_FAULT_AREA_INFO 0x2b
+
+/* Cache and Memory services. */
+
+/* mem_scrub()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MEM_SCRUB
+ * ARG0: real address
+ * ARG1: length
+ * RET0: status
+ * RET1: length scrubbed
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Start address or length are not correctly
+ * aligned
+ * EINVAL Length is zero
+ *
+ * Zero the memory contents in the range real address to real address
+ * plus length minus 1. Also, valid ECC will be generated for that
+ * memory address range. Scrubbing is started at the given real
+ * address, but may not scrub the entire given length. The actual
+ * length scrubbed will be returned in RET1.
+ *
+ * The real address and length must be aligned on an 8K boundary, or
+ * contain the start address and length from a sun4v error report.
+ *
+ * Note: There are two uses for this function. The first use is to block clear
+ *       and initialize memory and the second is to scrub an uncorrectable
+ *       error reported via a resumable or non-resumable trap. The second
+ *       use requires the arguments to be equal to the real address and length
+ *       provided in a sun4v memory error report.
+ */
+#define HV_FAST_MEM_SCRUB 0x31
+
+/* mem_sync()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MEM_SYNC
+ * ARG0: real address
+ * ARG1: length
+ * RET0: status
+ * RET1: length synced
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Start address or length are not correctly
+ * aligned
+ * EINVAL Length is zero
+ *
+ * Force the next access within the range real address to real address
+ * plus length minus 1 to be fetched from main system memory. Less than
+ * the given length may be synced; the actual amount synced is
+ * returned in RET1. The real address and length must be aligned on
+ * an 8K boundary.
+ */
+#define HV_FAST_MEM_SYNC 0x32
+
+/* Time of day services.
+ *
+ * The hypervisor maintains the time of day on a per-domain basis.
+ * Changing the time of day in one domain does not affect the time of
+ * day on any other domain.
+ *
+ * Time is described by a single unsigned 64-bit word which is the
+ * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
+ * 1970).
+ */
+
+/* tod_get()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TOD_GET
+ * RET0: status
+ * RET1: TOD
+ * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
+ * ENOTSUPPORTED If TOD not supported on this platform
+ *
+ * Return the current time of day. May block if TOD access is
+ * temporarily not possible.
+ */
+#define HV_FAST_TOD_GET 0x50
+
+/* tod_set()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TOD_SET
+ * ARG0: TOD
+ * RET0: status
+ * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
+ * ENOTSUPPORTED If TOD not supported on this platform
+ *
+ * The current time of day is set to the value specified in ARG0. May
+ * block if TOD access is temporarily not possible.
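+ *
+ * For illustration only (the same hypothetical sun4v_fast_trap()
+ * helper as in earlier sketches; 'ts' is assumed to be a struct
+ * timespec holding the wall time to propagate to the hypervisor):
+ *
+ *	status = sun4v_fast_trap(HV_FAST_TOD_SET,
+ *				 (unsigned long) ts.tv_sec);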
+ */
+#define HV_FAST_TOD_SET 0x51
+
+/* Console services */
+
+/* con_getchar()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CONS_GETCHAR
+ * RET0: status
+ * RET1: character
+ * ERRORS: EWOULDBLOCK No character available.
+ *
+ * Returns a character from the console device. If no character is
+ * available then an EWOULDBLOCK error is returned. If a character is
+ * available, then the returned status is EOK and the character value
+ * is in RET1.
+ *
+ * A virtual BREAK is represented by the 64-bit value -1.
+ *
+ * A virtual HUP signal is represented by the 64-bit value -2.
+ */
+#define HV_FAST_CONS_GETCHAR 0x60
+
+/* con_putchar()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CONS_PUTCHAR
+ * ARG0: character
+ * RET0: status
+ * ERRORS: EINVAL Illegal character
+ * EWOULDBLOCK Output buffer currently full, would block
+ *
+ * Send a character to the console device. Only character values
+ * between 0 and 255 may be used. Values outside this range are
+ * invalid except for the 64-bit value -1 which is used to send a
+ * virtual BREAK.
+ */
+#define HV_FAST_CONS_PUTCHAR 0x61
+
+/* Trap trace services.
+ *
+ * The hypervisor provides a trap tracing capability for privileged
+ * code running on each virtual CPU. Privileged code provides a
+ * round-robin trap trace queue within which the hypervisor writes
+ * 64-byte entries detailing hyperprivileged traps taken on behalf of
+ * privileged code. This is provided as a debugging capability for
+ * privileged code.
+ *
+ * The trap trace control structure is 64-bytes long and placed at the
+ * start (offset 0) of the trap trace buffer, and is described as
+ * follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_trap_trace_control {
+	unsigned long	head_offset;
+	unsigned long	tail_offset;
+	unsigned long	__reserved[0x30 / sizeof(unsigned long)];
+};
+#endif
+#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00
+#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08
+
+/* The head offset is the offset of the most recently completed entry
+ * in the trap-trace buffer. The tail offset is the offset of the
+ * next entry to be written. The control structure is owned and
+ * modified by the hypervisor. A guest may not modify the control
+ * structure contents. Attempts to do so will result in undefined
+ * behavior for the guest.
+ *
+ * Each trap trace buffer entry is laid out as follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_trap_trace_entry {
+	unsigned char	type;		/* Hypervisor or guest entry?	*/
+	unsigned char	hpstate;	/* Hyper-privileged state	*/
+	unsigned char	tl;		/* Trap level			*/
+	unsigned char	gl;		/* Global register level	*/
+	unsigned short	tt;		/* Trap type			*/
+	unsigned short	tag;		/* Extended trap identifier	*/
+	unsigned long	tstate;		/* Trap state			*/
+	unsigned long	tick;		/* Tick				*/
+	unsigned long	tpc;		/* Trap PC			*/
+	unsigned long	f1;		/* Entry specific		*/
+	unsigned long	f2;		/* Entry specific		*/
+	unsigned long	f3;		/* Entry specific		*/
+	unsigned long	f4;		/* Entry specific		*/
+};
+#endif
+#define HV_TRAP_TRACE_ENTRY_TYPE 0x00
+#define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01
+#define HV_TRAP_TRACE_ENTRY_TL 0x02
+#define HV_TRAP_TRACE_ENTRY_GL 0x03
+#define HV_TRAP_TRACE_ENTRY_TT 0x04
+#define HV_TRAP_TRACE_ENTRY_TAG 0x06
+#define HV_TRAP_TRACE_ENTRY_TSTATE 0x08
+#define HV_TRAP_TRACE_ENTRY_TICK 0x10
+#define HV_TRAP_TRACE_ENTRY_TPC 0x18
+#define HV_TRAP_TRACE_ENTRY_F1 0x20
+#define HV_TRAP_TRACE_ENTRY_F2 0x28
+#define HV_TRAP_TRACE_ENTRY_F3 0x30
+#define HV_TRAP_TRACE_ENTRY_F4 0x38
+
+/* The type field is encoded as follows.
+ */
+#define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */
+#define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */
+#define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */
+
+/* ttrace_buf_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TTRACE_BUF_CONF
+ * ARG0: real address
+ * ARG1: number of entries
+ * RET0: status
+ * RET1: number of entries
+ * ERRORS: ENORADDR Invalid real address
+ * EINVAL Size is too small
+ * EBADALIGN Real address not aligned on 64-byte boundary
+ *
+ * Requests hypervisor trap tracing and declares a virtual CPU's trap
+ * trace buffer to the hypervisor. The real address supplies the real
+ * base address of the trap trace queue and must be 64-byte aligned.
+ * Specifying a value of 0 for the number of entries disables trap
+ * tracing for the calling virtual CPU. The buffer allocated must be
+ * sized for a power of two number of 64-byte trap trace entries plus
+ * an initial 64-byte control structure.
+ *
+ * This may be invoked any number of times so that a virtual CPU may
+ * relocate a trap trace buffer or create "snapshots" of information.
+ *
+ * If the real address is illegal or badly aligned, then trap tracing
+ * is disabled and an error is returned.
+ *
+ * Upon failure with EINVAL, this service call returns in RET1 the
+ * minimum number of buffer entries required. Upon other failures
+ * RET1 is undefined.
+ */
+#define HV_FAST_TTRACE_BUF_CONF 0x90
+
+/* ttrace_buf_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TTRACE_BUF_INFO
+ * RET0: status
+ * RET1: real address
+ * RET2: size
+ * ERRORS: None defined.
+ *
+ * Returns the size and location of the previously declared trap-trace
+ * buffer. In the event that no buffer was previously defined, or the
+ * buffer is disabled, this call will return a size of zero bytes.
+ */
+#define HV_FAST_TTRACE_BUF_INFO 0x91
+
+/* ttrace_enable()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TTRACE_ENABLE
+ * ARG0: enable
+ * RET0: status
+ * RET1: previous enable state
+ * ERRORS: EINVAL No trap trace buffer currently defined
+ *
+ * Enable or disable trap tracing, and return the previous enabled
+ * state in RET1. Future systems may define various flags for the
+ * enable argument (ARG0); for the moment a guest should pass
+ * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
+ * tracing - which will ensure future compatibility.
+ */
+#define HV_FAST_TTRACE_ENABLE 0x92
+
+/* ttrace_freeze()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TTRACE_FREEZE
+ * ARG0: freeze
+ * RET0: status
+ * RET1: previous freeze state
+ * ERRORS: EINVAL No trap trace buffer currently defined
+ *
+ * Freeze or unfreeze trap tracing, returning the previous freeze
+ * state in RET1. A guest should pass a non-zero value to freeze and
+ * a zero value to unfreeze all tracing. The returned previous state
+ * is 0 for not frozen and 1 for frozen.
+ */
+#define HV_FAST_TTRACE_FREEZE 0x93
+
+/* ttrace_addentry()
+ * TRAP: HV_TTRACE_ADDENTRY_TRAP
+ * ARG0: tag (16-bits)
+ * ARG1: data word 0
+ * ARG2: data word 1
+ * ARG3: data word 2
+ * ARG4: data word 3
+ * RET0: status
+ * ERRORS: EINVAL No trap trace buffer currently defined
+ *
+ * Add an entry to the trap trace buffer. Upon return only ARG0/RET0
+ * is modified - none of the other registers holding arguments are
+ * volatile across this hypervisor service.
+ */
+
+/* Core dump services.
+ *
+ * Since the hypervisor virtualizes and thus obscures a lot of the
+ * physical machine layout and state, traditional OS crash dumps can
+ * be difficult to diagnose especially when the problem is a
+ * configuration error of some sort.
+ *
+ * The dump services provide an opaque buffer into which the
+ * hypervisor can place its internal state in order to assist in
+ * debugging such situations. The contents are opaque and extremely
+ * platform and hypervisor implementation specific. The guest, during
+ * a core dump, requests that the hypervisor update any information in
+ * the dump buffer in preparation for being dumped as part of the
+ * domain's memory image.
+ */
+
+/* dump_buf_update()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_DUMP_BUF_UPDATE
+ * ARG0: real address
+ * ARG1: size
+ * RET0: status
+ * RET1: required size of dump buffer
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Real address is not aligned on a 64-byte
+ * boundary
+ * EINVAL Size is non-zero but less than minimum size
+ * required
+ * ENOTSUPPORTED Operation not supported on current logical
+ * domain
+ *
+ * Declare a domain dump buffer to the hypervisor. The real address
+ * provided for the domain dump buffer must be 64-byte aligned. The
+ * size specifies the size of the dump buffer and may be larger than
+ * the minimum size specified in the machine description. The
+ * hypervisor will fill the dump buffer with opaque data.
+ *
+ * Note: A guest may elect to include dump buffer contents as part of a crash
+ *       dump to assist with debugging. This function may be called any number
+ *       of times so that a guest may relocate a dump buffer, or create
+ *       "snapshots" of any dump-buffer information. Each call to
+ *       dump_buf_update() atomically declares the new dump buffer to the
+ *       hypervisor.
+ *
+ * A specified size of 0 unconfigures the dump buffer. If the real
+ * address is illegal or badly aligned, then any currently active dump
+ * buffer is disabled and an error is returned.
+ *
+ * In the event that the call fails with EINVAL, RET1 contains the
+ * minimum size required by the hypervisor for a valid dump buffer.
+ */
+#define HV_FAST_DUMP_BUF_UPDATE 0x94
+
+/* dump_buf_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_DUMP_BUF_INFO
+ * RET0: status
+ * RET1: real address of current dump buffer
+ * RET2: size of current dump buffer
+ * ERRORS: No errors defined.
+ *
+ * Return the currently configured dump buffer description. A
+ * returned size of 0 bytes indicates an undefined dump buffer. In
+ * this case the return address in RET1 is undefined.
+ */
+#define HV_FAST_DUMP_BUF_INFO 0x95
+
+/* Device interrupt services.
+ *
+ * Device interrupts are allocated to system bus bridges by the hypervisor,
+ * and described to OBP in the machine description. OBP then describes
+ * these interrupts to the OS via properties in the device tree.
+ *
+ * Terminology:
+ *
+ *	cpuid		Unique opaque value which represents a target cpu.
+ *
+ *	devhandle	Device handle. It uniquely identifies a device, and
+ *			consists of the lower 28-bits of the hi-cell of the
+ *			first entry of the device's "reg" property in the
+ *			OBP device tree.
+ *
+ *	devino		Device interrupt number. Specifies the relative
+ *			interrupt number within the device. The unique
+ *			combination of devhandle and devino are used to
+ *			identify a specific device interrupt.
+ *
+ *			Note: The devino value is the same as the values in the
+ *			      "interrupts" property or "interrupt-map" property
+ *			      in the OBP device tree for that device.
+ *
+ *	sysino		System interrupt number. A 64-bit unsigned integer
+ *			representing a unique interrupt within a virtual
+ *			machine.
+ *
+ *	intr_state	A flag representing the interrupt state for a given
+ *			sysino. The state values are defined below.
+ *
+ *	intr_enabled	A flag representing the 'enabled' state for a given
+ *			sysino. The enable values are defined below.
+ */
+
+#define HV_INTR_STATE_IDLE 0 /* Nothing pending */
+#define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */
+#define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */
+
+#define HV_INTR_DISABLED 0 /* sysino not enabled */
+#define HV_INTR_ENABLED 1 /* sysino enabled */
+
+/* intr_devino_to_sysino()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO
+ * ARG0: devhandle
+ * ARG1: devino
+ * RET0: status
+ * RET1: sysino
+ * ERRORS: EINVAL Invalid devhandle/devino
+ *
+ * Converts a device specific interrupt number of the given
+ * devhandle/devino into a system specific ino (sysino).
+ */
+#define HV_FAST_INTR_DEVINO2SYSINO 0xa0
+
+/* intr_getenabled()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_GETENABLED
+ * ARG0: sysino
+ * RET0: status
+ * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
+ * ERRORS: EINVAL Invalid sysino
+ *
+ * Returns interrupt enabled state in RET1 for the interrupt defined
+ * by the given sysino.
+ */
+#define HV_FAST_INTR_GETENABLED 0xa1
+
+/* intr_setenabled()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_SETENABLED
+ * ARG0: sysino
+ * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
+ * RET0: status
+ * ERRORS: EINVAL Invalid sysino or intr_enabled value
+ *
+ * Set the 'enabled' state of the interrupt sysino.
+ */
+#define HV_FAST_INTR_SETENABLED 0xa2
+
+/* intr_getstate()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_GETSTATE
+ * ARG0: sysino
+ * RET0: status
+ * RET1: intr_state (HV_INTR_STATE_*)
+ * ERRORS: EINVAL Invalid sysino
+ *
+ * Returns current state of the interrupt defined by the given sysino.
+ */
+#define HV_FAST_INTR_GETSTATE 0xa3
+
+/* intr_setstate()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_SETSTATE
+ * ARG0: sysino
+ * ARG1: intr_state (HV_INTR_STATE_*)
+ * RET0: status
+ * ERRORS: EINVAL Invalid sysino or intr_state value
+ *
+ * Sets the current state of the interrupt described by the given sysino
+ * value.
+ *
+ * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
+ *       interrupt for sysino.
+ */
+#define HV_FAST_INTR_SETSTATE 0xa4
+
+/* intr_gettarget()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_GETTARGET
+ * ARG0: sysino
+ * RET0: status
+ * RET1: cpuid
+ * ERRORS: EINVAL Invalid sysino
+ *
+ * Returns CPU that is the current target of the interrupt defined by
+ * the given sysino. The CPU value returned is undefined if the target
+ * has not been set via intr_settarget().
+ */
+#define HV_FAST_INTR_GETTARGET 0xa5
+
+/* intr_settarget()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_INTR_SETTARGET
+ * ARG0: sysino
+ * ARG1: cpuid
+ * RET0: status
+ * ERRORS: EINVAL Invalid sysino
+ * ENOCPU Invalid cpuid
+ *
+ * Set the target CPU for the interrupt defined by the given sysino.
+ */
+#define HV_FAST_INTR_SETTARGET 0xa6
+
+/* PCI IO services.
+ *
+ * See the terminology descriptions in the device interrupt services
+ * section above as those apply here too. Here are terminology
+ * definitions specific to these PCI IO services:
+ *
+ *	tsbnum		TSB number. Identifies which io-tsb is used.
+ *			For this version of the specification, tsbnum
+ *			must be zero.
+ *
+ *	tsbindex	TSB index. Identifies which entry in the TSB
+ *			is used. The first entry is zero.
+ *
+ *	tsbid		A 64-bit aligned data structure which contains
+ *			a tsbnum and a tsbindex. Bits 63:32 contain the
+ *			tsbnum and bits 31:00 contain the tsbindex.
+ *
+ *	io_attributes	IO attributes for IOMMU mappings. One or more
+ *			of the attribute bits are stored in a 64-bit
+ *			value. The values are defined below.
+ *
+ *	r_addr		64-bit real address
+ *
+ *	pci_device	PCI device address. A PCI device address identifies
+ *			a specific device on a specific PCI bus segment.
+ *			A PCI device address is a 32-bit unsigned integer
+ *			with the following format:
+ *
+ *				00000000.bbbbbbbb.dddddfff.00000000
+ *
+ *			Use the HV_PCI_DEVICE_BUILD() macro to construct
+ *			such values.
+ *
+ *	pci_config_offset
+ *			PCI configuration space offset. For conventional
+ *			PCI a value between 0 and 255. For extended
+ *			configuration space, a value between 0 and 4095.
+ *
+ *			Note: For PCI configuration space accesses, the offset
+ *			      must be aligned to the access size.
+ *
+ *	error_flag	A return value which specifies if the action succeeded
+ *			or failed. 0 means no error, non-0 means some error
+ *			occurred while performing the service.
+ *
+ *	io_sync_direction
+ *			Direction definition for pci_dma_sync(), defined
+ *			below in HV_PCI_SYNC_*.
+ *
+ *	io_page_list	A list of io_page_addresses, an io_page_address is
+ *			a real address.
+ *
+ *	io_page_list_p	A pointer to an io_page_list.
+ *
+ *	"size based byte swap" - Some functions do size based byte swapping
+ *				 which allows sw to access pointers and
+ *				 counters in native form when the processor
+ *				 operates in a different endianness than the
+ *				 IO bus. Size-based byte swapping converts a
+ *				 multi-byte field between big-endian and
+ *				 little-endian format.
+ */
+
+#define HV_PCI_MAP_ATTR_READ 0x01
+#define HV_PCI_MAP_ATTR_WRITE 0x02
+
+#define HV_PCI_DEVICE_BUILD(b,d,f) \
+	((((b) & 0xff) << 16) | \
+	 (((d) & 0x1f) << 11) | \
+	 (((f) & 0x07) << 8))
+
+#define HV_PCI_SYNC_FOR_DEVICE 0x01
+#define HV_PCI_SYNC_FOR_CPU 0x02
+
+/* pci_iommu_map()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOMMU_MAP
+ * ARG0: devhandle
+ * ARG1: tsbid
+ * ARG2: #ttes
+ * ARG3: io_attributes
+ * ARG4: io_page_list_p
+ * RET0: status
+ * RET1: #ttes mapped
+ * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes
+ * EBADALIGN Improperly aligned real address
+ * ENORADDR Invalid real address
+ *
+ * Create IOMMU mappings in the sun4v device defined by the given
+ * devhandle. The mappings are created in the TSB defined by the
+ * tsbnum component of the given tsbid. The first mapping is created
+ * in the TSB index defined by the tsbindex component of the given tsbid.
+ * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
+ * the second at tsbnum, tsbindex + 1, etc.
+ *
+ * All mappings are created with the attributes defined by the io_attributes
+ * argument. The page mapping addresses are described in the io_page_list
+ * defined by the given io_page_list_p, which is a pointer to the io_page_list.
+ * The first entry in the io_page_list is the address for the first iotte, the
+ * 2nd for the 2nd iotte, and so on.
+ *
+ * Each io_page_address in the io_page_list must be appropriately aligned.
+ * #ttes must be greater than zero. For this version of the spec, the tsbnum
+ * component of the given tsbid must be zero.
+ *
+ * Returns the actual number of mappings created, which may be less than
+ * or equal to the argument #ttes.
+ * If the function returns a value which is less than the #ttes, the
+ * caller may continue to call the function with an updated tsbid,
+ * #ttes, io_page_list_p arguments until all pages are mapped.
+ *
+ * Note: This function does not imply an iotte cache flush. The guest must
+ *       demap an entry before re-mapping it.
+ */
+#define HV_FAST_PCI_IOMMU_MAP 0xb0
+
+/* pci_iommu_demap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP
+ * ARG0: devhandle
+ * ARG1: tsbid
+ * ARG2: #ttes
+ * RET0: status
+ * RET1: #ttes demapped
+ * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
+ *
+ * Demap and flush IOMMU mappings in the device defined by the given
+ * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum
+ * component of the given tsbid, starting at the TSB index defined by the
+ * tsbindex component of the given tsbid.
+ *
+ * For this version of the spec, the tsbnum of the given tsbid must be zero.
+ * #ttes must be greater than zero.
+ *
+ * Returns the actual number of ttes demapped, which may be less than or equal
+ * to the argument #ttes. If #ttes demapped is less than #ttes, the caller
+ * may continue to call this function with updated tsbid and #ttes arguments
+ * until all pages are demapped.
+ *
+ * Note: Entries do not have to be mapped to be demapped. A demap of an
+ *       unmapped page will flush the entry from the tte cache.
+ */
+#define HV_FAST_PCI_IOMMU_DEMAP 0xb1
+
+/* pci_iommu_getmap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP
+ * ARG0: devhandle
+ * ARG1: tsbid
+ * RET0: status
+ * RET1: io_attributes
+ * RET2: real address
+ * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
+ * ENOMAP Mapping is not valid, no translation exists
+ *
+ * Read and return the mapping in the device described by the given devhandle
+ * and tsbid. If successful, the io_attributes shall be returned in RET1
+ * and the page address of the mapping shall be returned in RET2.
+ *
+ * For this version of the spec, the tsbnum component of the given tsbid
+ * must be zero.
+ */
+#define HV_FAST_PCI_IOMMU_GETMAP 0xb2
+
+/* pci_iommu_getbypass()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS
+ * ARG0: devhandle
+ * ARG1: real address
+ * ARG2: io_attributes
+ * RET0: status
+ * RET1: io_addr
+ * ERRORS: EINVAL Invalid devhandle/io_attributes
+ * ENORADDR Invalid real address
+ * ENOTSUPPORTED Function not supported in this implementation.
+ *
+ * Create a "special" mapping in the device described by the given devhandle,
+ * for the given real address and attributes. Return the IO address in RET1
+ * if successful.
+ */
+#define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3
+
+/* pci_config_get()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_CONFIG_GET
+ * ARG0: devhandle
+ * ARG1: pci_device
+ * ARG2: pci_config_offset
+ * ARG3: size
+ * RET0: status
+ * RET1: error_flag
+ * RET2: data
+ * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
+ * EBADALIGN pci_config_offset not size aligned
+ * ENOACCESS Access to this offset is not permitted
+ *
+ * Read PCI configuration space for the adapter described by the given
+ * devhandle. Read size (1, 2, or 4) bytes of data from the given
+ * pci_device, at pci_config_offset from the beginning of the device's
+ * configuration space. If there was no error, RET1 is set to zero and
+ * RET2 is set to the data read. Insignificant bits in RET2 are not
+ * guaranteed to have any specific value and therefore must be ignored.
+ *
+ * The data returned in RET2 is size based byte swapped.
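+ *
+ * As a sketch only (sun4v_fast_trap_ret2() is a hypothetical helper
+ * that hands RET1 and RET2 back through pointers; 'devhandle' would
+ * come from the device's OBP "reg" property): reading the 16-bit
+ * vendor ID at configuration space offset 0 of device 1, function 0
+ * on bus 2:
+ *
+ *	unsigned long err, data;
+ *
+ *	status = sun4v_fast_trap_ret2(HV_FAST_PCI_CONFIG_GET, devhandle,
+ *				      HV_PCI_DEVICE_BUILD(2, 1, 0),
+ *				      0, 2, &err, &data);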
+ *
+ * If an error occurs during the read, RET1 is set to a non-zero value.
+ * The given pci_config_offset must be 'size' aligned.
+ */
+#define HV_FAST_PCI_CONFIG_GET 0xb4
+
+/* pci_config_put()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_CONFIG_PUT
+ * ARG0: devhandle
+ * ARG1: pci_device
+ * ARG2: pci_config_offset
+ * ARG3: size
+ * ARG4: data
+ * RET0: status
+ * RET1: error_flag
+ * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
+ * EBADALIGN pci_config_offset not size aligned
+ * ENOACCESS Access to this offset is not permitted
+ *
+ * Write PCI configuration space for the adapter described by the given
+ * devhandle. Write size (1, 2, or 4) bytes of data in a single operation,
+ * at pci_config_offset from the beginning of the device's configuration
+ * space. The data argument contains the data to be written to configuration
+ * space. Prior to writing, the data is size based byte swapped.
+ *
+ * If an error occurs during the write access, no error report is
+ * generated, but RET1 is set to a non-zero value. Otherwise RET1 is zero.
+ * The given pci_config_offset must be 'size' aligned.
+ *
+ * This function is permitted to read from offset zero in the configuration
+ * space described by the given pci_device if necessary to ensure that the
+ * write access to config space completes.
+ */
+#define HV_FAST_PCI_CONFIG_PUT 0xb5
+
+/* pci_peek()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_PEEK
+ * ARG0: devhandle
+ * ARG1: real address
+ * ARG2: size
+ * RET0: status
+ * RET1: error_flag
+ * RET2: data
+ * ERRORS: EINVAL Invalid devhandle or size
+ * EBADALIGN Improperly aligned real address
+ * ENORADDR Bad real address
+ * ENOACCESS Guest access prohibited
+ *
+ * Attempt to read the IO address given by the given devhandle, real address,
+ * and size. Size must be 1, 2, 4, or 8. The read is performed as a single
+ * access operation using the given size. If an error occurs when reading
+ * from the given location, do not generate an error report, but return a
+ * non-zero value in RET1. If the read was successful, return zero in RET1
+ * and return the actual data read in RET2. The data returned is size based
+ * byte swapped.
+ *
+ * Non-significant bits in RET2 are not guaranteed to have any specific value
+ * and therefore must be ignored. If RET1 is returned as non-zero, the data
+ * value is not guaranteed to have any specific value and should be ignored.
+ *
+ * The caller must have permission to read from the given devhandle, real
+ * address, which must be an IO address. The argument real address must be a
+ * size aligned address.
+ *
+ * The hypervisor implementation of this function must block access to any
+ * IO address that the guest does not have explicit permission to access.
+ */
+#define HV_FAST_PCI_PEEK 0xb6
+
+/* pci_poke()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_POKE
+ * ARG0: devhandle
+ * ARG1: real address
+ * ARG2: size
+ * ARG3: data
+ * ARG4: pci_device
+ * RET0: status
+ * RET1: error_flag
+ * ERRORS: EINVAL Invalid devhandle, size, or pci_device
+ * EBADALIGN Improperly aligned real address
+ * ENORADDR Bad real address
+ * ENOACCESS Guest access prohibited
+ * ENOTSUPPORTED Function is not supported by implementation
+ *
+ * Attempt to write data to the IO address given by the given devhandle,
+ * real address, and size. Size must be 1, 2, 4, or 8. The write is
+ * performed as a single access operation using the given size. Prior to
+ * writing, the data is size based byte swapped.
+ *
+ * If an error occurs when writing to the given location, do not generate an
+ * error report, but return a non-zero value in RET1. If the write was
+ * successful, return zero in RET1.
+ *
+ * pci_device describes the configuration address of the device being
+ * written to. The implementation may safely read from offset 0 within
+ * the configuration space of the device described by devhandle and
+ * pci_device in order to guarantee that the write portion of the operation
+ * completes.
+ *
+ * Any error that occurs due to the read shall be reported using the normal
+ * error reporting mechanisms; the read error is not suppressed.
+ *
+ * The caller must have permission to write to the given devhandle, real
+ * address, which must be an IO address. The argument real address must be a
+ * size aligned address. The caller must have permission to read from
+ * the given devhandle, pci_device configuration space offset 0.
+ *
+ * The hypervisor implementation of this function must block access to any
+ * IO address that the guest does not have explicit permission to access.
+ */
+#define HV_FAST_PCI_POKE 0xb7
+
+/* pci_dma_sync()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_DMA_SYNC
+ * ARG0: devhandle
+ * ARG1: real address
+ * ARG2: size
+ * ARG3: io_sync_direction
+ * RET0: status
+ * RET1: #synced
+ * ERRORS: EINVAL Invalid devhandle or io_sync_direction
+ * ENORADDR Bad real address
+ *
+ * Synchronize a memory region described by the given real address and size,
+ * for the device defined by the given devhandle using the direction(s)
+ * defined by the given io_sync_direction. The argument size is the size of
+ * the memory region in bytes.
+ *
+ * Return the actual number of bytes synchronized in the return value #synced,
+ * which may be less than or equal to the argument size. If the return
+ * value #synced is less than size, the caller must continue to call this
+ * function with updated real address and size arguments until the entire
+ * memory region is synchronized.
+ */
+#define HV_FAST_PCI_DMA_SYNC 0xb8
+
+/* PCI MSI services. */
+
+#define HV_MSITYPE_MSI32 0x00
+#define HV_MSITYPE_MSI64 0x01
+
+#define HV_MSIQSTATE_IDLE 0x00
+#define HV_MSIQSTATE_ERROR 0x01
+
+#define HV_MSIQ_INVALID 0x00
+#define HV_MSIQ_VALID 0x01
+
+#define HV_MSISTATE_IDLE 0x00
+#define HV_MSISTATE_DELIVERED 0x01
+
+#define HV_MSIVALID_INVALID 0x00
+#define HV_MSIVALID_VALID 0x01
+
+#define HV_PCIE_MSGTYPE_PME_MSG 0x18
+#define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b
+#define HV_PCIE_MSGTYPE_CORR_MSG 0x30
+#define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31
+#define HV_PCIE_MSGTYPE_FATAL_MSG 0x33
+
+#define HV_MSG_INVALID 0x00
+#define HV_MSG_VALID 0x01
+
+/* pci_msiq_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_CONF
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * ARG2: real address
+ * ARG3: number of entries
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, msiqid or nentries
+ * EBADALIGN Improperly aligned real address
+ * ENORADDR Bad real address
+ *
+ * Configure the MSI queue given by the devhandle and msiqid arguments
+ * to be placed at the given real address and to be of the given
+ * number of entries. The real address must be aligned exactly to match
+ * the queue size. Each queue entry is 64 bytes long, so, for example,
+ * a 32 entry queue must be aligned on a 2048 byte real address boundary.
+ * The MSI-EQ Head and Tail are initialized so that the MSI-EQ is 'empty'.
+ *
+ * Implementation Note: Certain implementations have fixed sized queues.
+ *                      In that case, the number of entries must contain
+ *                      the correct value.
+ */
+#define HV_FAST_PCI_MSIQ_CONF 0xc0
+
+/* pci_msiq_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_INFO
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * RET0: status
+ * RET1: real address
+ * RET2: number of entries
+ * ERRORS: EINVAL Invalid devhandle or msiqid
+ *
+ * Return the configuration information for the MSI queue described
+ * by the given devhandle and msiqid. The base address of the queue
+ * is returned in RET1 and the number of entries is returned in RET2.
+ * If the queue is unconfigured, the real address is undefined and the
+ * number of entries will be returned as zero.
+ */
+#define HV_FAST_PCI_MSIQ_INFO 0xc1
+
+/* pci_msiq_getvalid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * RET0: status
+ * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
+ * ERRORS: EINVAL Invalid devhandle or msiqid
+ *
+ * Get the valid state of the MSI-EQ described by the given devhandle and
+ * msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETVALID 0xc2
+
+/* pci_msiq_setvalid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid
+ * value or MSI EQ is uninitialized
+ *
+ * Set the valid state of the MSI-EQ described by the given devhandle and
+ * msiqid to the given msiqvalid.
+ */
+#define HV_FAST_PCI_MSIQ_SETVALID 0xc3
+
+/* pci_msiq_getstate()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * RET0: status
+ * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
+ * ERRORS: EINVAL Invalid devhandle or msiqid
+ *
+ * Get the state of the MSI-EQ described by the given devhandle and
+ * msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETSTATE 0xc4
+
+/* pci_msiq_setstate()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate
+ * value or MSI EQ is uninitialized
+ *
+ * Set the state of the MSI-EQ described by the given devhandle and
+ * msiqid to the given msiqstate.
+ */
+#define HV_FAST_PCI_MSIQ_SETSTATE 0xc5
+
+/* pci_msiq_gethead()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * RET0: status
+ * RET1: msiqhead
+ * ERRORS: EINVAL Invalid devhandle or msiqid
+ *
+ * Get the current MSI EQ queue head for the MSI-EQ described by the
+ * given devhandle and msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_GETHEAD 0xc6
+
+/* pci_msiq_sethead()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * ARG2: msiqhead
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead,
+ * or MSI EQ is uninitialized
+ *
+ * Set the current MSI EQ queue head for the MSI-EQ described by the
+ * given devhandle and msiqid.
+ */
+#define HV_FAST_PCI_MSIQ_SETHEAD 0xc7
+
+/* pci_msiq_gettail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL
+ * ARG0: devhandle
+ * ARG1: msiqid
+ * RET0: status
+ * RET1: msiqtail
+ * ERRORS: EINVAL Invalid devhandle or msiqid
+ *
+ * Get the current MSI EQ queue tail for the MSI-EQ described by the
+ * given devhandle and msiqid.
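+ *
+ * A rough sketch of draining an MSI-EQ (illustration only; the helper
+ * names are hypothetical as in earlier examples, 'q_va' is the guest's
+ * virtual mapping of the queue, 'nentries' is the configured queue
+ * size, and process_msiq_entry() stands in for real entry handling;
+ * head/tail are byte offsets that wrap at nentries * 64):
+ *
+ *	unsigned long head, tail;
+ *
+ *	sun4v_fast_trap_ret1(HV_FAST_PCI_MSIQ_GETHEAD, devhandle,
+ *			     msiqid, &head);
+ *	sun4v_fast_trap_ret1(HV_FAST_PCI_MSIQ_GETTAIL, devhandle,
+ *			     msiqid, &tail);
+ *	while (head != tail) {
+ *		process_msiq_entry(q_va + head);
+ *		head = (head + 64) % (nentries * 64);
+ *	}
+ *	sun4v_fast_trap(HV_FAST_PCI_MSIQ_SETHEAD, devhandle, msiqid, head);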
+ */
+#define HV_FAST_PCI_MSIQ_GETTAIL 0xc8
+
+/* pci_msi_getvalid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSI_GETVALID
+ * ARG0: devhandle
+ * ARG1: msinum
+ * RET0: status
+ * RET1: msivalidstate
+ * ERRORS: EINVAL Invalid devhandle or msinum
+ *
+ * Get the current valid/enabled state for the MSI defined by the
+ * given devhandle and msinum.
+ */
+#define HV_FAST_PCI_MSI_GETVALID 0xc9
+
+/* pci_msi_setvalid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSI_SETVALID
+ * ARG0: devhandle
+ * ARG1: msinum
+ * ARG2: msivalidstate
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate
+ *
+ * Set the current valid/enabled state for the MSI defined by the
+ * given devhandle and msinum.
+ */
+#define HV_FAST_PCI_MSI_SETVALID 0xca
+
+/* pci_msi_getmsiq()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ
+ * ARG0: devhandle
+ * ARG1: msinum
+ * RET0: status
+ * RET1: msiqid
+ * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound
+ *
+ * Get the MSI EQ that the MSI defined by the given devhandle and
+ * msinum is bound to.
+ */
+#define HV_FAST_PCI_MSI_GETMSIQ 0xcb
+
+/* pci_msi_setmsiq()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ
+ * ARG0: devhandle
+ * ARG1: msinum
+ * ARG2: msitype
+ * ARG3: msiqid
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msinum or msiqid
+ *
+ * Set the MSI EQ that the MSI defined by the given devhandle and
+ * msinum is bound to.
+ */
+#define HV_FAST_PCI_MSI_SETMSIQ 0xcc
+
+/* pci_msi_getstate()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSI_GETSTATE
+ * ARG0: devhandle
+ * ARG1: msinum
+ * RET0: status
+ * RET1: msistate
+ * ERRORS: EINVAL Invalid devhandle or msinum
+ *
+ * Get the state of the MSI defined by the given devhandle and msinum.
+ * If not initialized, return HV_MSISTATE_IDLE.
+ */
+#define HV_FAST_PCI_MSI_GETSTATE 0xcd
+
+/* pci_msi_setstate()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSI_SETSTATE
+ * ARG0: devhandle
+ * ARG1: msinum
+ * ARG2: msistate
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msinum or msistate
+ *
+ * Set the state of the MSI defined by the given devhandle and msinum.
+ */
+#define HV_FAST_PCI_MSI_SETSTATE 0xce
+
+/* pci_msg_getmsiq()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ
+ * ARG0: devhandle
+ * ARG1: msgtype
+ * RET0: status
+ * RET1: msiqid
+ * ERRORS: EINVAL Invalid devhandle or msgtype
+ *
+ * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_GETMSIQ 0xd0
+
+/* pci_msg_setmsiq()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ
+ * ARG0: devhandle
+ * ARG1: msgtype
+ * ARG2: msiqid
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid
+ *
+ * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_SETMSIQ 0xd1
+
+/* pci_msg_getvalid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSG_GETVALID
+ * ARG0: devhandle
+ * ARG1: msgtype
+ * RET0: status
+ * RET1: msgvalidstate
+ * ERRORS: EINVAL Invalid devhandle or msgtype
+ *
+ * Get the valid/enabled state of the MSG defined by the given
+ * devhandle and msgtype.
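+ *
+ * For example (a sketch, using the same hypothetical helper): routing
+ * PCI Express PME messages to event queue 'msiqid' and then enabling
+ * them, with pci_msg_setvalid() as defined just below:
+ *
+ *	sun4v_fast_trap(HV_FAST_PCI_MSG_SETMSIQ, devhandle,
+ *			HV_PCIE_MSGTYPE_PME_MSG, msiqid);
+ *	sun4v_fast_trap(HV_FAST_PCI_MSG_SETVALID, devhandle,
+ *			HV_PCIE_MSGTYPE_PME_MSG, HV_MSG_VALID);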
+ */
+#define HV_FAST_PCI_MSG_GETVALID 0xd2
+
+/* pci_msg_setvalid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_MSG_SETVALID
+ * ARG0: devhandle
+ * ARG1: msgtype
+ * ARG2: msgvalidstate
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate
+ *
+ * Set the valid/enabled state of the MSG defined by the given
+ * devhandle and msgtype.
+ */
+#define HV_FAST_PCI_MSG_SETVALID 0xd3
+
+/* Performance counter services. */
+
+#define HV_PERF_JBUS_PERF_CTRL_REG 0x00
+#define HV_PERF_JBUS_PERF_CNT_REG 0x01
+#define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02
+#define HV_PERF_DRAM_PERF_CNT_REG_0 0x03
+#define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04
+#define HV_PERF_DRAM_PERF_CNT_REG_1 0x05
+#define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06
+#define HV_PERF_DRAM_PERF_CNT_REG_2 0x07
+#define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08
+#define HV_PERF_DRAM_PERF_CNT_REG_3 0x09
+
+/* get_perfreg()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_GET_PERFREG
+ * ARG0: performance reg number
+ * RET0: status
+ * RET1: performance reg value
+ * ERRORS: EINVAL Invalid performance register number
+ * ENOACCESS No access allowed to performance counters
+ *
+ * Read the value of the given DRAM/JBUS performance counter/control register.
+ */
+#define HV_FAST_GET_PERFREG 0x100
+
+/* set_perfreg()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_SET_PERFREG
+ * ARG0: performance reg number
+ * ARG1: performance reg value
+ * RET0: status
+ * ERRORS: EINVAL Invalid performance register number
+ * ENOACCESS No access allowed to performance counters
+ *
+ * Write the given performance reg value to the given DRAM/JBUS
+ * performance counter/control register.
+ */
+#define HV_FAST_SET_PERFREG 0x101
+
+/* MMU statistics services.
+ *
+ * The hypervisor maintains MMU statistics and privileged code provides
+ * a buffer where these statistics can be collected. It is continually
+ * updated once configured.
The layout is as follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_mmu_statistics {
+	unsigned long immu_tsb_hits_ctx0_8k_tte;
+	unsigned long immu_tsb_ticks_ctx0_8k_tte;
+	unsigned long immu_tsb_hits_ctx0_64k_tte;
+	unsigned long immu_tsb_ticks_ctx0_64k_tte;
+	unsigned long __reserved1[2];
+	unsigned long immu_tsb_hits_ctx0_4mb_tte;
+	unsigned long immu_tsb_ticks_ctx0_4mb_tte;
+	unsigned long __reserved2[2];
+	unsigned long immu_tsb_hits_ctx0_256mb_tte;
+	unsigned long immu_tsb_ticks_ctx0_256mb_tte;
+	unsigned long __reserved3[4];
+	unsigned long immu_tsb_hits_ctxnon0_8k_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
+	unsigned long immu_tsb_hits_ctxnon0_64k_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
+	unsigned long __reserved4[2];
+	unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
+	unsigned long __reserved5[2];
+	unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
+	unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
+	unsigned long __reserved6[4];
+	unsigned long dmmu_tsb_hits_ctx0_8k_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
+	unsigned long dmmu_tsb_hits_ctx0_64k_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
+	unsigned long __reserved7[2];
+	unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
+	unsigned long __reserved8[2];
+	unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
+	unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
+	unsigned long __reserved9[4];
+	unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
+	unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
+	unsigned long __reserved10[2];
+	unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
+	unsigned long __reserved11[2];
+	unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
+	unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
+	unsigned long __reserved12[4];
+};
+#endif
+
+/* mmustat_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMUSTAT_CONF
+ * ARG0: real address
+ * RET0: status
+ * RET1: real address
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Real address not aligned on 64-byte boundary
+ * EBADTRAP API not supported on this processor
+ *
+ * Enable MMU statistic gathering using the buffer at the given real
+ * address on the current virtual CPU. The new buffer real address
+ * is given in ARG0, and the previously specified buffer real address
+ * is returned in RET1, or is returned as zero for the first invocation.
+ *
+ * If the passed in real address argument is zero, this will disable
+ * MMU statistic collection on the current virtual CPU. If an error is
+ * returned then no statistics are collected.
+ *
+ * The buffer contents should be initialized to all zeros before being
+ * given to the hypervisor or else the statistics will be meaningless.
+ */
+#define HV_FAST_MMUSTAT_CONF 0x102
+
+/* mmustat_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMUSTAT_INFO
+ * RET0: status
+ * RET1: real address
+ * ERRORS: EBADTRAP API not supported on this processor
+ *
+ * Return the current state and real address of the currently configured
+ * MMU statistics buffer on the current virtual CPU.
+ */
+#define HV_FAST_MMUSTAT_INFO 0x103
+
+/* Function numbers for HV_CORE_TRAP. */
+#define HV_CORE_VER 0x00
+#define HV_CORE_PUTCHAR 0x01
+#define HV_CORE_EXIT 0x02
+
+#endif /* !(_SPARC64_HYPERVISOR_H) */
--
cgit v1.2.3

From 1633a53c79498455b16d051451f4e3f83ab4e7dd Mon Sep 17 00:00:00 2001
From: "David S.
Miller" Date: Sat, 4 Feb 2006 03:09:03 -0800 Subject: [SPARC64]: Add 'hypervisor' to ultra_tlb_type enumeration. Signed-off-by: David S. Miller --- include/asm-sparc64/spitfire.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index 962638c9d12..23ad8a7987a 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h @@ -44,6 +44,7 @@ enum ultra_tlb_layout { spitfire = 0, cheetah = 1, cheetah_plus = 2, + hypervisor = 3, }; extern enum ultra_tlb_layout tlb_type; -- cgit v1.2.3 From 30ddbdb03339fc62480ddbff800a44066bb14455 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:11:17 -0800 Subject: [SPARC64]: Add Niagara init-store twin-load ASI defines. Signed-off-by: David S. Miller --- include/asm-sparc64/asi.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h index 534855660f2..4b36f5f4c8b 100644 --- a/include/asm-sparc64/asi.h +++ b/include/asm-sparc64/asi.h @@ -25,12 +25,16 @@ /* SpitFire and later extended ASIs. The "(III)" marker designates * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates - * Chip Multi Threading specific ASIs. + * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific + * ASIs, "(4V)" designates SUN4V specific ASIs. */ #define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ #define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ #define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ #define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load, + * secondary, user + */ #define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ #define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ #define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ @@ -137,6 +141,9 @@ #define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/ #define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */ #define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */ +#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load, + * primary, implicit + */ #define ASI_BLK_P 0xf0 /* Primary, blk ld/st */ #define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */ #define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */ -- cgit v1.2.3 From d398ee230f94a8ba386d8abb63f4fea129e4eaba Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:11:50 -0800 Subject: [SPARC64]: Sun4v specific ASI defines. Signed-off-by: David S. 
Miller --- include/asm-sparc64/asi.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/asi.h b/include/asm-sparc64/asi.h index 4b36f5f4c8b..662a21107ae 100644 --- a/include/asm-sparc64/asi.h +++ b/include/asm-sparc64/asi.h @@ -30,13 +30,22 @@ */ #define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */ #define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */ +#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */ +#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */ #define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/ #define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */ +#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/ +#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */ +#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */ +#define ASI_MMU 0x21 /* (4V) MMU Context Registers */ #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load, * secondary, user */ #define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */ +#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */ +#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */ #define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */ +#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */ #define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */ #define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */ #define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */ -- cgit v1.2.3 From 277b6dd9600613b01f66cadef2f0065514fecf69 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:12:02 -0800 Subject: [SPARC64]: Sun4v scratchpad register layout. Signed-off-by: David S. Miller --- include/asm-sparc64/scratchpad.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 include/asm-sparc64/scratchpad.h (limited to 'include') diff --git a/include/asm-sparc64/scratchpad.h b/include/asm-sparc64/scratchpad.h new file mode 100644 index 00000000000..5e8b01fb334 --- /dev/null +++ b/include/asm-sparc64/scratchpad.h @@ -0,0 +1,14 @@ +#ifndef _SPARC64_SCRATCHPAD_H +#define _SPARC64_SCRATCHPAD_H + +/* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD. */ + +#define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */ +#define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */ +#define SCRATCHPAD_UTSBREG1 0x10 +#define SCRATCHPAD_UTSBREG2 0x18 + /* 0x20 and 0x28, hypervisor only... */ +#define SCRATCHPAD_UNUSED1 0x30 +#define SCRATCHPAD_UNUSED2 0x38 /* Reserved for OBP */ + +#endif /* !(_SPARC64_SCRATCHPAD_H) */ -- cgit v1.2.3 From e1c21c4f476f2270c98aad1fe55e5f33e25f77f5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:12:14 -0800 Subject: [SPARC64]: Sun4v interrupt queue register definitions. Signed-off-by: David S. Miller --- include/asm-sparc64/intr_queue.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 include/asm-sparc64/intr_queue.h (limited to 'include') diff --git a/include/asm-sparc64/intr_queue.h b/include/asm-sparc64/intr_queue.h new file mode 100644 index 00000000000..206077dedc2 --- /dev/null +++ b/include/asm-sparc64/intr_queue.h @@ -0,0 +1,15 @@ +#ifndef _SPARC64_INTR_QUEUE_H +#define _SPARC64_INTR_QUEUE_H + +/* Sun4v interrupt queue registers, accessed via ASI_QUEUE. 
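As a sketch, reading one of the queue pointers defined below from C would look roughly like this (the helper name is illustrative; the kernel's actual users of these registers are in assembler):

	static inline unsigned long intrq_read(unsigned long reg)
	{
		unsigned long val;

		/* e.g. reg == INTRQ_CPU_MONDO_HEAD (0x3c0) */
		__asm__ __volatile__("ldxa	[%1] %2, %0"
				     : "=r" (val)
				     : "r" (reg), "i" (ASI_QUEUE));
		return val;
	}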
*/ + +#define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */ +#define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */ +#define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */ +#define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */ +#define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */ +#define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */ +#define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */ +#define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */ + +#endif /* !(_SPARC64_INTR_QUEUE_H) */ -- cgit v1.2.3 From d96b81533ba3d5775e45aee6986b2aa33c10801c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 15:40:53 -0800 Subject: [SPARC64]: Add sun4v case to __GET_CPUID() patch tables. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index c15514f82c3..4f28a85c104 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -68,6 +68,7 @@ struct cpuid_patch_entry { unsigned int cheetah_safari[4]; unsigned int cheetah_jbus[4]; unsigned int starfire[4]; + unsigned int sun4v[4]; }; extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; #endif @@ -79,6 +80,8 @@ extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; #define TRAP_BLOCK_SZ_SHIFT 6 +#include <asm/scratchpad.h> + #ifdef CONFIG_SMP #define __GET_CPUID(REG) \ @@ -105,6 +108,11 @@ extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; sllx REG, 9, REG; \ or REG, 0xd0, REG; \ lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ + /* sun4v implementation. */ \ + mov SCRATCHPAD_CPUID, REG; \ + nop; \ + ldxa [REG] ASI_SCRATCHPAD, REG; \ + nop; \ .previous; /* Clobbers TMP, current address space PGD phys address into DEST. */ -- cgit v1.2.3 From d619d7f11670f5b1cfca30e6645e44c8a6014820 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 23:59:38 -0800 Subject: [SPARC64]: Add define for "GL" field of sun4v %tstate register. Signed-off-by: David S. Miller --- include/asm-sparc64/pstate.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h index 29fb74aa805..1181d73315a 100644 --- a/include/asm-sparc64/pstate.h +++ b/include/asm-sparc64/pstate.h @@ -29,10 +29,11 @@ /* The V9 TSTATE Register (with SpitFire and Linux extensions). * * --------------------------------------------------------------- - * | Resv | CCR | ASI | %pil | PSTATE | Resv | CWP | + * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP | * --------------------------------------------------------------- - * 63 40 39 32 31 24 23 20 19 8 7 5 4 0 + * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0 */ +#define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */ #define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */ #define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */ #define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */ -- cgit v1.2.3 From 936f482af1743141d637483ec10eb881537c26dc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 21:29:28 -0800 Subject: [SPARC64]: Add initial code to twiddle %gl on trap entry/exit. Instead of setting/clearing PSTATE_AG we have to change the %gl register value on sun4v. Signed-off-by: David S.
Miller --- include/asm-sparc64/cpudata.h | 5 +++++ include/asm-sparc64/head.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 4f28a85c104..8666440c89a 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -73,6 +73,11 @@ struct cpuid_patch_entry { extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; #endif +struct gl_1insn_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct gl_1insn_patch_entry __gl_1insn_patch, __gl_1insn_patch_end; #endif /* !(__ASSEMBLY__) */ #define TRAP_PER_CPU_THREAD 0x00 diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index 731c842f3d1..ff76c0981b6 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -4,6 +4,10 @@ #include + /* wrpr %g0, val, %gl */ +#define SET_GL(val) \ + .word 0xa1902000 | val + #define KERNBASE 0x400000 #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) -- cgit v1.2.3 From 314981ac7177a933319e3c071a5cf0a579205e6e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 21:59:03 -0800 Subject: [SPARC64]: Kill all %pstate changes in context switch code. They are totally unnecessary because: 1) Interrupts are already disabled when switch_to() runs. 2) We don't use hard-coded alternate globals any longer. This found a case in rtrap, which still assumed alternate global %g6 was current_thread_info(), and that is fixed by this changeset as well. Signed-off-by: David S. Miller --- include/asm-sparc64/system.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index 26c0807af3e..a18ec87a52c 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -213,7 +213,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ task_thread_info(next); \ __asm__ __volatile__( \ "mov %%g4, %%g7\n\t" \ - "wrpr %%g0, 0x95, %%pstate\n\t" \ "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ "rdpr %%wstate, %%o5\n\t" \ @@ -227,14 +226,10 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ "ldx [%%g6 + %3], %%o6\n\t" \ "ldub [%%g6 + %2], %%o5\n\t" \ "ldub [%%g6 + %4], %%o7\n\t" \ - "mov %%g6, %%l2\n\t" \ "wrpr %%o5, 0x0, %%wstate\n\t" \ "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ - "wrpr %%g0, 0x94, %%pstate\n\t" \ - "mov %%l2, %%g6\n\t" \ "ldx [%%g6 + %6], %%g4\n\t" \ - "wrpr %%g0, 0x96, %%pstate\n\t" \ "brz,pt %%o7, 1f\n\t" \ " mov %%g7, %0\n\t" \ "b,a ret_from_syscall\n\t" \ -- cgit v1.2.3 From 45fec05f805a113372c9a7ff4c653ac749f6921c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 22:27:28 -0800 Subject: [SPARC64]: Sanitize %pstate writes for sun4v. If we're just switching between different alternate global sets, nop it out on sun4v. Also, get rid of all of the alternate global save/restore in the OBP CIF trampoline code. Signed-off-by: David S. 
Miller --- include/asm-sparc64/cpudata.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 8666440c89a..998145b9265 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -78,6 +78,12 @@ struct gl_1insn_patch_entry { unsigned int insn; }; extern struct gl_1insn_patch_entry __gl_1insn_patch, __gl_1insn_patch_end; + +struct gl_2insn_patch_entry { + unsigned int addr; + unsigned int insns[2]; +}; +extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end; #endif /* !(__ASSEMBLY__) */ #define TRAP_PER_CPU_THREAD 0x00 -- cgit v1.2.3 From d257d5da39a78b32721ca84b2ba7f461f2f7ed7f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Feb 2006 23:44:37 -0800 Subject: [SPARC64]: Initial sun4v TLB miss handling infrastructure. Things are a little tricky because, unlike sun4u, we have to: 1) do a hypervisor trap to do the TLB load. 2) do the TSB lookup calculations by hand Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 8 +++++++- include/asm-sparc64/tsb.h | 11 ++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 998145b9265..a3dc4afc4b2 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -6,6 +6,8 @@ #ifndef _SPARC64_CPUDATA_H #define _SPARC64_CPUDATA_H +#include + #ifndef __ASSEMBLY__ #include @@ -57,6 +59,9 @@ struct trap_per_cpu { /* D-cache line 2 */ unsigned long __pad2[4]; + +/* Dcache lines 3 and 4 */ + struct hv_fault_status fault_info; } __attribute__((aligned(64))); extern struct trap_per_cpu trap_block[NR_CPUS]; extern void init_cur_cpu_trap(void); @@ -88,8 +93,9 @@ extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end; #define TRAP_PER_CPU_THREAD 0x00 #define TRAP_PER_CPU_PGD_PADDR 0x08 +#define TRAP_PER_CPU_FAULT_INFO 0x20 -#define TRAP_BLOCK_SZ_SHIFT 6 +#define TRAP_BLOCK_SZ_SHIFT 7 #include diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 44709cde561..7f3abc32c4d 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -53,6 +53,14 @@ * kernel image, so we don't play these games for swapper_tsb access. */ #ifndef __ASSEMBLY__ +struct tsb_ldquad_phys_patch_entry { + unsigned int addr; + unsigned int sun4u_insn; + unsigned int sun4v_insn; +}; +extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch, + __tsb_ldquad_phys_patch_end; + struct tsb_phys_patch_entry { unsigned int addr; unsigned int insn; @@ -61,9 +69,10 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; #endif #define TSB_LOAD_QUAD(TSB, REG) \ 661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \ - .section .tsb_phys_patch, "ax"; \ + .section .tsb_ldquad_phys_patch, "ax"; \ .word 661b; \ ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \ + ldda [TSB] ASI_QUAD_LDD_PHYS_4V, REG; \ .previous #define TSB_LOAD_TAG_HIGH(TSB, REG) \ -- cgit v1.2.3 From df7d6aec96ab98cb182dd5138a85bdc363a9bf0d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 00:00:16 -0800 Subject: [SPARC64]: Rename gl_{1,2}insn_patch --> sun4v_{1,2}insn_patch Signed-off-by: David S. 
Miller --- include/asm-sparc64/cpudata.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index a3dc4afc4b2..26b1dc9afbf 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -78,17 +78,20 @@ struct cpuid_patch_entry { extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; #endif -struct gl_1insn_patch_entry { +struct sun4v_1insn_patch_entry { unsigned int addr; unsigned int insn; }; -extern struct gl_1insn_patch_entry __gl_1insn_patch, __gl_1insn_patch_end; +extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, + __sun4v_1insn_patch_end; -struct gl_2insn_patch_entry { +struct sun4v_2insn_patch_entry { unsigned int addr; unsigned int insns[2]; }; -extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end; +extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, + __sun4v_2insn_patch_end; + #endif /* !(__ASSEMBLY__) */ #define TRAP_PER_CPU_THREAD 0x00 -- cgit v1.2.3 From 89a5264f065672a882f555228000614a6b2182b7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 21:15:41 -0800 Subject: [SPARC64]: asm/cpudata.h needs asm/asi.h For the expansion of __GET_CPUID() on SMP. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 26b1dc9afbf..8a171ad7727 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -7,6 +7,7 @@ #define _SPARC64_CPUDATA_H #include <asm/hypervisor.h> +#include <asm/asi.h> #ifndef __ASSEMBLY__ @@ -130,9 +131,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ /* sun4v implementation. */ \ mov SCRATCHPAD_CPUID, REG; \ - nop; \ ldxa [REG] ASI_SCRATCHPAD, REG; \ nop; \ + nop; \ .previous; /* Clobbers TMP, current address space PGD phys address into DEST. */ -- cgit v1.2.3 From 481295f982b21b1dbe71cbf41d3a93028fee30d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 21:51:08 -0800 Subject: [SPARC64]: Register per-cpu fault status area with sun4v hypervisor. Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 2b2ecd6104d..c42c06a37d1 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -437,6 +437,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, #define HAVE_ARCH_FB_UNMAPPED_AREA extern void pgtable_cache_init(void); +extern void sun4v_register_fault_status(void); #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From 8b11bd12aff76e02cdc2cbc9e439bba88d281223 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 22:13:05 -0800 Subject: [SPARC64]: Patch up mmu context register writes for sun4v. sun4v uses ASI_MMU instead of ASI_DMMU. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu_context.h | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 1d232678821..2760353591a 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -41,11 +41,16 @@ extern void smp_tsb_sync(struct mm_struct *mm); /* Set MMU context in the actual hardware.
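The patch tables above are walked once during boot; a rough sketch of the one-instruction applier (modeled on what the boot code must do — the function name and details here are illustrative):

	static void __init sun4v_patch_1insn(void)
	{
		struct sun4v_1insn_patch_entry *p = &__sun4v_1insn_patch;

		while (p < &__sun4v_1insn_patch_end) {
			unsigned long addr = p->addr;

			/* Overwrite the sun4u instruction with the sun4v
			 * replacement, then flush the I-cache line.
			 */
			*(unsigned int *) addr = p->insn;
			wmb();
			__asm__ __volatile__("flush	%0" : : "r" (addr));

			p++;
		}
	}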
*/ #define load_secondary_context(__mm) \ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" \ - "flush %%g6" \ - : /* No outputs */ \ - : "r" (CTX_HWBITS((__mm)->context)), \ - "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU)) + __asm__ __volatile__( \ + "\n661: stxa %0, [%1] %2\n" \ + " .section .sun4v_1insn_patch, \"ax\"\n" \ + " .word 661b\n" \ + " stxa %0, [%1] %3\n" \ + " .previous\n" \ + " flush %%g6\n" \ + : /* No outputs */ \ + : "r" (CTX_HWBITS((__mm)->context)), \ + "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU)) extern void __flush_tlb_mm(unsigned long, unsigned long); -- cgit v1.2.3 From 3bfd6f3e77f58479ec53aa91d0b078abbb4c0868 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 22:49:38 -0800 Subject: [SPARC64]: Fix some comment typos in asm/hypervisor.h Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 9c8e453abe9..b4e0d52acd5 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -272,17 +272,19 @@ * EBADALIGN Base real address is not correctly aligned * for size. * - * Configure the given queue to be placed at the givem base real + * Configure the given queue to be placed at the given base real * address, with the given number of entries. The number of entries * must be a power of 2. The base real address must be aligned * exactly to match the queue size. Each queue entry is 64 bytes * long, so for example a 32 entry queue must be aligned on a 2048 * byte real address boundary. * - * The specified queue is unconfigured is number of entries is given as zero. + * The specified queue is unconfigured if the number of entries is given + * as zero. * * For the current version of this API service, the argument queue is defined * as follows: + * * queue description * ----- ------------------------- * 0x3c cpu mondo queue -- cgit v1.2.3 From 7202c55c5c57d2ad4611a751544c9368d7fba93a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 22:53:56 -0800 Subject: [SPARC64]: Add sun4v mondo queue bases to struct trap_per_cpu. Also, correct TRAP_PER_CPU_FAULT_INFO define, it should be 0x40 not 0x20. Signed-off-by: David S. 
Miller --- include/asm-sparc64/cpudata.h | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 8a171ad7727..492314b5347 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -53,15 +53,18 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); */ struct thread_info; struct trap_per_cpu { -/* D-cache line 1 */ +/* D-cache line 1: Basic thread information */ struct thread_info *thread; unsigned long pgd_paddr; unsigned long __pad1[2]; -/* D-cache line 2 */ - unsigned long __pad2[4]; +/* D-cache line 2: Sun4V Mondo Queue pointers */ + unsigned long cpu_mondo_pa; + unsigned long dev_mondo_pa; + unsigned long resum_mondo_pa; + unsigned long nonresum_mondo_pa; -/* Dcache lines 3 and 4 */ +/* Dcache lines 3 and 4: Hypervisor Fault Status */ struct hv_fault_status fault_info; } __attribute__((aligned(64))); extern struct trap_per_cpu trap_block[NR_CPUS]; @@ -95,11 +98,15 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #endif /* !(__ASSEMBLY__) */ -#define TRAP_PER_CPU_THREAD 0x00 -#define TRAP_PER_CPU_PGD_PADDR 0x08 -#define TRAP_PER_CPU_FAULT_INFO 0x20 +#define TRAP_PER_CPU_THREAD 0x00 +#define TRAP_PER_CPU_PGD_PADDR 0x08 +#define TRAP_PER_CPU_CPU_MONDO_PA 0x20 +#define TRAP_PER_CPU_DEV_MONDO_PA 0x28 +#define TRAP_PER_CPU_RESUM_MONDO_PA 0x30 +#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x38 +#define TRAP_PER_CPU_FAULT_INFO 0x40 -#define TRAP_BLOCK_SZ_SHIFT 7 +#define TRAP_BLOCK_SZ_SHIFT 7 -- cgit v1.2.3 From 5b0c0572fcd6204675c5f7ddfa572b5017f817dd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Feb 2006 02:53:50 -0800 Subject: [SPARC64]: Sun4v interrupt handling. Sun4v has 4 interrupt queues: cpu, device, resumable errors, and non-resumable errors. A set of head/tail offset pointers helps maintain a work queue in physical memory. The entries are 64 bytes in size. Each queue is allocated then registered with the hypervisor as we bring cpus up. The two error queues each get a kernel-side buffer that we use to quickly empty the main interrupt queue before we call up to C code to log the event and possibly take evasive action. Signed-off-by: David S.
Miller --- include/asm-sparc64/cpudata.h | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 492314b5347..7f0a74ec47f 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -53,16 +53,17 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); */ struct thread_info; struct trap_per_cpu { -/* D-cache line 1: Basic thread information */ +/* D-cache line 1: Basic thread information, cpu and device mondo queues */ struct thread_info *thread; unsigned long pgd_paddr; - unsigned long __pad1[2]; - -/* D-cache line 2: Sun4V Mondo Queue pointers */ unsigned long cpu_mondo_pa; unsigned long dev_mondo_pa; + +/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */ unsigned long resum_mondo_pa; + unsigned long resum_kernel_buf_pa; unsigned long nonresum_mondo_pa; + unsigned long nonresum_kernel_buf_pa; /* Dcache lines 3 and 4: Hypervisor Fault Status */ struct hv_fault_status fault_info; @@ -100,10 +101,12 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #define TRAP_PER_CPU_THREAD 0x00 #define TRAP_PER_CPU_PGD_PADDR 0x08 -#define TRAP_PER_CPU_CPU_MONDO_PA 0x20 -#define TRAP_PER_CPU_DEV_MONDO_PA 0x28 -#define TRAP_PER_CPU_RESUM_MONDO_PA 0x30 -#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x38 +#define TRAP_PER_CPU_CPU_MONDO_PA 0x10 +#define TRAP_PER_CPU_DEV_MONDO_PA 0x18 +#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20 +#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28 +#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 +#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 #define TRAP_PER_CPU_FAULT_INFO 0x40 #define TRAP_BLOCK_SZ_SHIFT 7 @@ -188,6 +191,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #else +#define __GET_CPUID(REG) \ + mov 0, REG; + /* Uniprocessor versions, we know the cpuid is zero. */ #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ sethi %hi(trap_block), DEST; \ -- cgit v1.2.3 From 1d2f1f90a1e004b0c1b8a73ed4394a93f09104b3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Feb 2006 16:41:20 -0800 Subject: [SPARC64]: Sun4v cross-call sending support. Technically the hypervisor call supports sending in a list of all cpus to get the cross-call, but I only pass in one cpu at a time for now. The multi-cpu support is there, just ifdef'd out so it's easy to enable or delete it later. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 7f0a74ec47f..338b0ca5b51 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -65,8 +65,16 @@ struct trap_per_cpu { unsigned long nonresum_mondo_pa; unsigned long nonresum_kernel_buf_pa; -/* Dcache lines 3 and 4: Hypervisor Fault Status */ +/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */ struct hv_fault_status fault_info; + +/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ + unsigned long cpu_mondo_block_pa; + unsigned long cpu_list_pa; + unsigned long __pad1[2]; + +/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. 
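A rough sketch of a single-target cross call using the two new fields (the sun4v_cpu_mondo_send() hypercall wrapper is an assumption here, and all retry/error handling is omitted):

	static void sun4v_send_one_mondo(int cpu, u64 d0, u64 d1, u64 d2)
	{
		struct trap_per_cpu *tb = &trap_block[smp_processor_id()];
		u64 *mondo = __va(tb->cpu_mondo_block_pa);
		u16 *cpu_list = __va(tb->cpu_list_pa);

		/* Fill in the mondo data block and a one-entry cpu list,
		 * then hand both physical addresses to the hypervisor.
		 */
		mondo[0] = d0;
		mondo[1] = d1;
		mondo[2] = d2;
		cpu_list[0] = cpu;

		sun4v_cpu_mondo_send(1, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	}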
*/ + unsigned long __pad2[4]; } __attribute__((aligned(64))); extern struct trap_per_cpu trap_block[NR_CPUS]; extern void init_cur_cpu_trap(void); @@ -108,8 +116,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 #define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 #define TRAP_PER_CPU_FAULT_INFO 0x40 +#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 +#define TRAP_PER_CPU_CPU_LIST_PA 0xc8 -#define TRAP_BLOCK_SZ_SHIFT 7 +#define TRAP_BLOCK_SZ_SHIFT 8 #include <asm/scratchpad.h> -- cgit v1.2.3 From d82ace7dc4073b090a55b9740700e32b9a9ae302 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 02:52:44 -0800 Subject: [SPARC64]: Detect sun4v early in boot process. We look for "SUNW,sun4v" in the 'compatible' property of the root OBP device tree node. Protect every %ver register access to make sure it is not touched on sun4v, as %ver is hyperprivileged there. Lock kernel TLB entries using hypervisor calls instead of calls into OBP. Signed-off-by: David S. Miller --- include/asm-sparc64/head.h | 6 ++++++ include/asm-sparc64/oplib.h | 3 +++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index ff76c0981b6..c4ac3e87aa5 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -24,6 +24,12 @@ #define PANTHER_IMPL 0x0019 /* Ultra-IV+ */ #define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */ +#define BRANCH_IF_SUN4V(tmp1,label) \ + sethi %hi(is_sun4v), %tmp1; \ + lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \ + brnz,pn %tmp1, label; \ + nop #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ rdpr %ver, %tmp1; \ sethi %hi(__CHEETAH_ID), %tmp2; \ diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 3c59b2693fb..2ea545b931b 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -39,6 +39,9 @@ extern int prom_stdin, prom_stdout; extern int prom_chosen_node; /* Helper values and strings in arch/sparc64/kernel/head.S */ +extern const char prom_peer_name[]; +extern const char prom_compatible_name[]; +extern const char prom_root_compatible[]; extern const char prom_finddev_name[]; extern const char prom_chosen_path[]; extern const char prom_getprop_name[]; -- cgit v1.2.3 From aa9143b9719c07fb6f1f6207790c9c5086ae07e7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 16:12:22 -0800 Subject: [SPARC64]: Implement sun4v TSB miss handlers. When we register a TSB with the hypervisor, so that it or hardware can handle TLB misses and do the TSB walk for us, the hypervisor traps down to these traps when it incurs a TSB miss. Processing is simple: we load the missing virtual address and context, and do a full page table walk. Signed-off-by: David S.
Miller --- include/asm-sparc64/ttable.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index f912f52c0c7..972f913709a 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -180,6 +180,26 @@ #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) #endif +#define SUN4V_ITSB_MISS \ + mov SCRATCHPAD_CPUID, %g1; \ + ldxa [%g1] ASI_SCRATCHPAD, %g2; \ + ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1;\ + sethi %hi(trap_block), %g5; \ + sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2; \ + or %g5, %lo(trap_block), %g5; \ + ba,pt %xcc, sun4v_itsb_miss; \ + add %g5, %g2, %g5; + +#define SUN4V_DTSB_MISS \ + mov SCRATCHPAD_CPUID, %g1; \ + ldxa [%g1] ASI_SCRATCHPAD, %g2; \ + ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1;\ + sethi %hi(trap_block), %g5; \ + sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2; \ + or %g5, %lo(trap_block), %g5; \ + ba,pt %xcc, sun4v_dtsb_miss; \ + add %g5, %g2, %g5; + /* Before touching these macros, you owe it to yourself to go and * see how arch/sparc64/kernel/winfixup.S works... -DaveM * -- cgit v1.2.3 From 618e9ed98aed924a1fc664eb6522db4a5e927043 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 17:21:53 -0800 Subject: [SPARC64]: Hypervisor TSB context switching. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 16 +++++++++------- include/asm-sparc64/mmu_context.h | 10 +++++++--- 2 files changed, 16 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 55e622711b9..473d990848e 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -4,6 +4,7 @@ #include #include #include +#include /* * For the 8k pagesize kernel, use only 10 hw context bits to optimize some @@ -101,13 +102,14 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte extern void tsb_flush(unsigned long ent, unsigned long tag); typedef struct { - unsigned long sparc64_ctx_val; - struct tsb *tsb; - unsigned long tsb_rss_limit; - unsigned long tsb_nentries; - unsigned long tsb_reg_val; - unsigned long tsb_map_vaddr; - unsigned long tsb_map_pte; + unsigned long sparc64_ctx_val; + struct tsb *tsb; + unsigned long tsb_rss_limit; + unsigned long tsb_nentries; + unsigned long tsb_reg_val; + unsigned long tsb_map_vaddr; + unsigned long tsb_map_pte; + struct hv_tsb_descr tsb_descr; } mm_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 2760353591a..eb660b1609c 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -22,14 +22,18 @@ extern void get_new_mmu_context(struct mm_struct *mm); extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); extern void destroy_context(struct mm_struct *mm); -extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg, - unsigned long tsb_vaddr, unsigned long tsb_pte); +extern void __tsb_context_switch(unsigned long pgd_pa, + unsigned long tsb_reg, + unsigned long tsb_vaddr, + unsigned long tsb_pte, + unsigned long tsb_descr_pa); static inline void tsb_context_switch(struct mm_struct *mm) { __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val, mm->context.tsb_map_vaddr, - mm->context.tsb_map_pte); + mm->context.tsb_map_pte, + __pa(&mm->context.tsb_descr)); } extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags); -- cgit v1.2.3 From 5fe91cf6254c8f23d90efb5fc11fff57dd5ab8dd Mon 
Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 20:45:26 -0800 Subject: [SPARC]: Clean up idprom header files. Delete unused macros, and use fixed sized types in sparc32 header. Signed-off-by: David S. Miller --- include/asm-sparc/idprom.h | 26 +++++++++----------------- include/asm-sparc64/idprom.h | 12 +----------- 2 files changed, 10 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h index d856e640acd..59083ed8523 100644 --- a/include/asm-sparc/idprom.h +++ b/include/asm-sparc/idprom.h @@ -7,27 +7,19 @@ #ifndef _SPARC_IDPROM_H #define _SPARC_IDPROM_H -/* Offset into the EEPROM where the id PROM is located on the 4c */ -#define IDPROM_OFFSET 0x7d8 +#include -/* On sun4m; physical. */ -/* MicroSPARC(-II) does not decode 31rd bit, but it works. */ -#define IDPROM_OFFSET_M 0xfd8 - -struct idprom -{ - unsigned char id_format; /* Format identifier (always 0x01) */ - unsigned char id_machtype; /* Machine type */ - unsigned char id_ethaddr[6]; /* Hardware ethernet address */ - long id_date; /* Date of manufacture */ - unsigned int id_sernum:24; /* Unique serial number */ - unsigned char id_cksum; /* Checksum - xor of the data bytes */ - unsigned char reserved[16]; +struct idprom { + u8 id_format; /* Format identifier (always 0x01) */ + u8 id_machtype; /* Machine type */ + u8 id_ethaddr[6]; /* Hardware ethernet address */ + s32 id_date; /* Date of manufacture */ + u32 id_sernum:24; /* Unique serial number */ + u8 id_cksum; /* Checksum - xor of the data bytes */ + u8 reserved[16]; }; extern struct idprom *idprom; extern void idprom_init(void); -#define IDPROM_SIZE (sizeof(struct idprom)) - #endif /* !(_SPARC_IDPROM_H) */ diff --git a/include/asm-sparc64/idprom.h b/include/asm-sparc64/idprom.h index 701483c5465..77fbf987385 100644 --- a/include/asm-sparc64/idprom.h +++ b/include/asm-sparc64/idprom.h @@ -9,15 +9,7 @@ #include -/* Offset into the EEPROM where the id PROM is located on the 4c */ -#define IDPROM_OFFSET 0x7d8 - -/* On sun4m; physical. */ -/* MicroSPARC(-II) does not decode 31rd bit, but it works. */ -#define IDPROM_OFFSET_M 0xfd8 - -struct idprom -{ +struct idprom { u8 id_format; /* Format identifier (always 0x01) */ u8 id_machtype; /* Machine type */ u8 id_ethaddr[6]; /* Hardware ethernet address */ @@ -30,6 +22,4 @@ struct idprom extern struct idprom *idprom; extern void idprom_init(void); -#define IDPROM_SIZE (sizeof(struct idprom)) - #endif /* !(_SPARC_IDPROM_H) */ -- cgit v1.2.3 From 8f6a93a196ba6c569c3e8daa6e81cca7e3ba81b1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 21:32:07 -0800 Subject: [SPARC64]: Beginnings of SUN4V PCI controller support. Abstract out IOMMU operations so that we can have a different set of calls on sun4v, which needs to do things through hypervisor calls. Signed-off-by: David S. 
Miller --- include/asm-sparc64/pci.h | 56 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index 89bd71b1c0d..7c5a589ea43 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h @@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) struct pci_dev; +struct pci_iommu_ops { + void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *); + void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t); + dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int); + void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int); + int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int); + void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int); + void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int); + void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int); +}; + +extern struct pci_iommu_ops *pci_iommu_ops; + /* Allocate and map kernel buffer using consistent mode DMA for a device. * hwdev should be valid struct pci_dev pointer for PCI devices. */ -extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) +{ + return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle); +} /* Free and unmap a consistent DMA buffer. * cpu_addr is what was returned from pci_alloc_consistent, @@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t * References to the memory and mappings associated with cpu_addr/dma_addr * past this call are illegal. */ -extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) +{ + return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle); +} /* Map a single buffer of the indicated size for DMA in streaming mode. * The 32-bit bus address to use is returned. @@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, * Once the device is given the dma address, the device owns this memory * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. */ -extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); +static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) +{ + return pci_iommu_ops->map_single(hwdev, ptr, size, direction); +} /* Unmap a single streaming mode DMA translation. The dma_addr and size * must match what was provided for in a previous pci_map_single call. All @@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ -extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); +static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) +{ + pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction); +} /* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. 
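The indirection above lets each controller variant install its own table at probe time; hypothetically (all function names below are illustrative, not the driver's actual symbols):

	static struct pci_iommu_ops pci_sun4v_iommu_ops = {
		.alloc_consistent	= pci_4v_alloc_consistent,
		.free_consistent	= pci_4v_free_consistent,
		.map_single		= pci_4v_map_single,
		.unmap_single		= pci_4v_unmap_single,
		.map_sg			= pci_4v_map_sg,
		.unmap_sg		= pci_4v_unmap_sg,
		.dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
		.dma_sync_sg_for_cpu	= pci_4v_dma_sync_sg_for_cpu,
	};

	/* Called from the sun4v controller probe path. */
	void __init pci_sun4v_iommu_init(void)
	{
		pci_iommu_ops = &pci_sun4v_iommu_ops;
	}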
*/ #define pci_map_page(dev, page, off, size, dir) \ @@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t * Device ownership issues as mentioned above for pci_map_single are * the same here. */ -extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction); +static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) +{ + return pci_iommu_ops->map_sg(hwdev, sg, nents, direction); +} /* Unmap a set of streaming mode DMA translations. * Again, cpu read rules concerning calls here are the same as for * pci_unmap_single() above. */ -extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nhwents, int direction); +static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction) +{ + pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction); +} /* Make physical memory consistent for a single * streaming mode DMA translation after a transfer. @@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, * must first perform a pci_dma_sync_for_device, and then the * device again owns the buffer. */ -extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction); +static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) +{ + pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction); +} static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, @@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, * The same as pci_dma_sync_single_* but for a scatter-gather list, * same rules and usage. */ -extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); +static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) +{ + pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction); +} static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, -- cgit v1.2.3 From bade5622167181844cd4e60087971c1f949e149f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:05:54 -0800 Subject: [SPARC64]: More SUN4V PCI controller work. Add assembler file for PCI hypervisor calls. Setup basic skeleton of SUN4V PCI controller driver. Add 32-bit devhandle to PBM struct, as this is needed for hypervisor calls. Signed-off-by: David S. Miller --- include/asm-sparc64/pbm.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index dd35a2c7798..1396f110939 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -139,6 +139,9 @@ struct pci_pbm_info { /* Opaque 32-bit system bus Port ID. */ u32 portid; + /* Opaque 32-bit handle used for hypervisor calls. */ + u32 devhandle; + /* Chipset version information. */ int chip_type; #define PBM_CHIP_TYPE_SABRE 1 -- cgit v1.2.3 From dedacf623283cb24933ec9f7d5bf539f19173cd4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:26:34 -0800 Subject: [SPARC64]: Add HV_PCI_TSBID() macro. For constructing hypervisor PCI TSB IDs. Signed-off-by: David S. 
Miller --- include/asm-sparc64/hypervisor.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index b4e0d52acd5..5d795ee5192 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -1300,6 +1300,9 @@ struct hv_trap_trace_entry { * a tsbnum and a tsbindex. Bits 63:32 contain the * tsbnum and bits 31:00 contain the tsbindex. + * + * Use the HV_PCI_TSBID() macro to construct such + * values. + * * io_attributes IO attributes for IOMMU mappings. One or more * of the attribute bits are stored in a 64-bit * value. The values are defined below. @@ -1354,6 +1357,9 @@ struct hv_trap_trace_entry { (((d) & 0x1f) << 11) | \ (((f) & 0x07) << 8)) +#define HV_PCI_TSBID(__tsb_num, __tsb_index) \ + ((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index))) + #define HV_PCI_SYNC_FOR_DEVICE 0x01 #define HV_PCI_SYNC_FOR_CPU 0x02 -- cgit v1.2.3 From 12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 10 Feb 2006 15:39:51 -0800 Subject: [SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly. This is where the virtual address of the fault status area belongs. To set it up we don't make a hypervisor call; instead we call OBP's SUNW,set-trap-table with the real address of the fault status area as the second argument. And right before that call we write the virtual address into ASI_SCRATCHPAD vaddr 0x0. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 26 +++++++++++++------------- include/asm-sparc64/oplib.h | 1 + include/asm-sparc64/ttable.h | 34 +++++++++++++++++----------------- 3 files changed, 31 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 338b0ca5b51..5a970f5ed9b 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -156,13 +156,16 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, nop; \ .previous; -/* Clobbers TMP, current address space PGD phys address into DEST. */ -#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ +#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ __GET_CPUID(TMP) \ sethi %hi(trap_block), DEST; \ sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ or DEST, %lo(trap_block), DEST; \ add DEST, TMP, DEST; \ + +/* Clobbers TMP, current address space PGD phys address into DEST. */ +#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ @@ -175,11 +178,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, /* Clobbers TMP, loads DEST with current thread info pointer. */ #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ - __GET_CPUID(TMP) \ - sethi %hi(trap_block), DEST; \ - sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ - or DEST, %lo(trap_block), DEST; \ - ldx [DEST + TMP], DEST; + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + ldx [DEST + TRAP_PER_CPU_THREAD], DEST; /* Given the current thread info pointer in THR, load the per-cpu * area base of the current processor into DEST. REG1, REG2, and REG3 are @@ -201,13 +201,13 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #else -#define __GET_CPUID(REG) \ - mov 0, REG; +#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + sethi %hi(trap_block), DEST; \ + or DEST, %lo(trap_block), DEST; \ /* Uniprocessor versions, we know the cpuid is zero.
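In C terms, the shared TRAP_LOAD_TRAP_BLOCK step computes nothing more than the following (sketch; the assembler versions do the same arithmetic with sethi/sllx):

	static inline struct trap_per_cpu *my_trap_block(int cpuid)
	{
		/* trap_block + (cpuid << TRAP_BLOCK_SZ_SHIFT); on UP, cpuid is 0. */
		return (struct trap_per_cpu *)
			((unsigned long) trap_block +
			 ((unsigned long) cpuid << TRAP_BLOCK_SZ_SHIFT));
	}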
*/ #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ - sethi %hi(trap_block), DEST; \ - or DEST, %lo(trap_block), DEST; \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; #define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ @@ -215,8 +215,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, or DEST, %lo(__irq_work), DEST; #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ - sethi %hi(trap_block), DEST; \ - ldx [DEST + %lo(trap_block)], DEST; + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + ldx [DEST + TRAP_PER_CPU_THREAD], DEST; /* No per-cpu areas on uniprocessor, so no need to load DEST. */ #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 2ea545b931b..ce5066ef2dd 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -338,6 +338,7 @@ int cpu_find_by_mid(int mid, int *prom_node); /* Client interface level routines. */ extern void prom_set_trap_table(unsigned long tba); +extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa); extern long p1275_cmd(const char *, long, ...); diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 972f913709a..6bb86a7a5b4 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -180,25 +180,25 @@ #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl) #endif -#define SUN4V_ITSB_MISS \ - mov SCRATCHPAD_CPUID, %g1; \ - ldxa [%g1] ASI_SCRATCHPAD, %g2; \ - ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1;\ - sethi %hi(trap_block), %g5; \ - sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2; \ - or %g5, %lo(trap_block), %g5; \ - ba,pt %xcc, sun4v_itsb_miss; \ - add %g5, %g2, %g5; +#define SUN4V_ITSB_MISS \ + ldxa [%g0] ASI_SCRATCHPAD, %g2; \ + ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \ + ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \ + srlx %g4, 22, %g7; \ + sllx %g5, 48, %g6; \ + brz,pn %g5, kvmap_itlb_4v; \ + or %g6, %g7, %g6; \ + ba,a,pt %xcc, sun4v_itsb_miss; #define SUN4V_DTSB_MISS \ - mov SCRATCHPAD_CPUID, %g1; \ - ldxa [%g1] ASI_SCRATCHPAD, %g2; \ - ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1;\ - sethi %hi(trap_block), %g5; \ - sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2; \ - or %g5, %lo(trap_block), %g5; \ - ba,pt %xcc, sun4v_dtsb_miss; \ - add %g5, %g2, %g5; + ldxa [%g0] ASI_SCRATCHPAD, %g2; \ + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \ + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \ + srlx %g4, 22, %g7; \ + sllx %g5, 48, %g6; \ + brz,pn %g5, kvmap_dtlb_4v; \ + or %g6, %g7, %g6; \ + ba,a,pt %xcc, sun4v_dtsb_miss; /* Before touching these macros, you owe it to yourself to go and * see how arch/sparc64/kernel/winfixup.S works... -DaveM -- cgit v1.2.3 From 02fd473bd4844befc74f7ca67cd60891e0a2d890 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 02:25:21 -0800 Subject: [SPARC64]: Add SUN4V Hypervisor Console driver. Since it can do things like BREAK and HUP, we implement this as a serial uart driver. This still needs interrupt probing code, as I haven't figured out how interrupts will work or be probed for on SUN4V yet. Signed-off-by: David S. 
Miller --- include/linux/serial_core.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4041122dabf..57abcea1cb5 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -127,6 +127,9 @@ /* Hilscher netx */ #define PORT_NETX 71 +/* SUN4V Hypervisor Console */ +#define PORT_SUNHV 72 + #ifdef __KERNEL__ #include -- cgit v1.2.3 From 459b6e621e0e15315c25bac47fa7113e5818d45d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 12:21:20 -0800 Subject: [SPARC64]: Fix some SUN4V TLB miss bugs. Code patching did not sign extend negative branch offsets correctly. Kernel TLB miss path needs patching and %g4 register preservation in order to handle SUN4V correctly. Signed-off-by: David S. Miller --- include/asm-sparc64/ttable.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 6bb86a7a5b4..9e28b240f3a 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -186,19 +186,19 @@ ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \ srlx %g4, 22, %g7; \ sllx %g5, 48, %g6; \ - brz,pn %g5, kvmap_itlb_4v; \ + ba,pt %xcc, sun4v_itsb_miss; \ or %g6, %g7, %g6; \ - ba,a,pt %xcc, sun4v_itsb_miss; + nop; -#define SUN4V_DTSB_MISS \ +#define SUN4V_DTSB_MISS \ ldxa [%g0] ASI_SCRATCHPAD, %g2; \ ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \ ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \ srlx %g4, 22, %g7; \ sllx %g5, 48, %g6; \ - brz,pn %g5, kvmap_dtlb_4v; \ + ba,pt %xcc, sun4v_dtsb_miss; \ or %g6, %g7, %g6; \ - ba,a,pt %xcc, sun4v_dtsb_miss; + nop; /* Before touching these macros, you owe it to yourself to go and * see how arch/sparc64/kernel/winfixup.S works... -DaveM -- cgit v1.2.3 From 490384e752a43aa281ed533e9de2da36df25c337 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 14:41:18 -0800 Subject: [SPARC64]: Register kernel TSB with hypervisor. We do this right after we take over the trap table from OBP. Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index c42c06a37d1..a480007f0a9 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -438,6 +438,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, extern void pgtable_cache_init(void); extern void sun4v_register_fault_status(void); +extern void sun4v_ktsb_register(void); #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From c4bce90ea2069e5a87beac806de3090ab32128d5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 21:57:54 -0800 Subject: [SPARC64]: Deal with PTE layout differences in SUN4V. Yes, you heard it right, they changed the PTE layout for SUN4V. Ho hum... This is the simple and inefficient way to support this. It'll get optimized, don't worry. Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 266 ++++++++++++------------------------------ 1 file changed, 72 insertions(+), 194 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index a480007f0a9..bd8bce704a9 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -90,134 +90,48 @@ #endif /* !(__ASSEMBLY__) */ -/* Spitfire/Cheetah TTE bits. 
*/ -#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ -#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/ -#define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */ -#define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */ -#define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */ -#define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */ -#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ -#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ -#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ -#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ -#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ -#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ -#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ -#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ -#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ -#define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */ -#define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */ -#define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */ -#define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ -#define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ -#define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */ -#define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */ -#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */ -#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */ - -#define _PAGE_ALL_SZ_BITS \ - (_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \ - _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB) - -/* Here are the SpitFire software bits we use in the TTE's. - * - * WARNING: If you are going to try and start using some - * of the soft2 bits, you will need to make - * modifications to the swap entry implementation. - * For example, one thing that could happen is that - * swp_entry_to_pte() would BUG_ON() if you tried - * to use one of the soft2 bits for _PAGE_FILE. - * - * Like other architectures, I have aliased _PAGE_FILE with - * _PAGE_MODIFIED. This works because _PAGE_FILE is never - * interpreted that way unless _PAGE_PRESENT is clear. 
- */ -#define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */ -#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */ -#define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */ -#define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ -#define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */ -#define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */ -#define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */ - -#if PAGE_SHIFT == 13 -#define _PAGE_SZBITS _PAGE_SZ8K -#elif PAGE_SHIFT == 16 -#define _PAGE_SZBITS _PAGE_SZ64K -#elif PAGE_SHIFT == 19 -#define _PAGE_SZBITS _PAGE_SZ512K -#elif PAGE_SHIFT == 22 -#define _PAGE_SZBITS _PAGE_SZ4MB -#else -#error Wrong PAGE_SHIFT specified -#endif - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define _PAGE_SZHUGE _PAGE_SZ4MB -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define _PAGE_SZHUGE _PAGE_SZ512K -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define _PAGE_SZHUGE _PAGE_SZ64K -#endif - -#define _PAGE_CACHE (_PAGE_CP | _PAGE_CV) - -#define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W) -#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R) -#define __PRIV_BITS _PAGE_P - -#define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE) - -/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */ -#define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ - __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC) - -#define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ - __ACCESS_BITS | _PAGE_EXEC) - -#define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ - __ACCESS_BITS | _PAGE_EXEC) - -#define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ - __PRIV_BITS | \ - __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC) - -#define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ - _PAGE_CACHE | \ - __ACCESS_BITS | _PAGE_WRITE) - -#define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ - _PAGE_CACHE | __ACCESS_BITS) - -#define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \ - _PAGE_CACHE | __ACCESS_BITS) - -#define _PFN_MASK _PAGE_PADDR - -#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \ - __ACCESS_BITS | _PAGE_E) - -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_NOEXEC -#define __P010 PAGE_COPY_NOEXEC -#define __P011 PAGE_COPY_NOEXEC -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_NOEXEC -#define __S010 PAGE_SHARED_NOEXEC -#define __S011 PAGE_SHARED_NOEXEC -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED +/* PTE bits which are the same in SUN4U and SUN4V format. 
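Because the protection maps below become zero placeholders, boot code has to fill them in for the running chip; conceptually (a sketch — only the sun4{u,v}_pgprot_init() names come from the patch itself, the wrapper is illustrative):

	static void __init pgprot_setup(void)
	{
		/* Fill in __P000..__S111, PAGE_KERNEL, pg_iobits, etc.
		 * with the PTE encoding the running chip actually uses.
		 */
		if (tlb_type == hypervisor)
			sun4v_pgprot_init();
		else
			sun4u_pgprot_init();
	}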
*/ +#define _PAGE_VALID 0x8000000000000000 /* Valid TTE */ +#define _PAGE_R 0x8000000000000000 /* Keep ref bit up to date*/ + +/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ +#define __P000 __pgprot(0) +#define __P001 __pgprot(0) +#define __P010 __pgprot(0) +#define __P011 __pgprot(0) +#define __P100 __pgprot(0) +#define __P101 __pgprot(0) +#define __P110 __pgprot(0) +#define __P111 __pgprot(0) + +#define __S000 __pgprot(0) +#define __S001 __pgprot(0) +#define __S010 __pgprot(0) +#define __S011 __pgprot(0) +#define __S100 __pgprot(0) +#define __S101 __pgprot(0) +#define __S110 __pgprot(0) +#define __S111 __pgprot(0) #ifndef __ASSEMBLY__ +extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); + +extern unsigned long pte_sz_bits(unsigned long size); + +extern pgprot_t PAGE_KERNEL; +extern pgprot_t PAGE_KERNEL_LOCKED; +extern pgprot_t PAGE_COPY; + +/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */ +extern unsigned long _PAGE_IE; +extern unsigned long _PAGE_E; +extern unsigned long _PAGE_CACHE; + +extern unsigned long pg_iobits; +extern unsigned long _PAGE_ALL_SZ_BITS; +extern unsigned long _PAGE_SZBITS; + extern unsigned long phys_base; extern unsigned long pfn_base; @@ -229,27 +143,12 @@ extern struct page *mem_map_zero; * the first physical page in the machine is at some huge physical address, * such as 4GB. This is common on a partitioned E10000, for example. */ - -#define pfn_pte(pfn, prot) \ - __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS) +extern pte_t pfn_pte(unsigned long, pgprot_t); #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +extern unsigned long pte_pfn(pte_t); +#define pte_page(x) pfn_to_page(pte_pfn(x)) +extern pte_t pte_modify(pte_t, pgprot_t); -#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT) -#define pte_page(x) pfn_to_page(pte_pfn(x)) - -static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) -{ - pte_t __pte; - const unsigned long preserve_mask = (_PFN_MASK | - _PAGE_MODIFIED | _PAGE_ACCESSED | - _PAGE_CACHE | _PAGE_E | - _PAGE_PRESENT | _PAGE_SZBITS); - - pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) | - (pgprot_val(new_prot) & ~preserve_mask); - - return __pte; -} #define pmd_set(pmdp, ptep) \ (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) #define pud_set(pudp, pmdp) \ @@ -259,8 +158,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) #define pud_page(pud) \ ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (0) #define pmd_present(pmd) (pmd_val(pmd) != 0U) @@ -270,30 +167,29 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) #define pud_present(pud) (pud_val(pud) != 0U) #define pud_clear(pudp) (pud_val(*(pudp)) = 0U) +/* Same in both SUN4V and SUN4U. */ +#define pte_none(pte) (!pte_val(pte)) + +extern unsigned long pte_present(pte_t); + /* The following only work if pte_present() is true. * Undefined behaviour if not.. 
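* A typical caller therefore tests pte_present() first; an
* illustrative sketch (not part of this patch):
*
*	if (pte_present(pte) && pte_dirty(pte))
*		set_page_dirty(pte_page(pte));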
*/ -#define pte_read(pte) (pte_val(pte) & _PAGE_READ) -#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC) -#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) -#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED) -#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) -#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W))) -#define pte_rdprotect(pte) \ - (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ)) -#define pte_mkclean(pte) \ - (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W))) -#define pte_mkold(pte) \ - (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED)) - -/* Permanent address of a page. */ -#define __page_address(page) page_address(page) +extern unsigned long pte_read(pte_t); +extern unsigned long pte_exec(pte_t); +extern unsigned long pte_write(pte_t); +extern unsigned long pte_dirty(pte_t); +extern unsigned long pte_young(pte_t); +extern pte_t pte_wrprotect(pte_t); +extern pte_t pte_rdprotect(pte_t); +extern pte_t pte_mkclean(pte_t); +extern pte_t pte_mkold(pte_t); /* Be very careful when you change these three, they are delicate. */ -#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R)) -#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE)) -#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W)) -#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE)) +extern pte_t pte_mkyoung(pte_t); +extern pte_t pte_mkwrite(pte_t); +extern pte_t pte_mkdirty(pte_t); +extern pte_t pte_mkhuge(pte_t); /* to find an entry in a page-table-directory. */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) @@ -328,6 +224,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p /* It is more efficient to let flush_tlb_kernel_range() * handle init_mm tlb flushes. + * + * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U + * and SUN4V pte layout, so this inline test is fine. */ if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) tlb_batch_add(mm, addr, ptep, orig); @@ -362,42 +261,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) /* File offset in PTE support. 
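* A PTE with _PAGE_PRESENT clear may instead encode the file
* offset of a non-linear mapping; pte_file() tests for that
* encoding, and pte_to_pgoff()/pgoff_to_pte() convert between
* the offset and the PTE value.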
*/ -#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) +extern unsigned long pte_file(pte_t); #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) -#define pgoff_to_pte(off) (__pte(((off) << PAGE_SHIFT) | _PAGE_FILE)) +extern pte_t pgoff_to_pte(unsigned long); #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) extern unsigned long prom_virt_to_phys(unsigned long, int *); -static __inline__ unsigned long -sun4u_get_pte (unsigned long addr) -{ - pgd_t *pgdp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - - if (addr >= PAGE_OFFSET) - return addr & _PAGE_PADDR; - if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) - return prom_virt_to_phys(addr, NULL); - pgdp = pgd_offset_k(addr); - pudp = pud_offset(pgdp, addr); - pmdp = pmd_offset(pudp, addr); - ptep = pte_offset_kernel(pmdp, addr); - return pte_val(*ptep) & _PAGE_PADDR; -} +extern unsigned long sun4u_get_pte(unsigned long); -static __inline__ unsigned long -__get_phys (unsigned long addr) +static inline unsigned long __get_phys(unsigned long addr) { - return sun4u_get_pte (addr); + return sun4u_get_pte(addr); } -static __inline__ int -__get_iospace (unsigned long addr) +static inline int __get_iospace(unsigned long addr) { - return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); + return ((sun4u_get_pte(addr) & 0xf0000000) >> 28); } extern unsigned long *sparc64_valid_addr_bitmap; @@ -411,9 +291,7 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long size, pgprot_t prot); /* Clear virtual and physical cachability, set side-effect bit. */ -#define pgprot_noncached(prot) \ - (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \ - _PAGE_E)) +extern pgprot_t pgprot_noncached(pgprot_t); /* * For sparc32&64, the pfn in io_remap_pfn_range() carries in -- cgit v1.2.3 From 1a7a242c898dd131f2df005c492e9b44fb8900e0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:24:30 -0800 Subject: [SPARC64]: Recognize "virtual-console" as input and output console device. Signed-off-by: David S. Miller --- include/asm-sparc/oplib.h | 2 ++ include/asm-sparc64/oplib.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h index d0d76b30eb4..f283f8aaf6a 100644 --- a/include/asm-sparc/oplib.h +++ b/include/asm-sparc/oplib.h @@ -165,6 +165,7 @@ enum prom_input_device { PROMDEV_ITTYA, /* input from ttya */ PROMDEV_ITTYB, /* input from ttyb */ PROMDEV_IRSC, /* input from rsc */ + PROMDEV_IVCONS, /* input from virtual-console */ PROMDEV_I_UNK, }; @@ -177,6 +178,7 @@ enum prom_output_device { PROMDEV_OTTYA, /* to ttya */ PROMDEV_OTTYB, /* to ttyb */ PROMDEV_ORSC, /* to rsc */ + PROMDEV_OVCONS, /* to virtual-console */ PROMDEV_O_UNK, }; diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index ce5066ef2dd..0631d13475f 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -167,6 +167,7 @@ enum prom_input_device { PROMDEV_ITTYA, /* input from ttya */ PROMDEV_ITTYB, /* input from ttyb */ PROMDEV_IRSC, /* input from rsc */ + PROMDEV_IVCONS, /* input from virtual-console */ PROMDEV_I_UNK, }; @@ -179,6 +180,7 @@ enum prom_output_device { PROMDEV_OTTYA, /* to ttya */ PROMDEV_OTTYB, /* to ttyb */ PROMDEV_ORSC, /* to rsc */ + PROMDEV_OVCONS, /* to virtual-console */ PROMDEV_O_UNK, }; -- cgit v1.2.3 From ff02e0d26f139ad95ec3a7e94f88faccaa180dff Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sun, 12 Feb 2006 17:07:51 -0800 Subject: [SPARC64]: Move PTE field definitions back into asm/pgtable.h Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 88 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 86 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index bd8bce704a9..3c02d5d9a53 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -91,8 +91,92 @@ #endif /* !(__ASSEMBLY__) */ /* PTE bits which are the same in SUN4U and SUN4V format. */ -#define _PAGE_VALID 0x8000000000000000 /* Valid TTE */ -#define _PAGE_R 0x8000000000000000 /* Keep ref bit up to date*/ +#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ +#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ + +/* SUN4U pte bits... */ +#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */ +#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */ +#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */ +#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */ +#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */ +#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ +#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ +#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ +#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ +#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ +#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ +#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */ +#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */ +#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */ +#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */ +#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */ +#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */ +#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ +#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */ +#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */ +#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */ +#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */ +#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ +#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ +#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */ +#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */ +#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */ + +/* SUN4V pte bits... 
*/ +#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */ +#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */ +#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */ +#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */ +#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */ +#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */ +#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */ +#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */ +#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */ +#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */ +#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */ +#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */ +#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */ +#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */ +#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */ +#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */ +#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */ +#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */ +#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */ +#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */ +#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */ +#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */ +#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */ +#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */ +#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */ +#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */ + +#if PAGE_SHIFT == 13 +#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V +#elif PAGE_SHIFT == 16 +#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V +#elif PAGE_SHIFT == 19 +#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V +#elif PAGE_SHIFT == 22 +#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U +#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V +#else +#error Wrong PAGE_SHIFT specified +#endif + +#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) +#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) +#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V +#endif /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ #define __P000 __pgprot(0) -- cgit v1.2.3 From cf627156c450cd5a0741b31f55181db3400d4887 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 21:10:07 -0800 Subject: [SPARC64]: Use inline patching for critical PTE operations. This handles the SUN4U vs SUN4V PTE layout differences with near zero performance cost. Signed-off-by: David S. 
Miller --- include/asm-sparc64/pgtable.h | 488 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 485 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 3c02d5d9a53..00eecbb52f9 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -227,11 +227,493 @@ extern struct page *mem_map_zero; * the first physical page in the machine is at some huge physical address, * such as 4GB. This is common on a partitioned E10000, for example. */ -extern pte_t pfn_pte(unsigned long, pgprot_t); +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + unsigned long paddr = pfn << PAGE_SHIFT; + unsigned long sz_bits; + + BUILD_BUG_ON(!__builtin_constant_p(_PAGE_SZBITS_4U) || + !__builtin_constant_p(_PAGE_SZBITS_4V)); + + sz_bits = 0UL; + if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { + BUILD_BUG_ON((_PAGE_SZBITS_4U & ~(0xfffffc0000000000UL)) || + (_PAGE_SZBITS_4V & ~(0x0000000000000fffUL))); + __asm__ __volatile__( + "\n661: sethi %uhi(%1), %0\n" + " sllx %0, 32, %0\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " mov %2, %0\n" + " nop\n" + " .previous\n" + : "=r" (sz_bits) + : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V)); + } + return __pte(paddr | sz_bits | pgprot_val(prot)); +} #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -extern unsigned long pte_pfn(pte_t); + +/* This one can be done with two shifts. */ +static inline unsigned long pte_pfn(pte_t pte) +{ + const unsigned long pte_paddr_shl_sun4u = 21; + const unsigned long pte_paddr_shr_sun4u = 21 + PAGE_SHIFT; + const unsigned long pte_paddr_shl_sun4v = 8; + const unsigned long pte_paddr_shr_sun4v = 8 + PAGE_SHIFT; + unsigned long ret; + + __asm__ __volatile__( + "\n661: sllx %1, %2, %0\n" + " srlx %0, %3, %0\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sllx %1, %4, %0\n" + " srlx %0, %5, %0\n" + " .previous\n" + : "=r" (ret) + : "r" (pte_val(pte)), + "i" (pte_paddr_shl_sun4u), "i" (pte_paddr_shr_sun4u), + "i" (pte_paddr_shl_sun4v), "i" (pte_paddr_shr_sun4v)); + + return ret; +} #define pte_page(x) pfn_to_page(pte_pfn(x)) -extern pte_t pte_modify(pte_t, pgprot_t); + +static inline pte_t pte_modify(pte_t pte, pgprot_t prot) +{ + const unsigned long preserve_mask_sun4u = (_PAGE_PADDR_4U | + _PAGE_MODIFIED_4U | + _PAGE_ACCESSED_4U | + _PAGE_CP_4U | + _PAGE_CV_4U | + _PAGE_E_4U | + _PAGE_PRESENT_4U | + _PAGE_SZBITS_4U); + const unsigned long preserve_mask_sun4v = (_PAGE_PADDR_4V | + _PAGE_MODIFIED_4V | + _PAGE_ACCESSED_4V | + _PAGE_CP_4V | + _PAGE_CV_4V | + _PAGE_E_4V | + _PAGE_PRESENT_4V | + _PAGE_SZBITS_4V); + unsigned long mask, tmp; + + /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) + * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8) + * + * Even if we use negation tricks the result is still a 6 + * instruction sequence, so don't try to play fancy and just + * do the most straightforward implementation. + * + * Note: We encode this into 3 sun4v 2-insn patch sequences. 
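+ * Each 661:/662:/663: label marks the address of a sun4u
+ * instruction pair, and booting on sun4v rewrites that pair in
+ * place with the replacement instructions recorded after the
+ * matching .word entry in the .sun4v_2insn_patch section.
+ * Roughly, the boot-time fixup amounts to (an illustrative
+ * sketch; field names assumed from the cpudata.h declarations):
+ *
+ *	struct sun4v_2insn_patch_entry *p = &__sun4v_2insn_patch;
+ *
+ *	while (p < &__sun4v_2insn_patch_end) {
+ *		unsigned int *insns = (unsigned int *)(unsigned long)p->addr;
+ *		insns[0] = p->insns[0];
+ *		insns[1] = p->insns[1];
+ *		__asm__ __volatile__("flush %0" : : "r" (insns + 0));
+ *		__asm__ __volatile__("flush %0" : : "r" (insns + 1));
+ *		p++;
+ *	}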
+ */ + + __asm__ __volatile__( + "\n661: sethi %%uhi(%2), %1\n" + " sethi %%hi(%2), %0\n" + "\n662: or %1, %%ulo(%2), %1\n" + " or %0, %%lo(%2), %0\n" + "\n663: sllx %1, 32, %1\n" + " or %0, %1, %0\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%3), %1\n" + " sethi %%hi(%3), %0\n" + " .word 662b\n" + " or %1, %%ulo(%3), %1\n" + " or %0, %%lo(%3), %0\n" + " .word 663b\n" + " sllx %1, 32, %1\n" + " or %0, %1, %0\n" + " .previous\n" + : "=r" (mask), "=r" (tmp) + : "i" (preserve_mask_sun4u), "i" (preserve_mask_sun4v)); + + return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); +} + +static inline pte_t pgoff_to_pte(unsigned long off) +{ + off <<= PAGE_SHIFT; + + BUILD_BUG_ON((_PAGE_FILE_4U & ~0xfffUL) || + (_PAGE_FILE_4V & ~0xfffUL)); + + __asm__ __volatile__( + "\n661: or %0, %2, %0\n" + " .section .sun4v_1insn_patch, \"ax\"\n" + " .word 661b\n" + " or %0, %3, %0\n" + " .previous\n" + : "=r" (off) + : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); + + return __pte(off); +} + +static inline pgprot_t pgprot_noncached(pgprot_t prot) +{ + unsigned long val = pgprot_val(prot); + + BUILD_BUG_ON(((_PAGE_CP_4U | _PAGE_CP_4U | _PAGE_E_4U) & ~(0xfffUL)) || + ((_PAGE_CP_4V | _PAGE_CP_4V | _PAGE_E_4V) & ~(0xfffUL))); + + __asm__ __volatile__( + "\n661: andn %0, %2, %0\n" + " or %0, %3, %0\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " andn %0, %4, %0\n" + " or %0, %3, %0\n" + " .previous\n" + : "=r" (val) + : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), + "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); + + return __pgprot(val); +} +/* Various pieces of code check for platform support by ifdef testing + * on "pgprot_noncached". That's broken and should be fixed, but for + * now... 
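+ * The #define below only exists so that "#ifdef pgprot_noncached"
+ * style tests keep working now that this is an inline function
+ * rather than a macro.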
+ */ +#define pgprot_noncached pgprot_noncached + +static inline pte_t pte_mkhuge(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_SZHUGE_4U; + const unsigned long mask_4v = _PAGE_SZHUGE_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0xfffffc0000000000UL)) || + (mask_4v & ~(0xfffUL))); + + __asm__ __volatile__( + "\n661: sethi %%uhi(%1), %0\n" + " sllx %0, 32, %0\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " mov %2, %0\n" + " nop\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return __pte(pte_val(pte) | mask); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U; + const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V; + unsigned long val = pte_val(pte), tmp; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000fffUL))); + + __asm__ __volatile__( + "\n661: or %0, %3, %0\n" + " nop\n" + "\n662: nop\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%4), %1\n" + " sllx %1, 32, %1\n" + " .word 662b\n" + " or %1, %%lo(%4), %1\n" + " or %0, %1, %0\n" + " .previous\n" + : "=r" (val), "=r" (tmp) + : "0" (val), "i" (mask_4u), "i" (mask_4v)); + + return __pte(val); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U; + const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V; + unsigned long val = pte_val(pte), tmp; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000fffUL))); + + __asm__ __volatile__( + "\n661: andn %0, %3, %0\n" + " nop\n" + "\n662: nop\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%4), %1\n" + " sllx %1, 32, %1\n" + " .word 662b\n" + " or %1, %%lo(%4), %1\n" + " andn %0, %1, %0\n" + " .previous\n" + : "=r" (val), "=r" (tmp) + : "0" (val), "i" (mask_4u), "i" (mask_4v)); + + return __pte(val); +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_WRITE_4U; + const unsigned long mask_4v = _PAGE_WRITE_4V; + unsigned long val = pte_val(pte), mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return __pte(val | mask); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_WRITE_4U | _PAGE_W_4U; + const unsigned long mask_4v = _PAGE_WRITE_4V | _PAGE_W_4V; + unsigned long val = pte_val(pte), tmp; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000fffUL))); + + __asm__ __volatile__( + "\n661: andn %0, %3, %0\n" + " nop\n" + "\n662: nop\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%4), %1\n" + " sllx %1, 32, %1\n" + " .word 662b\n" + " or %1, %%lo(%4), %1\n" + " andn %0, %1, %0\n" + " .previous\n" + : "=r" (val), "=r" (tmp) + : "0" (val), "i" (mask_4u), "i" (mask_4v)); + + return __pte(val); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_ACCESSED_4U; + const unsigned long mask_4v = _PAGE_ACCESSED_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" 
+ " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + mask |= _PAGE_R; + + return __pte(pte_val(pte) & ~mask); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_ACCESSED_4U; + const unsigned long mask_4v = _PAGE_ACCESSED_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + mask |= _PAGE_R; + + return __pte(pte_val(pte) | mask); +} + +static inline unsigned long pte_young(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_ACCESSED_4U; + const unsigned long mask_4v = _PAGE_ACCESSED_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return (pte_val(pte) & mask); +} + +static inline unsigned long pte_dirty(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_MODIFIED_4U; + const unsigned long mask_4v = _PAGE_MODIFIED_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return (pte_val(pte) & mask); +} + +static inline unsigned long pte_write(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_WRITE_4U; + const unsigned long mask_4v = _PAGE_WRITE_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return (pte_val(pte) & mask); +} + +static inline unsigned long pte_exec(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_EXEC_4U; + const unsigned long mask_4v = _PAGE_EXEC_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x00000000fffffc00UL)) || + (mask_4v & ~(0x0000000000000fffUL))); + + __asm__ __volatile__( + "\n661: sethi %%hi(%1), %0\n" + " .section .sun4v_1insn_patch, \"ax\"\n" + " .word 661b\n" + " mov %2, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return (pte_val(pte) & mask); +} + +static inline unsigned long pte_read(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_READ_4U; + const unsigned long mask_4v = _PAGE_READ_4V; + unsigned long mask; + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0xfffffc0000000000UL))); + + __asm__ __volatile__( + "\n661: mov %1, %0\n" + " nop\n" + " .section .sun4v_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%2), %0\n" + " sllx %0, 32, %0\n" + " .previous\n" + : "=r" (mask) + : "i" (mask_4u), "i" (mask_4v)); + + return (pte_val(pte) & mask); +} + +static inline unsigned 
long pte_file(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_FILE_4U; + const unsigned long mask_4v = _PAGE_FILE_4V; + unsigned long val = pte_val(pte); + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0x0000000000000fffUL))); + + __asm__ __volatile__( + "\n661: and %0, %2, %0\n" + " .section .sun4v_1insn_patch, \"ax\"\n" + " .word 661b\n" + " and %0, %3, %0\n" + " .previous\n" + : "=r" (val) + : "0" (val), "i" (mask_4u), "i" (mask_4v)); + + return val; +} + +static inline unsigned long pte_present(pte_t pte) +{ + const unsigned long mask_4u = _PAGE_PRESENT_4U; + const unsigned long mask_4v = _PAGE_PRESENT_4V; + unsigned long val = pte_val(pte); + + BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || + (mask_4v & ~(0x0000000000000fffUL))); + + __asm__ __volatile__( + "\n661: and %0, %2, %0\n" + " .section .sun4v_1insn_patch, \"ax\"\n" + " .word 661b\n" + " and %0, %3, %0\n" + " .previous\n" + : "=r" (val) + : "0" (val), "i" (mask_4u), "i" (mask_4v)); + + return val; +} #define pmd_set(pmdp, ptep) \ (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) -- cgit v1.2.3 From 85dfa19ba92f88fa1c1482f655c7247119dfdcd5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 00:02:16 -0800 Subject: [SPARC64]: Move devino_to_sysino out of pci_sun4v_asm.S It is not PCI specific, it is for all system interrupts. Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 5d795ee5192..16a40f48beb 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -1203,6 +1203,11 @@ struct hv_trap_trace_entry { */ #define HV_FAST_INTR_DEVINO2SYSINO 0xa0 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, + unsigned long devino); +#endif + /* intr_getenabled() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_INTR_GETENABLED -- cgit v1.2.3 From 6c0f402f6cc62314ef83b975f3430350dcb6055f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 00:23:32 -0800 Subject: [SPARC64]: Implement rest of generic interrupt hypervisor calls. Signed-off-by: David S. 
Miller --- include/asm-sparc64/hypervisor.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 16a40f48beb..587a0f6a0a7 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -1221,6 +1221,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, */ #define HV_FAST_INTR_GETENABLED 0xa1 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_intr_getenabled(unsigned long sysino); +#endif + /* intr_setenabled() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_INTR_SETENABLED @@ -1233,6 +1237,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, */ #define HV_FAST_INTR_SETENABLED 0xa2 +#ifndef __ASSEMBLY__ +extern void sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); +#endif + /* intr_getstate() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_INTR_GETSTATE @@ -1245,6 +1253,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, */ #define HV_FAST_INTR_GETSTATE 0xa3 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_intr_getstate(unsigned long sysino); +#endif + /* intr_setstate() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_INTR_SETSTATE @@ -1261,6 +1273,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, */ #define HV_FAST_INTR_SETSTATE 0xa4 +#ifndef __ASSEMBLY__ +extern void sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); +#endif + /* intr_gettarget() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_INTR_GETTARGET @@ -1275,6 +1291,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, */ #define HV_FAST_INTR_GETTARGET 0xa5 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_intr_gettarget(unsigned long sysino); +#endif + /* intr_settarget() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_INTR_SETTARGET @@ -1288,6 +1308,10 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle, */ #define HV_FAST_INTR_SETTARGET 0xa6 +#ifndef __ASSEMBLY__ +extern void sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); +#endif + /* PCI IO services. * * See the terminology descriptions in the device interrupt services -- cgit v1.2.3 From e3999574b48125c9bb0c95e3e9f1c696bf96c3e3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 18:16:10 -0800 Subject: [SPARC64]: Generic sun4v_build_irq(). Signed-off-by: David S. Miller --- include/asm-sparc64/irq.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h index 8b70edcb80d..529a9df1ad4 100644 --- a/include/asm-sparc64/irq.h +++ b/include/asm-sparc64/irq.h @@ -111,6 +111,7 @@ extern void disable_irq(unsigned int); #define disable_irq_nosync disable_irq extern void enable_irq(unsigned int); extern unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap); +extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags); extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); static __inline__ void set_softint(unsigned long bits) -- cgit v1.2.3 From e77227eb4e17591a6a511b9c0ff6e8ad7350c575 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 20:42:16 -0800 Subject: [SPARC64]: Probe virtual-devices root node on sun4v. This is where we learn how to get the interrupts for things like the hypervisor console device. Signed-off-by: David S. 
Miller --- include/asm-sparc64/vdev.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 include/asm-sparc64/vdev.h (limited to 'include') diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h new file mode 100644 index 00000000000..ebaac41a6e1 --- /dev/null +++ b/include/asm-sparc64/vdev.h @@ -0,0 +1,18 @@ +/* vdev.h: SUN4V virtual device interfaces and defines. + * + * Copyright (C) 2006 David S. Miller + */ + +#ifndef _SPARC64_VDEV_H +#define _SPARC64_VDEV_H + +#include +#include + +extern u32 sun4v_vdev_devhandle; +extern int sun4v_vdev_root; +extern struct linux_prom_pci_intmap *sun4v_vdev_intmap; +extern int sun4v_vdev_num_intmap; +extern struct linux_prom_pci_intmap sun4v_vdev_intmask; + +#endif /* !(_SPARC64_VDEV_H) */ -- cgit v1.2.3 From 5259d5bfaf5b2953b130e9a500277a905bd37823 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 21:15:44 -0800 Subject: [SPARC64]: Fix comment typo in asm/hypervisor.h Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 587a0f6a0a7..76a9d0fa272 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -912,7 +912,7 @@ struct hv_fault_status { * ARG0: character * RET0: status * ERRORS: EINVAL Illegal character - * EWOULDBLOCK Output buffer currentl full, would block + * EWOULDBLOCK Output buffer currently full, would block * * Send a character to the console device. Only character values * between 0 and 255 may be used. Values outside this range are -- cgit v1.2.3 From c4bea2883974a59ab7a0ac6c01d34f7ae0e8cd8e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 22:56:27 -0800 Subject: [SPARC64]: Make error codes available from sun4v_intr_get*(). And check for errors at call sites. Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 76a9d0fa272..f14992ab7fe 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -1238,7 +1238,7 @@ extern unsigned long sun4v_intr_getenabled(unsigned long sysino); #define HV_FAST_INTR_SETENABLED 0xa2 #ifndef __ASSEMBLY__ -extern void sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); +extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled); #endif /* intr_getstate() @@ -1274,7 +1274,7 @@ extern unsigned long sun4v_intr_getstate(unsigned long sysino); #define HV_FAST_INTR_SETSTATE 0xa4 #ifndef __ASSEMBLY__ -extern void sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); +extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state); #endif /* intr_gettarget() @@ -1309,7 +1309,7 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino); #define HV_FAST_INTR_SETTARGET 0xa6 #ifndef __ASSEMBLY__ -extern void sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); +extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); #endif /* PCI IO services. -- cgit v1.2.3 From 50f4f23c3bd95afc82e931254a71e7b1b3699fd2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 01:32:29 -0800 Subject: [SPARC64]: Fix gcc-3.3.x warnings. 
It doesn't like const variables being passed into "i" constraing asm operations. It's a bug, but there is nothing we can really do but work around it. Based upon a report from Andrew Morton. Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 143 ++++++++---------------------------------- 1 file changed, 25 insertions(+), 118 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 00eecbb52f9..bc446f302ea 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -232,13 +232,8 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) unsigned long paddr = pfn << PAGE_SHIFT; unsigned long sz_bits; - BUILD_BUG_ON(!__builtin_constant_p(_PAGE_SZBITS_4U) || - !__builtin_constant_p(_PAGE_SZBITS_4V)); - sz_bits = 0UL; if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { - BUILD_BUG_ON((_PAGE_SZBITS_4U & ~(0xfffffc0000000000UL)) || - (_PAGE_SZBITS_4V & ~(0x0000000000000fffUL))); __asm__ __volatile__( "\n661: sethi %uhi(%1), %0\n" " sllx %0, 32, %0\n" @@ -257,10 +252,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) /* This one can be done with two shifts. */ static inline unsigned long pte_pfn(pte_t pte) { - const unsigned long pte_paddr_shl_sun4u = 21; - const unsigned long pte_paddr_shr_sun4u = 21 + PAGE_SHIFT; - const unsigned long pte_paddr_shl_sun4v = 8; - const unsigned long pte_paddr_shr_sun4v = 8 + PAGE_SHIFT; unsigned long ret; __asm__ __volatile__( @@ -273,8 +264,8 @@ static inline unsigned long pte_pfn(pte_t pte) " .previous\n" : "=r" (ret) : "r" (pte_val(pte)), - "i" (pte_paddr_shl_sun4u), "i" (pte_paddr_shr_sun4u), - "i" (pte_paddr_shl_sun4v), "i" (pte_paddr_shr_sun4v)); + "i" (21), "i" (21 + PAGE_SHIFT), + "i" (8), "i" (8 + PAGE_SHIFT)); return ret; } @@ -282,22 +273,6 @@ static inline unsigned long pte_pfn(pte_t pte) static inline pte_t pte_modify(pte_t pte, pgprot_t prot) { - const unsigned long preserve_mask_sun4u = (_PAGE_PADDR_4U | - _PAGE_MODIFIED_4U | - _PAGE_ACCESSED_4U | - _PAGE_CP_4U | - _PAGE_CV_4U | - _PAGE_E_4U | - _PAGE_PRESENT_4U | - _PAGE_SZBITS_4U); - const unsigned long preserve_mask_sun4v = (_PAGE_PADDR_4V | - _PAGE_MODIFIED_4V | - _PAGE_ACCESSED_4V | - _PAGE_CP_4V | - _PAGE_CV_4V | - _PAGE_E_4V | - _PAGE_PRESENT_4V | - _PAGE_SZBITS_4V); unsigned long mask, tmp; /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) @@ -329,7 +304,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) " or %0, %1, %0\n" " .previous\n" : "=r" (mask), "=r" (tmp) - : "i" (preserve_mask_sun4u), "i" (preserve_mask_sun4v)); + : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | + _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | + _PAGE_SZBITS_4U), + "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | + _PAGE_SZBITS_4V)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); } @@ -338,9 +318,6 @@ static inline pte_t pgoff_to_pte(unsigned long off) { off <<= PAGE_SHIFT; - BUILD_BUG_ON((_PAGE_FILE_4U & ~0xfffUL) || - (_PAGE_FILE_4V & ~0xfffUL)); - __asm__ __volatile__( "\n661: or %0, %2, %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" @@ -357,9 +334,6 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) { unsigned long val = pgprot_val(prot); - BUILD_BUG_ON(((_PAGE_CP_4U | _PAGE_CP_4U | _PAGE_E_4U) & ~(0xfffUL)) || - ((_PAGE_CP_4V | _PAGE_CP_4V | _PAGE_E_4V) & ~(0xfffUL))); - __asm__ __volatile__( "\n661: andn %0, %2, %0\n" " or %0, %3, %0\n" 
@@ -382,13 +356,8 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) static inline pte_t pte_mkhuge(pte_t pte) { - const unsigned long mask_4u = _PAGE_SZHUGE_4U; - const unsigned long mask_4v = _PAGE_SZHUGE_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0xfffffc0000000000UL)) || - (mask_4v & ~(0xfffUL))); - __asm__ __volatile__( "\n661: sethi %%uhi(%1), %0\n" " sllx %0, 32, %0\n" @@ -398,20 +367,15 @@ static inline pte_t pte_mkhuge(pte_t pte) " nop\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V)); return __pte(pte_val(pte) | mask); } static inline pte_t pte_mkdirty(pte_t pte) { - const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U; - const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V; unsigned long val = pte_val(pte), tmp; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000fffUL))); - __asm__ __volatile__( "\n661: or %0, %3, %0\n" " nop\n" @@ -426,20 +390,16 @@ static inline pte_t pte_mkdirty(pte_t pte) " or %0, %1, %0\n" " .previous\n" : "=r" (val), "=r" (tmp) - : "0" (val), "i" (mask_4u), "i" (mask_4v)); + : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), + "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); return __pte(val); } static inline pte_t pte_mkclean(pte_t pte) { - const unsigned long mask_4u = _PAGE_MODIFIED_4U | _PAGE_W_4U; - const unsigned long mask_4v = _PAGE_MODIFIED_4V | _PAGE_W_4V; unsigned long val = pte_val(pte), tmp; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000fffUL))); - __asm__ __volatile__( "\n661: andn %0, %3, %0\n" " nop\n" @@ -454,20 +414,16 @@ static inline pte_t pte_mkclean(pte_t pte) " andn %0, %1, %0\n" " .previous\n" : "=r" (val), "=r" (tmp) - : "0" (val), "i" (mask_4u), "i" (mask_4v)); + : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U), + "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V)); return __pte(val); } static inline pte_t pte_mkwrite(pte_t pte) { - const unsigned long mask_4u = _PAGE_WRITE_4U; - const unsigned long mask_4v = _PAGE_WRITE_4V; unsigned long val = pte_val(pte), mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -477,20 +433,15 @@ static inline pte_t pte_mkwrite(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); return __pte(val | mask); } static inline pte_t pte_wrprotect(pte_t pte) { - const unsigned long mask_4u = _PAGE_WRITE_4U | _PAGE_W_4U; - const unsigned long mask_4v = _PAGE_WRITE_4V | _PAGE_W_4V; unsigned long val = pte_val(pte), tmp; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000fffUL))); - __asm__ __volatile__( "\n661: andn %0, %3, %0\n" " nop\n" @@ -505,20 +456,16 @@ static inline pte_t pte_wrprotect(pte_t pte) " andn %0, %1, %0\n" " .previous\n" : "=r" (val), "=r" (tmp) - : "0" (val), "i" (mask_4u), "i" (mask_4v)); + : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U), + "i" (_PAGE_WRITE_4V | _PAGE_W_4V)); return __pte(val); } static inline pte_t pte_mkold(pte_t pte) { - const unsigned long mask_4u = _PAGE_ACCESSED_4U; - const unsigned long mask_4v = _PAGE_ACCESSED_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -528,7 +475,7 @@ static inline pte_t pte_mkold(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : 
"i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); mask |= _PAGE_R; @@ -537,13 +484,8 @@ static inline pte_t pte_mkold(pte_t pte) static inline pte_t pte_mkyoung(pte_t pte) { - const unsigned long mask_4u = _PAGE_ACCESSED_4U; - const unsigned long mask_4v = _PAGE_ACCESSED_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -553,7 +495,7 @@ static inline pte_t pte_mkyoung(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); mask |= _PAGE_R; @@ -562,13 +504,8 @@ static inline pte_t pte_mkyoung(pte_t pte) static inline unsigned long pte_young(pte_t pte) { - const unsigned long mask_4u = _PAGE_ACCESSED_4U; - const unsigned long mask_4v = _PAGE_ACCESSED_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -578,20 +515,15 @@ static inline unsigned long pte_young(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V)); return (pte_val(pte) & mask); } static inline unsigned long pte_dirty(pte_t pte) { - const unsigned long mask_4u = _PAGE_MODIFIED_4U; - const unsigned long mask_4v = _PAGE_MODIFIED_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -601,20 +533,15 @@ static inline unsigned long pte_dirty(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V)); return (pte_val(pte) & mask); } static inline unsigned long pte_write(pte_t pte) { - const unsigned long mask_4u = _PAGE_WRITE_4U; - const unsigned long mask_4v = _PAGE_WRITE_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -624,20 +551,15 @@ static inline unsigned long pte_write(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V)); return (pte_val(pte) & mask); } static inline unsigned long pte_exec(pte_t pte) { - const unsigned long mask_4u = _PAGE_EXEC_4U; - const unsigned long mask_4v = _PAGE_EXEC_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x00000000fffffc00UL)) || - (mask_4v & ~(0x0000000000000fffUL))); - __asm__ __volatile__( "\n661: sethi %%hi(%1), %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" @@ -645,20 +567,15 @@ static inline unsigned long pte_exec(pte_t pte) " mov %2, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V)); return (pte_val(pte) & mask); } static inline unsigned long pte_read(pte_t pte) { - const unsigned long mask_4u = _PAGE_READ_4U; - const unsigned long mask_4v = _PAGE_READ_4V; unsigned long mask; - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0xfffffc0000000000UL))); - __asm__ __volatile__( "\n661: mov %1, %0\n" " nop\n" @@ -668,20 +585,15 @@ static inline unsigned long pte_read(pte_t pte) " sllx %0, 32, %0\n" " .previous\n" : "=r" (mask) - : "i" (mask_4u), "i" (mask_4v)); + : "i" (_PAGE_READ_4U), "i" (_PAGE_READ_4V)); return 
(pte_val(pte) & mask); } static inline unsigned long pte_file(pte_t pte) { - const unsigned long mask_4u = _PAGE_FILE_4U; - const unsigned long mask_4v = _PAGE_FILE_4V; unsigned long val = pte_val(pte); - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0x0000000000000fffUL))); - __asm__ __volatile__( "\n661: and %0, %2, %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" @@ -689,20 +601,15 @@ static inline unsigned long pte_file(pte_t pte) " and %0, %3, %0\n" " .previous\n" : "=r" (val) - : "0" (val), "i" (mask_4u), "i" (mask_4v)); + : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); return val; } static inline unsigned long pte_present(pte_t pte) { - const unsigned long mask_4u = _PAGE_PRESENT_4U; - const unsigned long mask_4v = _PAGE_PRESENT_4V; unsigned long val = pte_val(pte); - BUILD_BUG_ON((mask_4u & ~(0x0000000000000fffUL)) || - (mask_4v & ~(0x0000000000000fffUL))); - __asm__ __volatile__( "\n661: and %0, %2, %0\n" " .section .sun4v_1insn_patch, \"ax\"\n" @@ -710,7 +617,7 @@ static inline unsigned long pte_present(pte_t pte) " and %0, %3, %0\n" " .previous\n" : "=r" (val) - : "0" (val), "i" (mask_4u), "i" (mask_4v)); + : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V)); return val; } -- cgit v1.2.3 From 7c3514e4501565d76f9e4dec43e1fc17389f4811 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 00:41:47 -0800 Subject: [SPARC64]: Fixup TSTATE layout diagram in asm/pstate.h Signed-off-by: David S. Miller --- include/asm-sparc64/pstate.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h index 1181d73315a..49a7924a89a 100644 --- a/include/asm-sparc64/pstate.h +++ b/include/asm-sparc64/pstate.h @@ -28,9 +28,9 @@ /* The V9 TSTATE Register (with SpitFire and Linux extensions). * - * --------------------------------------------------------------- + * --------------------------------------------------------------------- * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP | - * --------------------------------------------------------------- + * --------------------------------------------------------------------- * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0 */ #define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */ -- cgit v1.2.3 From 7890f794e0e6f7dce2a5f4a03ba64b0b3fe306bd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 02:26:54 -0800 Subject: [SPARC64]: Add prom_{start,stop}cpu_cpuid(). Use prom_startcpu_cpuid() on SUN4V instead of prom_startcpu(). We should really test for "SUNW,start-cpu-by-cpuid" presence and use it if present even on SUN4U. Signed-off-by: David S. Miller --- include/asm-sparc64/oplib.h | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 0631d13475f..84618f8a9e6 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -188,10 +188,18 @@ extern enum prom_output_device prom_query_output_device(void); /* Multiprocessor operations... */ #ifdef CONFIG_SMP -/* Start the CPU with the given device tree node, context table, and context - * at the passed program counter. +/* Start the CPU with the given device tree node at the passed program + * counter with the given arg passed in via register %o0. 
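+ * An illustrative call (argument names assumed, not from this
+ * patch):
+ *
+ *	prom_startcpu(cpu_node, entry_pc, 0UL);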
*/ -extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0); +extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg); + +/* Start the CPU with the given cpu ID at the passed program + * counter with the given arg passed in via register %o0. + */ +extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg); + +/* Stop the CPU with the given cpu ID. */ +extern void prom_stopcpu_cpuid(int cpuid); /* Stop the current CPU. */ extern void prom_stopself(void); -- cgit v1.2.3 From 9d29a3fafd06534ad73427fee3c968c094d05b9b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 19:48:54 -0800 Subject: [SPARC64]: Decode virtual-devices interrupts correctly. Need to translate through the interrupt-map{,-mask] properties. Signed-off-by: David S. Miller --- include/asm-sparc64/vdev.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/vdev.h b/include/asm-sparc64/vdev.h index ebaac41a6e1..996e6be7b97 100644 --- a/include/asm-sparc64/vdev.h +++ b/include/asm-sparc64/vdev.h @@ -7,12 +7,10 @@ #define _SPARC64_VDEV_H #include -#include extern u32 sun4v_vdev_devhandle; extern int sun4v_vdev_root; -extern struct linux_prom_pci_intmap *sun4v_vdev_intmap; -extern int sun4v_vdev_num_intmap; -extern struct linux_prom_pci_intmap sun4v_vdev_intmask; + +extern unsigned int sun4v_vdev_device_interrupt(unsigned int); #endif /* !(_SPARC64_VDEV_H) */ -- cgit v1.2.3 From bc45e32e0fbf661d0c5c5b9c981bc0fe5da4901f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 16:46:58 -0800 Subject: [SPARC]: Kill off these __put_user_ret things. They are bogus and haven't been referenced in years. Signed-off-by: David S. Miller --- include/asm-sparc/uaccess.h | 47 ------------------------------------------- include/asm-sparc64/uaccess.h | 37 ---------------------------------- 2 files changed, 84 deletions(-) (limited to 'include') diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h index f8f1ec1f06e..3cf132e1aa2 100644 --- a/include/asm-sparc/uaccess.h +++ b/include/asm-sparc/uaccess.h @@ -120,17 +120,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ default: __pu_ret = __put_user_bad(); break; \ } } else { __pu_ret = -EFAULT; } __pu_ret; }) -#define __put_user_check_ret(x,addr,size,retval) ({ \ -register int __foo __asm__ ("l1"); \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \ -case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \ -case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \ -case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \ -default: if (__put_user_bad()) return retval; break; \ -} } else return retval; }) - #define __put_user_nocheck(x,addr,size) ({ \ register int __pu_ret; \ switch (size) { \ @@ -141,16 +130,6 @@ case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ default: __pu_ret = __put_user_bad(); break; \ } __pu_ret; }) -#define __put_user_nocheck_ret(x,addr,size,retval) ({ \ -register int __foo __asm__ ("l1"); \ -switch (size) { \ -case 1: __put_user_asm_ret(x,b,addr,retval,__foo); break; \ -case 2: __put_user_asm_ret(x,h,addr,retval,__foo); break; \ -case 4: __put_user_asm_ret(x,,addr,retval,__foo); break; \ -case 8: __put_user_asm_ret(x,d,addr,retval,__foo); break; \ -default: if (__put_user_bad()) return retval; break; \ -} }) - #define __put_user_asm(x,size,addr,ret) \ __asm__ __volatile__( \ "/* Put user asm, inline. 
*/\n" \ @@ -170,32 +149,6 @@ __asm__ __volatile__( \ : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ "i" (-EFAULT)) -#define __put_user_asm_ret(x,size,addr,ret,foo) \ -if (__builtin_constant_p(ret) && ret == -EFAULT) \ -__asm__ __volatile__( \ - "/* Put user asm ret, inline. */\n" \ -"1:\t" "st"#size " %1, %2\n\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, __ret_efault\n\n\t" \ - ".previous\n\n\t" \ - : "=r" (foo) : "r" (x), "m" (*__m(addr))); \ -else \ -__asm__ __volatile( \ - "/* Put user asm ret, inline. */\n" \ -"1:\t" "st"#size " %1, %2\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %3, %%o0\n\t" \ - ".previous\n\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\n\t" \ - : "=r" (foo) : "r" (x), "m" (*__m(addr)), "i" (ret)) - extern int __put_user_bad(void); #define __get_user_check(x,addr,size,type) ({ \ diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index c91d1e38eac..0c375a989be 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h @@ -114,16 +114,6 @@ case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ default: __pu_ret = __put_user_bad(); break; \ } __pu_ret; }) -#define __put_user_nocheck_ret(data,addr,size,retval) ({ \ -register int __foo __asm__ ("l1"); \ -switch (size) { \ -case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \ -case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \ -case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \ -case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \ -default: if (__put_user_bad()) return retval; break; \ -} }) - #define __put_user_asm(x,size,addr,ret) \ __asm__ __volatile__( \ "/* Put user asm, inline. */\n" \ @@ -143,33 +133,6 @@ __asm__ __volatile__( \ : "=r" (ret) : "r" (x), "r" (__m(addr)), \ "i" (-EFAULT)) -#define __put_user_asm_ret(x,size,addr,ret,foo) \ -if (__builtin_constant_p(ret) && ret == -EFAULT) \ -__asm__ __volatile__( \ - "/* Put user asm ret, inline. */\n" \ -"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, __ret_efault\n\n\t" \ - ".previous\n\n\t" \ - : "=r" (foo) : "r" (x), "r" (__m(addr))); \ -else \ -__asm__ __volatile__( \ - "/* Put user asm ret, inline. */\n" \ -"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %3, %%o0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\n\t" \ - : "=r" (foo) : "r" (x), "r" (__m(addr)), \ - "i" (ret)) - extern int __put_user_bad(void); #define __get_user_nocheck(data,addr,size,type) ({ \ -- cgit v1.2.3 From 94f8762db9a80ed34252e9fe5fa38be87bb7826b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 14:26:53 -0800 Subject: [SPARC64]: Add sun4v_cpu_qconf() hypervisor call. Call it from register_one_mondo(). Signed-off-by: David S. 
Miller --- include/asm-sparc64/hypervisor.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index f14992ab7fe..cd5fbcd9556 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -301,6 +301,12 @@ #define HV_CPU_QUEUE_RES_ERROR 0x3e #define HV_CPU_QUEUE_NONRES_ERROR 0x3f +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_cpu_qconf(unsigned long type, + unsigned long queue_paddr, + unsigned long num_queue_entries); +#endif + /* cpu_qinfo() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_CPU_QINFO -- cgit v1.2.3 From 4ff7ac417d4b628c23df3ae8301d17e29e6e8f16 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 16:22:26 -0800 Subject: [SPARC64]: Add GET_GL_GLOBAL() macro for SUN4V. So we can read the %gl register for debugging. Signed-off-by: David S. Miller --- include/asm-sparc64/head.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index c4ac3e87aa5..67960a751f4 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -8,6 +8,10 @@ #define SET_GL(val) \ .word 0xa1902000 | val + /* rdpr %gl, %gN */ +#define GET_GL_GLOBAL(N) \ + .word 0x81540000 | (N << 25) + #define KERNBASE 0x400000 #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) -- cgit v1.2.3 From 72aff53f1fe74153eccef303ab2f79de888d248c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 01:29:17 -0800 Subject: [SPARC64]: Get SUN4V SMP working. The sibling cpu bringup is extremely fragile. We can only perform the most basic calls until we take over the trap table from the firmware/hypervisor on the new cpu. This means no accesses to %g4, %g5, %g6 since those can't be TLB translated without our trap handlers. In order to achieve this: 1) Change sun4v_init_mondo_queues() so that it can operate in several modes. It can allocate the queues, or install them in the current processor, or both. The boot cpu does both in it's call early on. Later, the boot cpu allocates the sibling cpu queue, starts the sibling cpu, then the sibling cpu loads them in. 2) init_cur_cpu_trap() is changed to take the current_thread_info() as an argument instead of reading %g6 directly on the current cpu. 3) Create a trampoline stack for the sibling cpus. We do our basic kernel calls using this stack, which is locked into the kernel image, then go to our proper thread stack after taking over the trap table. 4) While we are in this delicate startup state, we put 0xdeadbeef into %g4/%g5/%g6 in order to catch accidental accesses. 5) On the final prom_set_trap_table*() call, we put &init_thread_union into %g6. This is a hack to make prom_world(0) work. All that wants to do is restore the %asi register using get_thread_current_ds(). Longer term we should just do the OBP calls to set the trap table by hand just like we do for everything else. This would avoid that silly prom_world(0) issue, then we can remove the init_thread_union hack. Signed-off-by: David S. 
Miller --- include/asm-sparc64/cpudata.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 5a970f5ed9b..771aa94dfd9 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -77,7 +77,7 @@ struct trap_per_cpu { unsigned long __pad2[4]; } __attribute__((aligned(64))); extern struct trap_per_cpu trap_block[NR_CPUS]; -extern void init_cur_cpu_trap(void); +extern void init_cur_cpu_trap(struct thread_info *); extern void setup_tba(void); #ifdef CONFIG_SMP -- cgit v1.2.3 From ebd8c56c5ae154e2c6cfb7453a76a4e7265b2377 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 08:38:06 -0800 Subject: [SPARC64]: Fix uniprocessor IRQ targeting on SUN4V. We need to use the real hardware processor ID when targeting interrupts, not the "define to 0" thing the uniprocessor build gives us. Also, fill in the Node-ID and Agent-ID fields properly on sun4u/Safari. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 6 ++---- include/asm-sparc64/irq.h | 3 +++ 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 771aa94dfd9..84656f1895c 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -80,7 +80,6 @@ extern struct trap_per_cpu trap_block[NR_CPUS]; extern void init_cur_cpu_trap(struct thread_info *); extern void setup_tba(void); -#ifdef CONFIG_SMP struct cpuid_patch_entry { unsigned int addr; unsigned int cheetah_safari[4]; @@ -89,7 +88,6 @@ struct cpuid_patch_entry { unsigned int sun4v[4]; }; extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; -#endif struct sun4v_1insn_patch_entry { unsigned int addr; @@ -123,8 +121,6 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #include -#ifdef CONFIG_SMP - #define __GET_CPUID(REG) \ /* Spitfire implementation (default). */ \ 661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ @@ -156,6 +152,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, nop; \ .previous; +#ifdef CONFIG_SMP + #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ __GET_CPUID(TMP) \ sethi %hi(trap_block), DEST; \ diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h index 529a9df1ad4..de33d6e1afb 100644 --- a/include/asm-sparc64/irq.h +++ b/include/asm-sparc64/irq.h @@ -72,8 +72,11 @@ struct ino_bucket { #define IMAP_VALID 0x80000000 /* IRQ Enabled */ #define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ #define IMAP_TID_JBUS 0x7c000000 /* JBUS TargetID */ +#define IMAP_TID_SHIFT 26 #define IMAP_AID_SAFARI 0x7c000000 /* Safari AgentID */ +#define IMAP_AID_SHIFT 26 #define IMAP_NID_SAFARI 0x03e00000 /* Safari NodeID */ +#define IMAP_NID_SHIFT 21 #define IMAP_IGN 0x000007c0 /* IRQ Group Number */ #define IMAP_INO 0x0000003f /* IRQ Number */ #define IMAP_INR 0x000007ff /* Full interrupt number*/ -- cgit v1.2.3 From 97532f598273d03cab8bb5206669b6fdd654eb63 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 10:14:38 -0800 Subject: [SPARC64]: Add HWCAP_SPARC_BLKINIT elf capability flag for Niagara. Signed-off-by: David S.
Miller --- include/asm-sparc64/elf.h | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h index 69539a8ab83..303d85e2f82 100644 --- a/include/asm-sparc64/elf.h +++ b/include/asm-sparc64/elf.h @@ -10,6 +10,7 @@ #ifdef __KERNEL__ #include #include +#include #endif /* @@ -68,6 +69,7 @@ #define HWCAP_SPARC_MULDIV 8 #define HWCAP_SPARC_V9 16 #define HWCAP_SPARC_ULTRA3 32 +#define HWCAP_SPARC_BLKINIT 64 /* * These are used to set parameters in the core dumps. @@ -145,11 +147,21 @@ typedef struct { instruction set this cpu supports. */ /* On Ultra, we support all of the v8 capabilities. */ -#define ELF_HWCAP ((HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \ - HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | \ - HWCAP_SPARC_V9) | \ - ((tlb_type == cheetah || tlb_type == cheetah_plus) ? \ - HWCAP_SPARC_ULTRA3 : 0)) +static inline unsigned int sparc64_elf_hwcap(void) +{ + unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | + HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | + HWCAP_SPARC_V9); + + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= HWCAP_SPARC_ULTRA3; + else if (tlb_type == hypervisor) + cap |= HWCAP_SPARC_BLKINIT; + + return cap; +} + +#define ELF_HWCAP sparc64_elf_hwcap(); /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in -- cgit v1.2.3 From c857e3fdbc306e95fdcaad1d8f3ea6bc8e7eea99 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 10:35:23 -0800 Subject: [SPARC64]: __bzero_noasi --> __clear_user Signed-off-by: David S. Miller --- include/asm-sparc64/uaccess.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index 0c375a989be..afe236ba555 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h @@ -252,14 +252,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size) } #define __copy_in_user copy_in_user -extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long); - -static inline unsigned long __must_check -__clear_user(void __user *addr, unsigned long size) -{ - - return __bzero_noasi(addr, size); -} +extern unsigned long __must_check __clear_user(void __user *, unsigned long); #define clear_user __clear_user -- cgit v1.2.3 From 3763be32d591cacf808c36390a8af3f2784cde5f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 12:33:13 -0800 Subject: [SPARC64]: Define ARCH_HAS_READ_CURRENT_TIMER. This gives more consistent bogomips and delay() semantics, especially on sun4v. It gives weird looking values though... Signed-off-by: David S. Miller --- include/asm-sparc64/timex.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h index 9e8d4175bcb..2a5e4ebaad8 100644 --- a/include/asm-sparc64/timex.h +++ b/include/asm-sparc64/timex.h @@ -14,4 +14,10 @@ typedef unsigned long cycles_t; #define get_cycles() tick_ops->get_tick() +#define ARCH_HAS_READ_CURRENT_TIMER 1 +#define read_current_timer(timer_val_p) \ +({ *timer_val_p = tick_ops->get_tick(); \ + 0; \ +}) + #endif -- cgit v1.2.3 From 8b234274418d6d79527c4ac3a72da446ca4cb35f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 18:01:02 -0800 Subject: [SPARC64]: More TLB/TSB handling fixes. 
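Before the details of the next patch, a hedged sketch of the invalid-tag idea it introduces. TSB_TAG_INVALID_BIT and the 16-byte tag/pte entry layout come from the diffs themselves; the struct and helper below are illustrative, not the kernel's code.

/* Hedged sketch: with per-mm TSBs the context field of a tag is
 * always zero, so an all-zero entry would look like a valid
 * translation of virtual address zero.  Fresh or flushed entries
 * are therefore tagged with a chosen "invalid" bit set.
 */
#define TSB_TAG_INVALID_BIT	46

struct tsb_sketch {
	unsigned long tag;
	unsigned long pte;
} __attribute__((aligned(16)));

static void tsb_entry_invalidate(struct tsb_sketch *ent)
{
	ent->tag = 1UL << TSB_TAG_INVALID_BIT;	/* never matches a real tag */
	ent->pte = 0UL;
}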
The SUN4V convention with non-shared TSBs is that the context bit of the TAG is clear. So we have to choose an "invalid" bit and initialize new TSBs appropriately. Otherwise a zero TAG looks "valid". Make sure, for the window fixup cases, that we use the right global registers and that we don't potentially trample on the live global registers in etrap/rtrap handling (%g2 and %g6) and that we put the missing virtual address properly in %g5. Signed-off-by: David S. Miller --- include/asm-sparc64/tsb.h | 8 ++++++++ include/asm-sparc64/ttable.h | 12 ++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 7f3abc32c4d..6e6768067e3 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -12,6 +12,8 @@ * * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1 * ldxa [%g0] ASI_{D,I}MMU, %g6 + * sllx %g6, 22, %g6 + * srlx %g6, 22, %g6 * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 * cmp %g4, %g6 * bne,pn %xcc, tsb_miss_{d,i}tlb @@ -29,6 +31,9 @@ * ------------------------------------------------- * 63 61 60 48 47 42 41 0 * + * But actually, since we use per-mm TSB's, we zero out the CONTEXT + * field. + * * Like the powerpc hashtables we need to use locking in order to * synchronize while we update the entries. PTE updates need locking * as well. @@ -42,6 +47,9 @@ #define TSB_TAG_LOCK_BIT 47 #define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32)) +#define TSB_TAG_INVALID_BIT 46 +#define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32)) + #define TSB_MEMBAR membar #StoreStore /* Some cpus support physical address quad loads. We want to use diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h index 9e28b240f3a..2d5e3c464df 100644 --- a/include/asm-sparc64/ttable.h +++ b/include/asm-sparc64/ttable.h @@ -184,20 +184,20 @@ ldxa [%g0] ASI_SCRATCHPAD, %g2; \ ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \ ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \ - srlx %g4, 22, %g7; \ - sllx %g5, 48, %g6; \ + srlx %g4, 22, %g6; \ ba,pt %xcc, sun4v_itsb_miss; \ - or %g6, %g7, %g6; \ + nop; \ + nop; \ nop; #define SUN4V_DTSB_MISS \ ldxa [%g0] ASI_SCRATCHPAD, %g2; \ ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \ ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \ - srlx %g4, 22, %g7; \ - sllx %g5, 48, %g6; \ + srlx %g4, 22, %g6; \ ba,pt %xcc, sun4v_dtsb_miss; \ - or %g6, %g7, %g6; \ + nop; \ + nop; \ nop; /* Before touching these macros, you owe it to yourself to go and -- cgit v1.2.3 From f6c1fe529217788f095f6953c2b66bec1196ad3d Mon Sep 17 00:00:00 2001 From: "Fabio M. Di Nitto" Date: Sat, 18 Feb 2006 00:32:31 -0800 Subject: [SPARC64] Fix build if CONFIG_HUGETLB_PAGE is not set Signed-off-by: Fabio M. Di Nitto Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index bc446f302ea..bab7defd8b3 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -354,6 +354,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) */ #define pgprot_noncached pgprot_noncached +#ifdef CONFIG_HUGETLB_PAGE static inline pte_t pte_mkhuge(pte_t pte) { unsigned long mask; @@ -371,6 +372,7 @@ static inline pte_t pte_mkhuge(pte_t pte) return __pte(pte_val(pte) | mask); } +#endif static inline pte_t pte_mkdirty(pte_t pte) { -- cgit v1.2.3 From 0f15952ac8641bde1045162ffd4a7b474cc318b0 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sat, 18 Feb 2006 12:43:16 -0800 Subject: [SPARC64]: Export a PAGE_SHARED symbol. For drivers/media/*, noticed by Fabbione. Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index bab7defd8b3..6c8126b2dec 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -206,6 +206,7 @@ extern unsigned long pte_sz_bits(unsigned long size); extern pgprot_t PAGE_KERNEL; extern pgprot_t PAGE_KERNEL_LOCKED; extern pgprot_t PAGE_COPY; +extern pgprot_t PAGE_SHARED; /* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */ extern unsigned long _PAGE_IE; -- cgit v1.2.3 From 1bd0cd74d102a527b2a72907698d73fad4b82cbd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 15:41:01 -0800 Subject: [SPARC64]: Kill cpudata->idle_volume. Set, but never used. We used to use this for dynamic IRQ retargetting, but that code died a long time ago. Signed-off-by: David S. Miller --- include/asm-sparc64/cpudata.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 84656f1895c..c66a81bbc84 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -19,7 +19,7 @@ typedef struct { unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ unsigned int multiplier; unsigned int counter; - unsigned int idle_volume; + unsigned int __pad1; unsigned long clock_tick; /* %tick's per second */ unsigned long udelay_val; -- cgit v1.2.3 From 6f5374c91f0dd1d92408ed44c066c32bcce5ce69 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 15:42:09 -0800 Subject: [SPARC64]: Add sun4v_cpu_yield(). Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index cd5fbcd9556..726e2ea03ce 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -258,6 +258,9 @@ */ #define HV_FAST_CPU_YIELD 0x12 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_cpu_yield(void); +#endif /* cpu_qconf() * TRAP: HV_FAST_TRAP -- cgit v1.2.3 From d7744a09504d5ae84edc8289a02254e1f2102410 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 22:31:11 -0800 Subject: [SPARC64]: Create a seperate kernel TSB for 4MB/256MB mappings. It can map all of the linear kernel mappings with zero TSB hash conflicts for systems with 16GB or less ram. In such cases, on SUN4V, once we load up this TSB the first time with all the mappings, we never take a linear kernel mapping TLB miss ever again, the hypervisor handles them all. Signed-off-by: David S. Miller --- include/asm-sparc64/tsb.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h index 6e6768067e3..e82612cd9f3 100644 --- a/include/asm-sparc64/tsb.h +++ b/include/asm-sparc64/tsb.h @@ -243,6 +243,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; #define KERNEL_TSB_SIZE_BYTES (32 * 1024) #define KERNEL_TSB_NENTRIES \ (KERNEL_TSB_SIZE_BYTES / 16) +#define KERNEL_TSB4M_NENTRIES 4096 /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL * on TSB hit. 
REG1, REG2, REG3, and REG4 are used as temporaries @@ -263,4 +264,18 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; be,a,pt %xcc, OK_LABEL; \ mov REG4, REG1; + /* This version uses a trick, the TAG is already (VADDR >> 22) so + * we can make use of that for the index computation. + */ +#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ + sethi %hi(swapper_4m_tsb), REG1; \ + or REG1, %lo(swapper_4m_tsb), REG1; \ + and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \ + sllx REG2, 4, REG2; \ + add REG1, REG2, REG2; \ + KTSB_LOAD_QUAD(REG2, REG3); \ + cmp REG3, TAG; \ + be,a,pt %xcc, OK_LABEL; \ + mov REG4, REG1; + #endif /* !(_SPARC64_TSB_H) */ -- cgit v1.2.3 From 0f05da6d577b80eb00f15994c86e4812ae60f1b9 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 22 Feb 2006 16:20:11 -0800 Subject: [SPARC64]: Fix %tstate ASI handling in start_thread{,32}() Niagara helps us find an ancient bug in the sparc64 port :-) The ASI_* values are plain constant defines, thus signed 32-bit on sparc64. To shift this into the regs->tstate value we were or'ing "(ASI_PNF << 24)" into there. ASI_PNF is 0x82, and shifting it left by 24 makes its topmost bit the sign bit in a 32-bit value. This would get sign extended to 64-bits and thus corrupt the top-half of the reg->tstate value. This never caused problems in pre-Niagara cpus because the only things up there were the condition code values. But Niagara has the global register level field, and this all-1's value is illegal there, so Niagara gives an illegal instruction trap due to this bug. I'm pretty sure this bug is about as old as the sparc64 port itself. This also points out that we weren't setting ASI_PNF for 32-bit tasks. We should, so fix that while we're here. Signed-off-by: David S. Miller --- include/asm-sparc64/processor.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index b3889f3f943..685479fb436 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -91,7 +91,8 @@ extern unsigned long thread_saved_pc(struct task_struct *); /* Do necessary setup to start up a newly executed thread. */ #define start_thread(regs, pc, sp) \ do { \ - regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \ + unsigned long __asi = ASI_PNF; \ + regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \ regs->tpc = ((pc & (~3)) - 4); \ regs->tnpc = regs->tpc + 4; \ regs->y = 0; \ @@ -128,10 +129,10 @@ do { \ #define start_thread32(regs, pc, sp) \ do { \ + unsigned long __asi = ASI_PNF; \ pc &= 0x00000000ffffffffUL; \ sp &= 0x00000000ffffffffUL; \ -\ - regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \ + regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \ regs->tpc = ((pc & (~3)) - 4); \ regs->tnpc = regs->tpc + 4; \ regs->y = 0; \ -- cgit v1.2.3 From a0663a79ad4faebe1db4a56e2e767b120b12333a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 14:19:28 -0800 Subject: [SPARC64]: Fix TLB context allocation with SMT style shared TLBs. The context allocation scheme we use depends upon there being a 1<-->1 mapping from cpu to physical TLB for correctness. Chips like Niagara break this assumption.
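To make the failure mode concrete, here is a minimal sketch of the version-tagged context scheme this fix builds on; the macro shapes mirror asm-sparc64/mmu.h, but treat the snippet as illustrative (CTX_NR_BITS in particular varies, and a later patch in this series fixes it at 13).

/* Hedged sketch: a context value is <version bits | hw context nr>.
 * Bumping the version stored in tlb_context_cache invalidates every
 * previously handed out context in one step.
 */
#define CTX_NR_BITS		13
#define CTX_VERSION_SHIFT	CTX_NR_BITS
#define CTX_VERSION_MASK	((~0UL) << CTX_VERSION_SHIFT)

extern unsigned long tlb_context_cache;	/* current version + last ctx */

static int ctx_is_valid(unsigned long ctx_val)
{
	/* Valid only while the version bits still match. */
	return !((ctx_val ^ tlb_context_cache) & CTX_VERSION_MASK);
}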
So what we do is notify all cpus with a cross call when the context version number changes, and if necessary this makes them allocate a valid context for the address space they are running at the time. Stress tested with make -j1024, make -j2048, and make -j4096 kernel builds on a 32-strand, 8 core, T2000 with 16GB of ram. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 1 + include/asm-sparc64/mmu_context.h | 25 ++++++++++++------------- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 473d990848e..1504d303a1d 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -102,6 +102,7 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte extern void tsb_flush(unsigned long ent, unsigned long tag); typedef struct { + spinlock_t lock; unsigned long sparc64_ctx_val; struct tsb *tsb; unsigned long tsb_rss_limit; diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index eb660b1609c..4be40c58e3c 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -19,6 +19,12 @@ extern unsigned long tlb_context_cache; extern unsigned long mmu_context_bmap[]; extern void get_new_mmu_context(struct mm_struct *mm); +#ifdef CONFIG_SMP +extern void smp_new_mmu_context_version(void); +#else +#define smp_new_mmu_context_version() do { } while (0) +#endif + extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); extern void destroy_context(struct mm_struct *mm); @@ -58,21 +64,17 @@ extern void smp_tsb_sync(struct mm_struct *mm); extern void __flush_tlb_mm(unsigned long, unsigned long); -/* Switch the current MM context. */ +/* Switch the current MM context. Interrupts are disabled. */ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) { unsigned long ctx_valid; int cpu; - /* Note: page_table_lock is used here to serialize switch_mm - * and activate_mm, and their calls to get_new_mmu_context. - * This use of page_table_lock is unrelated to its other uses. - */ - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.lock); ctx_valid = CTX_VALID(mm->context); if (!ctx_valid) get_new_mmu_context(mm); - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.lock); if (!ctx_valid || (old_mm != mm)) { load_secondary_context(mm); @@ -98,19 +100,16 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str /* Activate a new MM instance for the current task. */ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) { + unsigned long flags; int cpu; - /* Note: page_table_lock is used here to serialize switch_mm - * and activate_mm, and their calls to get_new_mmu_context. - * This use of page_table_lock is unrelated to its other uses. - */ - spin_lock(&mm->page_table_lock); + spin_lock_irqsave(&mm->context.lock, flags); if (!CTX_VALID(mm->context)) get_new_mmu_context(mm); cpu = smp_processor_id(); if (!cpu_isset(cpu, mm->cpu_vm_mask)) cpu_set(cpu, mm->cpu_vm_mask); - spin_unlock(&mm->page_table_lock); + spin_unlock_irqrestore(&mm->context.lock, flags); load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); -- cgit v1.2.3 From 36344762396ca868d6076c41a84bda25f1ed9d3c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 25 Feb 2006 17:16:29 -0800 Subject: [SPARC64]: Niagara optimized XOR functions for RAID. Signed-off-by: David S. 
Miller --- include/asm-sparc64/xor.h | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h index 8b3a7e4b606..8ce3f1813e2 100644 --- a/include/asm-sparc64/xor.h +++ b/include/asm-sparc64/xor.h @@ -2,9 +2,11 @@ * include/asm-sparc64/xor.h * * High speed xor_block operation for RAID4/5 utilizing the - * UltraSparc Visual Instruction Set. + * UltraSparc Visual Instruction Set and Niagara block-init + * twin-load instructions. * * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) + * Copyright (C) 2006 David S. Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -16,8 +18,7 @@ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include -#include +#include extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, @@ -37,4 +38,29 @@ static struct xor_block_template xor_block_VIS = { .do_5 = xor_vis_5, }; -#define XOR_TRY_TEMPLATES xor_speed(&xor_block_VIS) +extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); +extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, + unsigned long *); +extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *); +extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *, unsigned long *); + +static struct xor_block_template xor_block_niagara = { + .name = "Niagara", + .do_2 = xor_niagara_2, + .do_3 = xor_niagara_3, + .do_4 = xor_niagara_4, + .do_5 = xor_niagara_5, +}; + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_VIS); \ + xor_speed(&xor_block_niagara); \ + } while (0) + +/* For VIS for everything except Niagara. */ +#define XOR_SELECT_TEMPLATE(FASTEST) \ + (tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS) -- cgit v1.2.3 From 97c4b6f95afadea5846b78ce589d25de2a245c56 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 20:37:41 -0800 Subject: [SPARC64]: Use 13-bit context size always. We no longer have the problems that require using the smaller sizes. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index 1504d303a1d..da14a9bf0ed 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -6,19 +6,7 @@ #include #include -/* - * For the 8k pagesize kernel, use only 10 hw context bits to optimize some - * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically - * for vpte offset calculation). For other pagesizes, this optimization in - * the tlbhandlers can not be done; but still, all 13 bits can not be used - * because the tlb handlers use "andcc" instruction which sign extends 13 - * bit arguments. - */ -#if PAGE_SHIFT == 13 -#define CTX_NR_BITS 10 -#else -#define CTX_NR_BITS 12 -#endif +#define CTX_NR_BITS 13 #define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL)) -- cgit v1.2.3 From b830ab665ad96c6b20d51a89b35cbc09ab5a2c29 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 28 Feb 2006 15:10:26 -0800 Subject: [SPARC64]: Fix bugs in SUN4V cpu mondo dispatch. 
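Before the detailed description, a hedged sketch of the error triage this fix settles on. The hypercall wrappers and HV_* names follow asm-sparc64/hypervisor.h (the wrappers are the ones added by the diff below); the helper and its errno mapping are purely illustrative.

#include <asm/errno.h>
#include <asm/hypervisor.h>

/* Hedged sketch: one dispatch attempt and how its result is triaged.
 * The retry/timeout bookkeeping (forward-progress counting) is left out.
 */
extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
					  unsigned long cpu_list_pa,
					  unsigned long mondo_block_pa);
extern long sun4v_cpu_state(unsigned long cpuid);

static int mondo_send_once(unsigned long cnt, unsigned long list_pa,
			   unsigned long block_pa)
{
	unsigned long hv_err = sun4v_cpu_mondo_send(cnt, list_pa, block_pa);

	if (hv_err == HV_EOK)
		return 0;	/* every listed cpu took the mondo */
	if (hv_err == HV_EWOULDBLOCK)
		return -EAGAIN;	/* retry; time out only without progress */
	if (hv_err == HV_ECPUERROR)
		return -ENODEV;	/* probe each cpu via sun4v_cpu_state(),
				 * drop the ones in error state, retry */
	return -EIO;		/* anything else is fatal: report it */
}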
There were several bugs in the SUN4V cpu mondo dispatch code. In fact, if we ever got a EWOULDBLOCK or other error from the hypervisor call, we'd potentially send a cpu mondo multiple times to the same cpu and even worse we could loop until the timeout resending the same mondo over and over to such cpus. So let's bulletproof this thing as follows: 1) Implement cpu_mondo_send() and cpu_state() hypervisor calls in arch/sparc64/kernel/entry.S, add prototypes to asm/hypervisor.h 2) Don't build and update the cpulist using inline functions, this was causing the cpu mask to not get updated in the caller. 3) Disable interrupts during the entire mondo send, otherwise our cpu list and/or mondo block could get overwritten if we take an interrupt and do a cpu mondo send on the current cpu. 4) Check for all possible error return types from the cpu_mondo_send() hypervisor call. In particular: HV_EOK) Our work is done, all cpus have received the mondo. HV_CPUERROR) One or more of the cpus in the cpu list we passed to the hypervisor are in error state. Use cpu_state() calls over the entries in the cpu list to see which ones. Record them in "error_mask" and report this after we are done sending the mondo to cpus which are not in error state. HV_EWOULDBLOCK) We need to keep trying. Any other error we consider fatal, we report the event and exit immediately. 5) We only timeout if forward progress is not made. Forward progress is defined as having at least one cpu get the mondo successfully in a given cpu_mondo_send() call. Otherwise we bump a counter and delay a little. If the counter hits a limit, we signal an error and report the event. Also, smp_call_function_mask() error handling reports the number of cpus incorrectly. Signed-off-by: David S. Miller --- include/asm-sparc64/hypervisor.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 726e2ea03ce..612bf319753 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -342,6 +342,8 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type, * ENOCPU Invalid cpu in CPU list * EWOULDBLOCK Some or all of the listed CPUs did not receive * the mondo + * ECPUERROR One or more of the listed CPUs are in error + * state, use HV_FAST_CPU_STATE to see which ones * EINVAL CPU list includes caller's CPU ID * * Send a mondo interrupt to the CPUs in the given CPU list with the @@ -355,6 +357,10 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type, */ #define HV_FAST_CPU_MONDO_SEND 0x42 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa); +#endif + /* cpu_myid() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_CPU_MYID @@ -382,6 +388,10 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type, #define HV_CPU_STATE_RUNNING 0x02 #define HV_CPU_STATE_ERROR 0x03 +#ifndef __ASSEMBLY__ +extern long sun4v_cpu_state(unsigned long cpuid); +#endif + /* cpu_set_rtba() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_CPU_SET_RTBA -- cgit v1.2.3 From e22990451a6a6263250cdd267708548dfa08a8f2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Mar 2006 22:25:43 -0800 Subject: [SPARC64]: Kill bogus function externs in asm/pgtable.h These are all implemented inline earlier in the file. Signed-off-by: David S. 
Miller --- include/asm-sparc64/pgtable.h | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 6c8126b2dec..75a2cd2d7e8 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -646,27 +646,6 @@ static inline unsigned long pte_present(pte_t pte) /* Same in both SUN4V and SUN4U. */ #define pte_none(pte) (!pte_val(pte)) -extern unsigned long pte_present(pte_t); - -/* The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -extern unsigned long pte_read(pte_t); -extern unsigned long pte_exec(pte_t); -extern unsigned long pte_write(pte_t); -extern unsigned long pte_dirty(pte_t); -extern unsigned long pte_young(pte_t); -extern pte_t pte_wrprotect(pte_t); -extern pte_t pte_rdprotect(pte_t); -extern pte_t pte_mkclean(pte_t); -extern pte_t pte_mkold(pte_t); - -/* Be very careful when you change these three, they are delicate. */ -extern pte_t pte_mkyoung(pte_t); -extern pte_t pte_mkwrite(pte_t); -extern pte_t pte_mkdirty(pte_t); -extern pte_t pte_mkhuge(pte_t); - /* to find an entry in a page-table-directory. */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) @@ -766,9 +745,6 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); -/* Clear virtual and physical cachability, set side-effect bit. */ -extern pgprot_t pgprot_noncached(pgprot_t); - /* * For sparc32&64, the pfn in io_remap_pfn_range() carries in * its high 4 bits. These macros/functions put it there or get it from there. -- cgit v1.2.3 From 8bcd17411643beb9a601e032d0cf1016909a81d3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Mar 2006 18:12:27 -0800 Subject: [SPARC64]: Do not allow mapping pages within 4GB of 64-bit VA hole. The UltraSPARC T1 manual recommends this because the chip could instruction prefetch into the VA hole, and this would also make decoding certain kinds of memory access traps more difficult (because the chip sign extends certain pieces of trap state). Signed-off-by: David S. Miller --- include/asm-sparc64/a.out.h | 4 +++- include/asm-sparc64/page.h | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h index 02af289e3f4..faac321d559 100644 --- a/include/asm-sparc64/a.out.h +++ b/include/asm-sparc64/a.out.h @@ -95,7 +95,9 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ #ifdef __KERNEL__ -#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L) +#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ + 0xf0000000 : \ + (0x0000080000000000UL - (1UL << 32UL))) #endif diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 5426bb28a99..c277ac56b9d 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -107,7 +107,8 @@ typedef unsigned long pgprot_t; #endif #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ - (_AC(0x0000000070000000,UL)) : (PAGE_OFFSET)) + (_AC(0x0000000070000000,UL)) : \ + (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From a77754b4d0731321db266c6c60ffcd7c62757da5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 19:59:50 -0800 Subject: [SPARC64]: Bulletproof MMU context locking. 
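As a hedged illustration of point 1 below: the mm_context_t field names match the earlier mmu.h diff, but the function body is a sketch, not the kernel's exact code.

/* Hedged sketch: whether the new mm was zeroed or copied from its
 * parent, the embedded spinlock is garbage and must be re-initialized.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);
	mm->context.sparc64_ctx_val = 0UL;	/* no hw context yet */
	return 0;
}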
1) Always spin_lock_init() in init_context(). The caller essentially clears it out, or copies the mm info from the parent. In both cases we need to explicitly initialize the spinlock. 2) Always do explicit IRQ disabling while taking mm->context.lock and ctx_alloc_lock. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu_context.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 4be40c58e3c..ca36ea96f64 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -67,14 +67,14 @@ extern void __flush_tlb_mm(unsigned long, unsigned long); /* Switch the current MM context. Interrupts are disabled. */ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) { - unsigned long ctx_valid; + unsigned long ctx_valid, flags; int cpu; - spin_lock(&mm->context.lock); + spin_lock_irqsave(&mm->context.lock, flags); ctx_valid = CTX_VALID(mm->context); if (!ctx_valid) get_new_mmu_context(mm); - spin_unlock(&mm->context.lock); + spin_unlock_irqrestore(&mm->context.lock, flags); if (!ctx_valid || (old_mm != mm)) { load_secondary_context(mm); -- cgit v1.2.3 From ee29074d3bd23848905f52c515974e0cd0219faa Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 22:50:44 -0800 Subject: [SPARC64]: Fix new context version SMP handling. Don't piggyback the SMP receive signal code to do the context version change handling. Instead allocate another fixed PIL number for this asynchronous cross-call. We can't use smp_call_function() because this thing is invoked with interrupts disabled and a few spinlocks held. Also, fix smp_call_function_mask() to count "cpus" correctly. There is no guarantee that the local cpu is in the mask, yet that is exactly what this code was assuming. Signed-off-by: David S. Miller --- include/asm-sparc64/pil.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h index 8f87750c351..79f827eb3f5 100644 --- a/include/asm-sparc64/pil.h +++ b/include/asm-sparc64/pil.h @@ -16,11 +16,13 @@ #define PIL_SMP_CALL_FUNC 1 #define PIL_SMP_RECEIVE_SIGNAL 2 #define PIL_SMP_CAPTURE 3 +#define PIL_SMP_CTX_NEW_VERSION 4 #ifndef __ASSEMBLY__ #define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \ (PIL) == PIL_SMP_RECEIVE_SIGNAL || \ - (PIL) == PIL_SMP_CAPTURE) + (PIL) == PIL_SMP_CAPTURE || \ + (PIL) == PIL_SMP_CTX_NEW_VERSION) #endif #endif /* !(_SPARC64_PIL_H) */ -- cgit v1.2.3 From d1112018b4bc82adf5c8a9c15a08954328f023ae Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 02:16:07 -0800 Subject: [SPARC64]: Move over to sparsemem. This has been pending for a long time, and the fact that we waste a ton of RAM on some configurations kind of pushed things over the edge. Signed-off-by: David S.
Miller --- include/asm-sparc64/numnodes.h | 6 ++++++ include/asm-sparc64/page.h | 9 +-------- include/asm-sparc64/pgtable.h | 3 --- include/asm-sparc64/sparsemem.h | 12 ++++++++++++ 4 files changed, 19 insertions(+), 11 deletions(-) create mode 100644 include/asm-sparc64/numnodes.h create mode 100644 include/asm-sparc64/sparsemem.h (limited to 'include') diff --git a/include/asm-sparc64/numnodes.h b/include/asm-sparc64/numnodes.h new file mode 100644 index 00000000000..017e7e74f5e --- /dev/null +++ b/include/asm-sparc64/numnodes.h @@ -0,0 +1,6 @@ +#ifndef _SPARC64_NUMNODES_H +#define _SPARC64_NUMNODES_H + +#define NODES_SHIFT 0 + +#endif /* !(_SPARC64_NUMNODES_H) */ diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index c277ac56b9d..f6b49256fe2 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -125,17 +125,10 @@ typedef unsigned long pgprot_t; #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) -/* PFNs are real physical page numbers. However, mem_map only begins to record - * per-page information starting at pfn_base. This is to handle systems where - * the first physical page in the machine is at some huge physical address, - * such as 4GB. This is common on a partitioned E10000, for example. - */ -extern struct page *pfn_to_page(unsigned long pfn); -extern unsigned long page_to_pfn(struct page *); +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT) -#define pfn_valid(pfn) (((pfn)-(pfn_base)) < max_mapnr) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define virt_to_phys __pa diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 75a2cd2d7e8..d427ce64921 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -217,9 +217,6 @@ extern unsigned long pg_iobits; extern unsigned long _PAGE_ALL_SZ_BITS; extern unsigned long _PAGE_SZBITS; -extern unsigned long phys_base; -extern unsigned long pfn_base; - extern struct page *mem_map_zero; #define ZERO_PAGE(vaddr) (mem_map_zero) diff --git a/include/asm-sparc64/sparsemem.h b/include/asm-sparc64/sparsemem.h new file mode 100644 index 00000000000..ed5c9d8541e --- /dev/null +++ b/include/asm-sparc64/sparsemem.h @@ -0,0 +1,12 @@ +#ifndef _SPARC64_SPARSEMEM_H +#define _SPARC64_SPARSEMEM_H + +#ifdef __KERNEL__ + +#define SECTION_SIZE_BITS 26 +#define MAX_PHYSADDR_BITS 42 +#define MAX_PHYSMEM_BITS 42 + +#endif /* !(__KERNEL__) */ + +#endif /* !(_SPARC64_SPARSEMEM_H) */ -- cgit v1.2.3 From 8935dced547afbf37d0fcfcac9a3556494e53104 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 16:09:19 -0800 Subject: [SPARC64]: Add SMT scheduling support for Niagara. The mapping is a simple "(cpuid >> 2) == core" for now. Later we'll add more sophisticated code that will walk the sun4v machine description and figure this out from there. We should also add core mappings for jaguar and panther processors. Signed-off-by: David S. Miller --- include/asm-sparc64/smp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index ad1d35a7d13..89d86ecaab2 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -33,6 +33,8 @@ extern cpumask_t phys_cpu_present_map; #define cpu_possible_map phys_cpu_present_map +extern cpumask_t cpu_sibling_map[NR_CPUS]; + /* * General functions that each host system must provide. 
*/ -- cgit v1.2.3 From 90a6646bf6a1ca821f32d5510e935855612904df Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 17:18:19 -0800 Subject: [SPARC64]: Fix system type in /proc/cpuinfo and remove bogus OBP check. Report 'sun4v' when appropriate in /proc/cpuinfo. Remove all the verifications of the OBP version string. Just make sure it's there, and report it raw in the bootup logs and via /proc/cpuinfo. Signed-off-by: David S. Miller --- include/asm-sparc64/oplib.h | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 84618f8a9e6..c754676e13e 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -12,18 +12,8 @@ #include #include -/* Enumeration to describe the prom major version we have detected. */ -enum prom_major_version { - PROM_V0, /* Original sun4c V0 prom */ - PROM_V2, /* sun4c and early sun4m V2 prom */ - PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */ - PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */ - PROM_AP1000, /* actually no prom at all */ -}; - -extern enum prom_major_version prom_vers; -/* Revision, and firmware revision. */ -extern unsigned int prom_rev, prom_prev; +/* OBP version string. */ +extern char prom_version[]; /* Root node of the prom device tree, this stays constant after * initialization is complete. @@ -133,15 +123,6 @@ extern void prom_setcallback(callback_func_t func_ptr); */ extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); -/* Get the prom major version. */ -extern int prom_version(void); - -/* Get the prom plugin revision. */ -extern int prom_getrev(void); - -/* Get the prom firmware revision. */ -extern int prom_getprev(void); - /* Character operations to/from the console.... */ /* Non-blocking get character from console. */ -- cgit v1.2.3 From 7a1ac5264108fc3ed22d17a3cdd76212ed1666d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Mar 2006 02:02:32 -0800 Subject: [SPARC64]: Fix and re-enable dynamic TSB sizing. This is good for up to a 50% performance improvement on some test cases. The problem has been the race conditions, and hopefully I've plugged them all up here. 1) There was a serious race in switch_mm() wrt. lazy TLB switching to and from kernel threads. We could erroneously skip a tsb_context_switch() and thus use a stale TSB across a TSB grow event. There is a big comment now in that function describing exactly how it can happen. 2) All code paths that do something with the TSB need to be guarded with the mm->context.lock spinlock. This makes page table flushing paths properly synchronize with both TSB growing and TLB context changes. 3) TSB growing events are moved to the end of successful fault processing. Previously it was in update_mmu_cache() but that is deadlock-prone. At the end of do_sparc64_fault() we hold no spinlocks that could deadlock the TSB grow sequence. We also have dropped the address space semaphore. While we're here, add prefetching to the copy_tsb() routine and put it in assembler into the tsb.S file. This piece of code is quite time critical. There are some small negative side effects to this code which can be improved upon. In particular we grab the mm->context.lock even for the tsb insert done by update_mmu_cache() now and that's a bit excessive.
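For reference, a hedged sketch of that insert-time locking; __tsb_insert() and mm->context.lock are the ones from the diffs, while the wrapper itself is illustrative.

/* Hedged sketch: the TSB insert is bracketed by mm->context.lock so
 * it cannot race with a concurrent tsb_grow() swapping the table out.
 * Computing ent_pa/tag/pte from the faulting address is elided.
 */
static void tsb_insert_sketch(struct mm_struct *mm, unsigned long ent_pa,
			      unsigned long tag, unsigned long pte)
{
	unsigned long flags;

	spin_lock_irqsave(&mm->context.lock, flags);
	__tsb_insert(ent_pa, tag, pte);
	spin_unlock_irqrestore(&mm->context.lock, flags);
}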
We can get rid of that locking, and the same lock taking in flush_tsb_user(), by disabling PSTATE_IE around the whole operation including the capturing of the tsb pointer and tsb_nentries value. That would work because anyone growing the TSB won't free up the old TSB until all cpus respond to the TSB change cross call. I'm not quite so confident in that optimization to put it in right now, but eventually we might be able to and the description is here for reference. This code seems very solid now. It passes several parallel GCC bootstrap builds, and our favorite "nut cruncher" stress test which is a full "make -j8192" build of a "make allmodconfig" kernel. That puts about 256 processes on each cpu's run queue, makes lots of process cpu migrations occur, causes lots of page table and TLB flushing activity, incurs many context version number changes, and it swaps the machine real far out to disk even though there is 16GB of ram on this test system. :-) Signed-off-by: David S. Miller --- include/asm-sparc64/mmu_context.h | 50 +++++++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index ca36ea96f64..e7974321d05 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -42,7 +42,7 @@ static inline void tsb_context_switch(struct mm_struct *mm) __pa(&mm->context.tsb_descr)); } -extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags); +extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss); #ifdef CONFIG_SMP extern void smp_tsb_sync(struct mm_struct *mm); #else @@ -74,18 +74,43 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str ctx_valid = CTX_VALID(mm->context); if (!ctx_valid) get_new_mmu_context(mm); - spin_unlock_irqrestore(&mm->context.lock, flags); - if (!ctx_valid || (old_mm != mm)) { - load_secondary_context(mm); - tsb_context_switch(mm); - } + /* We have to be extremely careful here or else we will miss + * a TSB grow if we switch back and forth between a kernel + * thread and an address space which has it's TSB size increased + * on another processor. + * + * It is possible to play some games in order to optimize the + * switch, but the safest thing to do is to unconditionally + * perform the secondary context load and the TSB context switch. + * + * For reference the bad case is, for address space "A": + * + * CPU 0 CPU 1 + * run address space A + * set cpu0's bits in cpu_vm_mask + * switch to kernel thread, borrow + * address space A via entry_lazy_tlb + * run address space A + * set cpu1's bit in cpu_vm_mask + * flush_tlb_pending() + * reset cpu_vm_mask to just cpu1 + * TSB grow + * run address space A + * context was valid, so skip + * TSB context switch + * + * At that point cpu0 continues to use a stale TSB, the one from + * before the TSB grow performed on cpu1. cpu1 did not cross-call + * cpu0 to update it's TSB because at that point the cpu_vm_mask + * only had cpu1 set in it. + */ + load_secondary_context(mm); + tsb_context_switch(mm); - /* Even if (mm == old_mm) we _must_ check - * the cpu_vm_mask. If we do not we could - * corrupt the TLB state because of how - * smp_flush_tlb_{page,range,mm} on sparc64 - * and lazy tlb switches work. -DaveM + /* Any time a processor runs a context on an address space + * for the first time, we must flush that context out of the + * local TLB. 
*/ cpu = smp_processor_id(); if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) { @@ -93,6 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); } + spin_unlock_irqrestore(&mm->context.lock, flags); } #define deactivate_mm(tsk,mm) do { } while (0) @@ -109,11 +135,11 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm cpu = smp_processor_id(); if (!cpu_isset(cpu, mm->cpu_vm_mask)) cpu_set(cpu, mm->cpu_vm_mask); - spin_unlock_irqrestore(&mm->context.lock, flags); load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); tsb_context_switch(mm); + spin_unlock_irqrestore(&mm->context.lock, flags); } #endif /* !(__ASSEMBLY__) */ -- cgit v1.2.3 From a91690ddd05ab0b7fbdd37733875525ac75c20f2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Mar 2006 14:41:03 -0800 Subject: [SPARC64]: Top-down address space allocation for 32-bit tasks. Currently allocations are very constrained for 32-bit processes. The mmap area grows bottom-up from 0x70000000 to 0xf0000000, which gives about 2GB of stack + dynamic mmap() space. So support the top-down method; we need to override the generic helper function in order to deal with D-cache coloring. With these changes I was able to squeeze out a mmap() just over 3.6GB in size in a 32-bit process. Signed-off-by: David S. Miller --- include/asm-sparc64/pgtable.h | 5 ++++- include/asm-sparc64/processor.h | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index d427ce64921..ed4124edf83 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -752,8 +752,11 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, #include -/* We provide our own get_unmapped_area to cope with VA holes for userland */ +/* We provide our own get_unmapped_area to cope with VA holes and + * SHM area cache aliasing for userland. + */ #define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN /* We provide a special get_unmapped_area for framebuffer mmaps to try and use * the largest alignment possible such that larget PTEs can be used. diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index 685479fb436..c6896b88283 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -217,6 +217,8 @@ static inline void prefetchw(const void *x) #define spin_lock_prefetch(x) prefetchw(x) +#define HAVE_ARCH_PICK_MMAP_LAYOUT + #endif /* !(__ASSEMBLY__) */ #endif /* !(__ASM_SPARC64_PROCESSOR_H) */ -- cgit v1.2.3 From d61e16df940e02e25679bdc1aee8c25786f6de90 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Mar 2006 17:33:56 -0800 Subject: [SPARC64]: Increase top of 32-bit process stack. Put it one page below the top of the 32-bit address space. This gives us ~16MB more address space to work with. Signed-off-by: David S.
Miller --- include/asm-sparc64/a.out.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc64/a.out.h b/include/asm-sparc64/a.out.h index faac321d559..35cb5c9e0c9 100644 --- a/include/asm-sparc64/a.out.h +++ b/include/asm-sparc64/a.out.h @@ -95,9 +95,11 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */ #ifdef __KERNEL__ +#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE) +#define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL)) + #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ - 0xf0000000 : \ - (0x0000080000000000UL - (1UL << 32UL))) + STACK_TOP32 : STACK_TOP64) #endif -- cgit v1.2.3 From bb8646d8340fa7c1b66a037428e39f85f8738f0a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 23:55:11 -0800 Subject: [SPARC64]: Optimized TSB table initialization. We only need to write an invalid tag every 16 bytes, so taking advantage of this can save many instructions compared to the simple memset() call we make now. A prefetching version is implemented for sun4u and a block-init store version is implemented for Niagara. The next trick is to be able to perform an init and a copy_tsb() in parallel when growing a TSB table. Signed-off-by: David S. Miller --- include/asm-sparc64/mmu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h index da14a9bf0ed..230ba678d3b 100644 --- a/include/asm-sparc64/mmu.h +++ b/include/asm-sparc64/mmu.h @@ -88,6 +88,7 @@ struct tsb { extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte); extern void tsb_flush(unsigned long ent, unsigned long tag); +extern void tsb_init(struct tsb *tsb, unsigned long size); typedef struct { spinlock_t lock; -- cgit v1.2.3 From f6b83f070e9b7ad9075f7cc5646260e56c7d0219 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Mar 2006 01:17:17 -0800 Subject: [SPARC64]: Fix 2 bugs in huge page support. 1) huge_pte_offset() did not correctly check the page table hierarchy elements for being empty, resulting in an OOPS. 2) We need a platform-specific hugetlb_get_unmapped_area() to handle the top-down vs. bottom-up address space allocation strategies. Signed-off-by: David S. Miller --- include/asm-sparc64/page.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index f6b49256fe2..fcb2812265f 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -104,6 +104,7 @@ typedef unsigned long pgprot_t; #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define ARCH_HAS_SETCLEAR_HUGE_PTE #define ARCH_HAS_HUGETLB_PREFAULT_HOOK +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ -- cgit v1.2.3
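To close the section, a hedged sketch of the huge_pte_offset() shape the first fix implies; the walk uses the generic pgtable API and is illustrative rather than the literal sparc64 function.

/* Hedged sketch: each level is tested with *_none() before
 * descending, so a missing level yields NULL instead of an OOPS.
 */
static pte_t *huge_pte_offset_sketch(struct mm_struct *mm,
				     unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				return pte_offset_map(pmd, addr);
		}
	}
	return NULL;
}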