author    | Chris Zankel <chris@zankel.net> | 2009-04-03 02:29:05 -0700
committer | Chris Zankel <chris@zankel.net> | 2009-04-03 02:29:05 -0700
commit    | 65127d28e312bb6b38ce84a7bb71d762ef63ad4c (patch)
tree      | d5fdf52a2d0731f7fab0ce0ed394faac50b04fbc /drivers
parent    | b8bb76713ec50df2f11efee386e16f93d51e1076 (diff)
parent    | 8fe74cf053de7ad2124a894996f84fa890a81093 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into merge
Diffstat (limited to 'drivers')
121 files changed, 28381 insertions, 3571 deletions
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index c2c95e61450..1300df6f164 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -177,6 +177,7 @@ static int print_unex = 1; #include <linux/interrupt.h> #include <linux/init.h> #include <linux/platform_device.h> +#include <linux/mod_devicetable.h> #include <linux/buffer_head.h> /* for invalidate_buffers() */ #include <linux/mutex.h> @@ -4597,6 +4598,13 @@ MODULE_AUTHOR("Alain L. Knaff"); MODULE_SUPPORTED_DEVICE("fd"); MODULE_LICENSE("GPL"); +/* This doesn't actually get used other than for module information */ +static const struct pnp_device_id floppy_pnpids[] = { + { "PNP0700", 0 }, + { } +}; +MODULE_DEVICE_TABLE(pnp, floppy_pnpids); + #else __setup("floppy=", floppy_setup); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 8299e2d3b61..4d6de4f15cc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -4,7 +4,7 @@ * Note that you can not swap over this thing, yet. Seems to work but * deadlocks sometimes - you can not swap over TCP in general. * - * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz> + * Copyright 1997-2000, 2008 Pavel Machek <pavel@suse.cz> * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> * * This file is released under GPLv2 or later. @@ -276,7 +276,7 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req) return 0; error_out: - return 1; + return -EIO; } static struct request *nbd_find_request(struct nbd_device *lo, @@ -467,9 +467,7 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req) mutex_unlock(&lo->tx_lock); printk(KERN_ERR "%s: Attempted send on closed socket\n", lo->disk->disk_name); - req->errors++; - nbd_end_request(req); - return; + goto error_out; } lo->active_req = req; @@ -531,7 +529,7 @@ static int nbd_thread(void *data) * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } */ -static void do_nbd_request(struct request_queue * q) +static void do_nbd_request(struct request_queue *q) { struct request *req; @@ -568,27 +566,17 @@ static void do_nbd_request(struct request_queue * q) } } -static int nbd_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct nbd_device *lo = bdev->bd_disk->private_data; - struct file *file; - int error; - struct request sreq ; - struct task_struct *thread; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - BUG_ON(lo->magic != LO_MAGIC); - - /* Anyone capable of this syscall can do *real bad* things */ - dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", - lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); +/* Must be called with tx_lock held */ +static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, + unsigned int cmd, unsigned long arg) +{ switch (cmd) { - case NBD_DISCONNECT: + case NBD_DISCONNECT: { + struct request sreq; + printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); + blk_rq_init(NULL, &sreq); sreq.cmd_type = REQ_TYPE_SPECIAL; nbd_cmd(&sreq) = NBD_CMD_DISC; @@ -599,29 +587,29 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, */ sreq.sector = 0; sreq.nr_sectors = 0; - if (!lo->sock) + if (!lo->sock) return -EINVAL; - mutex_lock(&lo->tx_lock); - nbd_send_req(lo, &sreq); - mutex_unlock(&lo->tx_lock); + nbd_send_req(lo, &sreq); return 0; + } - case NBD_CLEAR_SOCK: - error = 0; - mutex_lock(&lo->tx_lock); + case NBD_CLEAR_SOCK: { + struct file *file; + lo->sock = NULL; - mutex_unlock(&lo->tx_lock); file = lo->file; lo->file = NULL; nbd_clear_que(lo); 
BUG_ON(!list_empty(&lo->queue_head)); if (file) fput(file); - return error; - case NBD_SET_SOCK: + return 0; + } + + case NBD_SET_SOCK: { + struct file *file; if (lo->file) return -EBUSY; - error = -EINVAL; file = fget(arg); if (file) { struct inode *inode = file->f_path.dentry->d_inode; @@ -630,12 +618,14 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, lo->sock = SOCKET_I(inode); if (max_part > 0) bdev->bd_invalidated = 1; - error = 0; + return 0; } else { fput(file); } } - return error; + return -EINVAL; + } + case NBD_SET_BLKSIZE: lo->blksize = arg; lo->bytesize &= ~(lo->blksize-1); @@ -643,35 +633,50 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, set_blocksize(bdev, lo->blksize); set_capacity(lo->disk, lo->bytesize >> 9); return 0; + case NBD_SET_SIZE: lo->bytesize = arg & ~(lo->blksize-1); bdev->bd_inode->i_size = lo->bytesize; set_blocksize(bdev, lo->blksize); set_capacity(lo->disk, lo->bytesize >> 9); return 0; + case NBD_SET_TIMEOUT: lo->xmit_timeout = arg * HZ; return 0; + case NBD_SET_SIZE_BLOCKS: lo->bytesize = ((u64) arg) * lo->blksize; bdev->bd_inode->i_size = lo->bytesize; set_blocksize(bdev, lo->blksize); set_capacity(lo->disk, lo->bytesize >> 9); return 0; - case NBD_DO_IT: + + case NBD_DO_IT: { + struct task_struct *thread; + struct file *file; + int error; + if (lo->pid) return -EBUSY; if (!lo->file) return -EINVAL; + + mutex_unlock(&lo->tx_lock); + thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); - if (IS_ERR(thread)) + if (IS_ERR(thread)) { + mutex_lock(&lo->tx_lock); return PTR_ERR(thread); + } wake_up_process(thread); error = nbd_do_it(lo); kthread_stop(thread); + + mutex_lock(&lo->tx_lock); if (error) return error; - sock_shutdown(lo, 1); + sock_shutdown(lo, 0); file = lo->file; lo->file = NULL; nbd_clear_que(lo); @@ -684,6 +689,8 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, if (max_part > 0) ioctl_by_bdev(bdev, BLKRRPART, 0); return lo->harderror; + } + case NBD_CLEAR_QUE: /* * This is for compatibility only. 
The queue is always cleared @@ -691,6 +698,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, */ BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); return 0; + case NBD_PRINT_DEBUG: printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", bdev->bd_disk->disk_name, @@ -698,7 +706,29 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, &lo->queue_head); return 0; } - return -EINVAL; + return -ENOTTY; +} + +static int nbd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct nbd_device *lo = bdev->bd_disk->private_data; + int error; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + BUG_ON(lo->magic != LO_MAGIC); + + /* Anyone capable of this syscall can do *real bad* things */ + dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", + lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); + + mutex_lock(&lo->tx_lock); + error = __nbd_ioctl(bdev, lo, cmd, arg); + mutex_unlock(&lo->tx_lock); + + return error; } static struct block_device_operations nbd_fops = diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 32b8bbf5003..50dfa3bc71c 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -713,7 +713,7 @@ static struct ctl_table_header *sysctl_header; */ #define TICK_CALIBRATE (1000UL) -static unsigned long hpet_calibrate(struct hpets *hpetp) +static unsigned long __hpet_calibrate(struct hpets *hpetp) { struct hpet_timer __iomem *timer = NULL; unsigned long t, m, count, i, flags, start; @@ -750,6 +750,26 @@ static unsigned long hpet_calibrate(struct hpets *hpetp) return (m - start) / i; } +static unsigned long hpet_calibrate(struct hpets *hpetp) +{ + unsigned long ret = -1; + unsigned long tmp; + + /* + * Try to calibrate until return value becomes stable small value. + * If SMI interruption occurs in calibration loop, the return value + * will be big. This avoids its impact. 
+ */ + for ( ; ; ) { + tmp = __hpet_calibrate(hpetp); + if (ret <= tmp) + break; + ret = tmp; + } + + return ret; +} + int hpet_alloc(struct hpet_data *hdp) { u64 cap, mcfg; diff --git a/drivers/char/random.c b/drivers/char/random.c index 7c43ae782b2..f824ef8a927 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1488,7 +1488,8 @@ static void rekey_seq_generator(struct work_struct *work) keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS; smp_wmb(); ip_cnt++; - schedule_delayed_work(&rekey_work, REKEY_INTERVAL); + schedule_delayed_work(&rekey_work, + round_jiffies_relative(REKEY_INTERVAL)); } static inline struct keydata *get_keyptr(void) diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 6ec6e13d47d..5e256494686 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -298,6 +298,7 @@ struct slgt_info { unsigned int rbuf_fill_level; unsigned int if_mode; + unsigned int base_clock; /* device status */ @@ -1156,22 +1157,26 @@ static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *ne return -EFAULT; spin_lock(&info->lock); - info->params.mode = tmp_params.mode; - info->params.loopback = tmp_params.loopback; - info->params.flags = tmp_params.flags; - info->params.encoding = tmp_params.encoding; - info->params.clock_speed = tmp_params.clock_speed; - info->params.addr_filter = tmp_params.addr_filter; - info->params.crc_type = tmp_params.crc_type; - info->params.preamble_length = tmp_params.preamble_length; - info->params.preamble = tmp_params.preamble; - info->params.data_rate = tmp_params.data_rate; - info->params.data_bits = tmp_params.data_bits; - info->params.stop_bits = tmp_params.stop_bits; - info->params.parity = tmp_params.parity; + if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) { + info->base_clock = tmp_params.clock_speed; + } else { + info->params.mode = tmp_params.mode; + info->params.loopback = tmp_params.loopback; + info->params.flags = tmp_params.flags; + info->params.encoding = tmp_params.encoding; + info->params.clock_speed = tmp_params.clock_speed; + info->params.addr_filter = tmp_params.addr_filter; + info->params.crc_type = tmp_params.crc_type; + info->params.preamble_length = tmp_params.preamble_length; + info->params.preamble = tmp_params.preamble; + info->params.data_rate = tmp_params.data_rate; + info->params.data_bits = tmp_params.data_bits; + info->params.stop_bits = tmp_params.stop_bits; + info->params.parity = tmp_params.parity; + } spin_unlock(&info->lock); - change_params(info); + program_hw(info); return 0; } @@ -2559,10 +2564,13 @@ static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params) return -EFAULT; spin_lock_irqsave(&info->lock, flags); - memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS)); + if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) + info->base_clock = tmp_params.clock_speed; + else + memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS)); spin_unlock_irqrestore(&info->lock, flags); - change_params(info); + program_hw(info); return 0; } @@ -3432,6 +3440,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev info->magic = MGSL_MAGIC; INIT_WORK(&info->task, bh_handler); info->max_frame_size = 4096; + info->base_clock = 14745600; info->rbuf_fill_level = DMABUFSIZE; info->port.close_delay = 5*HZ/10; info->port.closing_wait = 30*HZ; @@ -3779,7 +3788,7 @@ static void enable_loopback(struct slgt_info *info) static void set_rate(struct slgt_info *info, u32 rate) { unsigned int div; - static unsigned int osc = 14745600; + 
unsigned int osc = info->base_clock; /* div = osc/rate - 1 * @@ -4083,18 +4092,27 @@ static void async_mode(struct slgt_info *info) * 06 CTS IRQ enable * 05 DCD IRQ enable * 04 RI IRQ enable - * 03 reserved, must be zero + * 03 0=16x sampling, 1=8x sampling * 02 1=txd->rxd internal loopback enable * 01 reserved, must be zero * 00 1=master IRQ enable */ val = BIT15 + BIT14 + BIT0; + /* JCR[8] : 1 = x8 async mode feature available */ + if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate && + ((info->base_clock < (info->params.data_rate * 16)) || + (info->base_clock % (info->params.data_rate * 16)))) { + /* use 8x sampling */ + val |= BIT3; + set_rate(info, info->params.data_rate * 8); + } else { + /* use 16x sampling */ + set_rate(info, info->params.data_rate * 16); + } wr_reg16(info, SCR, val); slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER); - set_rate(info, info->params.data_rate * 16); - if (info->params.loopback) enable_loopback(info); } diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c index 34ab6d798f8..55ba6f14288 100644 --- a/drivers/char/tty_audit.c +++ b/drivers/char/tty_audit.c @@ -10,8 +10,6 @@ */ #include <linux/audit.h> -#include <linux/file.h> -#include <linux/fdtable.h> #include <linux/tty.h> struct tty_audit_buf { diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 33dac94922a..66b99a2049e 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -1758,7 +1758,7 @@ static int __tty_open(struct inode *inode, struct file *filp) struct tty_driver *driver; int index; dev_t device = inode->i_rdev; - unsigned short saved_flags = filp->f_flags; + unsigned saved_flags = filp->f_flags; nonseekable_open(inode, filp); @@ -2681,7 +2681,7 @@ void __do_SAK(struct tty_struct *tty) /* Kill the entire session */ do_each_pid_task(session, PIDTYPE_SID, p) { printk(KERN_NOTICE "SAK: killed process %d" - " (%s): task_session_nr(p)==tty->session\n", + " (%s): task_session(p)==tty->session\n", task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); } while_each_pid_task(session, PIDTYPE_SID, p); @@ -2691,7 +2691,7 @@ void __do_SAK(struct tty_struct *tty) do_each_thread(g, p) { if (p->signal->tty == tty) { printk(KERN_NOTICE "SAK: killed process %d" - " (%s): task_session_nr(p)==tty->session\n", + " (%s): task_session(p)==tty->session\n", task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); continue; diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index 7a84b406a95..f78f5b0127a 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -10,7 +10,6 @@ #include <linux/tty_flip.h> #include <linux/devpts_fs.h> #include <linux/file.h> -#include <linux/fdtable.h> #include <linux/console.h> #include <linux/timer.h> #include <linux/ctype.h> diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 0c79fe7f156..4d85402a9e4 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -1882,7 +1882,7 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) static void hifn_work(struct work_struct *work) { - struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct delayed_work *dw = to_delayed_work(work); struct hifn_device *dev = container_of(dw, struct hifn_device, work); unsigned long flags; int reset = 0; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index eee47fd16d7..e5f5c5a8ba6 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -1,13 +1,12 @@ # # EDAC Kconfig -# Copyright (c) 2003 Linux Networx +# Copyright (c) 2008 Doug Thompson 
www.softwarebitmaker.com # Licensed and distributed under the GPL # menuconfig EDAC - bool "EDAC - error detection and reporting (EXPERIMENTAL)" + bool "EDAC - error detection and reporting" depends on HAS_IOMEM - depends on EXPERIMENTAL depends on X86 || PPC help EDAC is designed to report errors in the core system. @@ -40,6 +39,14 @@ config EDAC_DEBUG there're four debug levels (x=0,1,2,3 from low to high). Usually you should select 'N'. +config EDAC_DEBUG_VERBOSE + bool "More verbose debugging" + depends on EDAC_DEBUG + help + This option makes debugging information more verbose. + Source file name and line number where debugging message + printed will be added to debugging message. + config EDAC_MM_EDAC tristate "Main Memory EDAC (Error Detection And Correction) reporting" default y @@ -174,4 +181,27 @@ config EDAC_CELL Cell Broadband Engine internal memory controller on platform without a hypervisor +config EDAC_PPC4XX + tristate "PPC4xx IBM DDR2 Memory Controller" + depends on EDAC_MM_EDAC && 4xx + help + This enables support for EDAC on the ECC memory used + with the IBM DDR2 memory controller found in various + PowerPC 4xx embedded processors such as the 405EX[r], + 440SP, 440SPe, 460EX, 460GT and 460SX. + +config EDAC_AMD8131 + tristate "AMD8131 HyperTransport PCI-X Tunnel" + depends on EDAC_MM_EDAC && PCI + help + Support for error detection and correction on the + AMD8131 HyperTransport PCI-X Tunnel chip. + +config EDAC_AMD8111 + tristate "AMD8111 HyperTransport I/O Hub" + depends on EDAC_MM_EDAC && PCI + help + Support for error detection and correction on the + AMD8111 HyperTransport I/O Hub chip. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index b75196927de..a5fdcf02f59 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -34,4 +34,4 @@ obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o - +obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c new file mode 100644 index 00000000000..61469218112 --- /dev/null +++ b/drivers/edac/amd8111_edac.c @@ -0,0 +1,595 @@ +/* + * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module + * + * Copyright (c) 2008 Wind River Systems, Inc. + * + * Authors: Cao Qingtao <qingtao.cao@windriver.com> + * Benjamin Walsh <benjamin.walsh@windriver.com> + * Hu Yongqi <yongqi.hu@windriver.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/bitops.h> +#include <linux/edac.h> +#include <linux/pci_ids.h> +#include <asm/io.h> + +#include "edac_core.h" +#include "edac_module.h" +#include "amd8111_edac.h" + +#define AMD8111_EDAC_REVISION " Ver: 1.0.0 " __DATE__ +#define AMD8111_EDAC_MOD_STR "amd8111_edac" + +#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 +static int edac_dev_idx; + +enum amd8111_edac_devs { + LPC_BRIDGE = 0, +}; + +enum amd8111_edac_pcis { + PCI_BRIDGE = 0, +}; + +/* Wrapper functions for accessing PCI configuration space */ +static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32) +{ + int ret; + + ret = pci_read_config_dword(dev, reg, val32); + if (ret != 0) + printk(KERN_ERR AMD8111_EDAC_MOD_STR + " PCI Access Read Error at 0x%x\n", reg); + + return ret; +} + +static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8) +{ + int ret; + + ret = pci_read_config_byte(dev, reg, val8); + if (ret != 0) + printk(KERN_ERR AMD8111_EDAC_MOD_STR + " PCI Access Read Error at 0x%x\n", reg); +} + +static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32) +{ + int ret; + + ret = pci_write_config_dword(dev, reg, val32); + if (ret != 0) + printk(KERN_ERR AMD8111_EDAC_MOD_STR + " PCI Access Write Error at 0x%x\n", reg); +} + +static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8) +{ + int ret; + + ret = pci_write_config_byte(dev, reg, val8); + if (ret != 0) + printk(KERN_ERR AMD8111_EDAC_MOD_STR + " PCI Access Write Error at 0x%x\n", reg); +} + +/* + * device-specific methods for amd8111 PCI Bridge Controller + * + * Error Reporting and Handling for amd8111 chipset could be found + * in its datasheet 3.1.2 section, P37 + */ +static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info) +{ + u32 val32; + struct pci_dev *dev = pci_info->dev; + + /* First clear error detection flags on the host interface */ + + /* Clear SSE/SMA/STA flags in the global status register*/ + edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); + if (val32 & PCI_STSCMD_CLEAR_MASK) + edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); + + /* Clear CRC and Link Fail flags in HT Link Control reg */ + edac_pci_read_dword(dev, REG_HT_LINK, &val32); + if (val32 & HT_LINK_CLEAR_MASK) + edac_pci_write_dword(dev, REG_HT_LINK, val32); + + /* Second clear all fault on the secondary interface */ + + /* Clear error flags in the memory-base limit reg. 
*/ + edac_pci_read_dword(dev, REG_MEM_LIM, &val32); + if (val32 & MEM_LIMIT_CLEAR_MASK) + edac_pci_write_dword(dev, REG_MEM_LIM, val32); + + /* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */ + edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); + if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK) + edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); + + /* Last enable error detections */ + if (edac_op_state == EDAC_OPSTATE_POLL) { + /* Enable System Error reporting in global status register */ + edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); + val32 |= PCI_STSCMD_SERREN; + edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); + + /* Enable CRC Sync flood packets to HyperTransport Link */ + edac_pci_read_dword(dev, REG_HT_LINK, &val32); + val32 |= HT_LINK_CRCFEN; + edac_pci_write_dword(dev, REG_HT_LINK, val32); + + /* Enable SSE reporting etc in Interrupt control reg */ + edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); + val32 |= PCI_INTBRG_CTRL_POLL_MASK; + edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); + } +} + +static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info) +{ + u32 val32; + struct pci_dev *dev = pci_info->dev; + + if (edac_op_state == EDAC_OPSTATE_POLL) { + /* Disable System Error reporting */ + edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); + val32 &= ~PCI_STSCMD_SERREN; + edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); + + /* Disable CRC flood packets */ + edac_pci_read_dword(dev, REG_HT_LINK, &val32); + val32 &= ~HT_LINK_CRCFEN; + edac_pci_write_dword(dev, REG_HT_LINK, val32); + + /* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */ + edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); + val32 &= ~PCI_INTBRG_CTRL_POLL_MASK; + edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); + } +} + +static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev) +{ + struct amd8111_pci_info *pci_info = edac_dev->pvt_info; + struct pci_dev *dev = pci_info->dev; + u32 val32; + + /* Check out PCI Bridge Status and Command Register */ + edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32); + if (val32 & PCI_STSCMD_CLEAR_MASK) { + printk(KERN_INFO "Error(s) in PCI bridge status and command" + "register on device %s\n", pci_info->ctl_name); + printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n", + (val32 & PCI_STSCMD_SSE) != 0, + (val32 & PCI_STSCMD_RMA) != 0, + (val32 & PCI_STSCMD_RTA) != 0); + + val32 |= PCI_STSCMD_CLEAR_MASK; + edac_pci_write_dword(dev, REG_PCI_STSCMD, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } + + /* Check out HyperTransport Link Control Register */ + edac_pci_read_dword(dev, REG_HT_LINK, &val32); + if (val32 & HT_LINK_LKFAIL) { + printk(KERN_INFO "Error(s) in hypertransport link control" + "register on device %s\n", pci_info->ctl_name); + printk(KERN_INFO "LKFAIL: %d\n", + (val32 & HT_LINK_LKFAIL) != 0); + + val32 |= HT_LINK_LKFAIL; + edac_pci_write_dword(dev, REG_HT_LINK, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } + + /* Check out PCI Interrupt and Bridge Control Register */ + edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32); + if (val32 & PCI_INTBRG_CTRL_DTSTAT) { + printk(KERN_INFO "Error(s) in PCI interrupt and bridge control" + "register on device %s\n", pci_info->ctl_name); + printk(KERN_INFO "DTSTAT: %d\n", + (val32 & PCI_INTBRG_CTRL_DTSTAT) != 0); + + val32 |= PCI_INTBRG_CTRL_DTSTAT; + edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } + + /* Check out PCI Bridge Memory Base-Limit 
Register */ + edac_pci_read_dword(dev, REG_MEM_LIM, &val32); + if (val32 & MEM_LIMIT_CLEAR_MASK) { + printk(KERN_INFO + "Error(s) in mem limit register on %s device\n", + pci_info->ctl_name); + printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n" + "RTA: %d, STA: %d, MDPE: %d\n", + (val32 & MEM_LIMIT_DPE) != 0, + (val32 & MEM_LIMIT_RSE) != 0, + (val32 & MEM_LIMIT_RMA) != 0, + (val32 & MEM_LIMIT_RTA) != 0, + (val32 & MEM_LIMIT_STA) != 0, + (val32 & MEM_LIMIT_MDPE) != 0); + + val32 |= MEM_LIMIT_CLEAR_MASK; + edac_pci_write_dword(dev, REG_MEM_LIM, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } +} + +static struct resource *legacy_io_res; +static int at_compat_reg_broken; +#define LEGACY_NR_PORTS 1 + +/* device-specific methods for amd8111 LPC Bridge device */ +static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info) +{ + u8 val8; + struct pci_dev *dev = dev_info->dev; + + /* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */ + legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS, + AMD8111_EDAC_MOD_STR); + if (!legacy_io_res) + printk(KERN_INFO "%s: failed to request legacy I/O region " + "start %d, len %d\n", __func__, + REG_AT_COMPAT, LEGACY_NR_PORTS); + else { + val8 = __do_inb(REG_AT_COMPAT); + if (val8 == 0xff) { /* buggy port */ + printk(KERN_INFO "%s: port %d is buggy, not supported" + " by hardware?\n", __func__, REG_AT_COMPAT); + at_compat_reg_broken = 1; + release_region(REG_AT_COMPAT, LEGACY_NR_PORTS); + legacy_io_res = NULL; + } else { + u8 out8 = 0; + if (val8 & AT_COMPAT_SERR) + out8 = AT_COMPAT_CLRSERR; + if (val8 & AT_COMPAT_IOCHK) + out8 |= AT_COMPAT_CLRIOCHK; + if (out8 > 0) + __do_outb(out8, REG_AT_COMPAT); + } + } + + /* Second clear error flags on LPC bridge */ + edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8); + if (val8 & IO_CTRL_1_CLEAR_MASK) + edac_pci_write_byte(dev, REG_IO_CTRL_1, val8); +} + +static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info) +{ + if (legacy_io_res) + release_region(REG_AT_COMPAT, LEGACY_NR_PORTS); +} + +static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev) +{ + struct amd8111_dev_info *dev_info = edac_dev->pvt_info; + struct pci_dev *dev = dev_info->dev; + u8 val8; + + edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8); + if (val8 & IO_CTRL_1_CLEAR_MASK) { + printk(KERN_INFO + "Error(s) in IO control register on %s device\n", + dev_info->ctl_name); + printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n", + (val8 & IO_CTRL_1_LPC_ERR) != 0, + (val8 & IO_CTRL_1_PW2LPC) != 0); + + val8 |= IO_CTRL_1_CLEAR_MASK; + edac_pci_write_byte(dev, REG_IO_CTRL_1, val8); + + edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); + } + + if (at_compat_reg_broken == 0) { + u8 out8 = 0; + val8 = __do_inb(REG_AT_COMPAT); + if (val8 & AT_COMPAT_SERR) + out8 = AT_COMPAT_CLRSERR; + if (val8 & AT_COMPAT_IOCHK) + out8 |= AT_COMPAT_CLRIOCHK; + if (out8 > 0) { + __do_outb(out8, REG_AT_COMPAT); + edac_device_handle_ue(edac_dev, 0, 0, + edac_dev->ctl_name); + } + } +} + +/* General devices represented by edac_device_ctl_info */ +static struct amd8111_dev_info amd8111_devices[] = { + [LPC_BRIDGE] = { + .err_dev = PCI_DEVICE_ID_AMD_8111_LPC, + .ctl_name = "lpc", + .init = amd8111_lpc_bridge_init, + .exit = amd8111_lpc_bridge_exit, + .check = amd8111_lpc_bridge_check, + }, + {0}, +}; + +/* PCI controllers represented by edac_pci_ctl_info */ +static struct amd8111_pci_info amd8111_pcis[] = { + [PCI_BRIDGE] = { + .err_dev = PCI_DEVICE_ID_AMD_8111_PCI, + .ctl_name = "AMD8111_PCI_Controller", + .init = 
amd8111_pci_bridge_init, + .exit = amd8111_pci_bridge_exit, + .check = amd8111_pci_bridge_check, + }, + {0}, +}; + +static int amd8111_dev_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data]; + + dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD, + dev_info->err_dev, NULL); + + if (!dev_info->dev) { + printk(KERN_ERR "EDAC device not found:" + "vendor %x, device %x, name %s\n", + PCI_VENDOR_ID_AMD, dev_info->err_dev, + dev_info->ctl_name); + return -ENODEV; + } + + if (pci_enable_device(dev_info->dev)) { + pci_dev_put(dev_info->dev); + printk(KERN_ERR "failed to enable:" + "vendor %x, device %x, name %s\n", + PCI_VENDOR_ID_AMD, dev_info->err_dev, + dev_info->ctl_name); + return -ENODEV; + } + + /* + * we do not allocate extra private structure for + * edac_device_ctl_info, but make use of existing + * one instead. + */ + dev_info->edac_idx = edac_dev_idx++; + dev_info->edac_dev = + edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, + NULL, 0, 0, + NULL, 0, dev_info->edac_idx); + if (!dev_info->edac_dev) + return -ENOMEM; + + dev_info->edac_dev->pvt_info = dev_info; + dev_info->edac_dev->dev = &dev_info->dev->dev; + dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; + dev_info->edac_dev->ctl_name = dev_info->ctl_name; + dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id; + + if (edac_op_state == EDAC_OPSTATE_POLL) + dev_info->edac_dev->edac_check = dev_info->check; + + if (dev_info->init) + dev_info->init(dev_info); + + if (edac_device_add_device(dev_info->edac_dev) > 0) { + printk(KERN_ERR "failed to add edac_dev for %s\n", + dev_info->ctl_name); + edac_device_free_ctl_info(dev_info->edac_dev); + return -ENODEV; + } + + printk(KERN_INFO "added one edac_dev on AMD8111 " + "vendor %x, device %x, name %s\n", + PCI_VENDOR_ID_AMD, dev_info->err_dev, + dev_info->ctl_name); + + return 0; +} + +static void amd8111_dev_remove(struct pci_dev *dev) +{ + struct amd8111_dev_info *dev_info; + + for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++) + if (dev_info->dev->device == dev->device) + break; + + if (!dev_info->err_dev) /* should never happen */ + return; + + if (dev_info->edac_dev) { + edac_device_del_device(dev_info->edac_dev->dev); + edac_device_free_ctl_info(dev_info->edac_dev); + } + + if (dev_info->exit) + dev_info->exit(dev_info); + + pci_dev_put(dev_info->dev); +} + +static int amd8111_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data]; + + pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD, + pci_info->err_dev, NULL); + + if (!pci_info->dev) { + printk(KERN_ERR "EDAC device not found:" + "vendor %x, device %x, name %s\n", + PCI_VENDOR_ID_AMD, pci_info->err_dev, + pci_info->ctl_name); + return -ENODEV; + } + + if (pci_enable_device(pci_info->dev)) { + pci_dev_put(pci_info->dev); + printk(KERN_ERR "failed to enable:" + "vendor %x, device %x, name %s\n", + PCI_VENDOR_ID_AMD, pci_info->err_dev, + pci_info->ctl_name); + return -ENODEV; + } + + /* + * we do not allocate extra private structure for + * edac_pci_ctl_info, but make use of existing + * one instead. 
+ */ + pci_info->edac_idx = edac_pci_alloc_index(); + pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name); + if (!pci_info->edac_dev) + return -ENOMEM; + + pci_info->edac_dev->pvt_info = pci_info; + pci_info->edac_dev->dev = &pci_info->dev->dev; + pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; + pci_info->edac_dev->ctl_name = pci_info->ctl_name; + pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id; + + if (edac_op_state == EDAC_OPSTATE_POLL) + pci_info->edac_dev->edac_check = pci_info->check; + + if (pci_info->init) + pci_info->init(pci_info); + + if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) { + printk(KERN_ERR "failed to add edac_pci for %s\n", + pci_info->ctl_name); + edac_pci_free_ctl_info(pci_info->edac_dev); + return -ENODEV; + } + + printk(KERN_INFO "added one edac_pci on AMD8111 " + "vendor %x, device %x, name %s\n", + PCI_VENDOR_ID_AMD, pci_info->err_dev, + pci_info->ctl_name); + + return 0; +} + +static void amd8111_pci_remove(struct pci_dev *dev) +{ + struct amd8111_pci_info *pci_info; + + for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++) + if (pci_info->dev->device == dev->device) + break; + + if (!pci_info->err_dev) /* should never happen */ + return; + + if (pci_info->edac_dev) { + edac_pci_del_device(pci_info->edac_dev->dev); + edac_pci_free_ctl_info(pci_info->edac_dev); + } + + if (pci_info->exit) + pci_info->exit(pci_info); + + pci_dev_put(pci_info->dev); +} + +/* PCI Device ID talbe for general EDAC device */ +static const struct pci_device_id amd8111_edac_dev_tbl[] = { + { + PCI_VEND_DEV(AMD, 8111_LPC), + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0, + .class_mask = 0, + .driver_data = LPC_BRIDGE, + }, + { + 0, + } /* table is NULL-terminated */ +}; +MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl); + +static struct pci_driver amd8111_edac_dev_driver = { + .name = "AMD8111_EDAC_DEV", + .probe = amd8111_dev_probe, + .remove = amd8111_dev_remove, + .id_table = amd8111_edac_dev_tbl, +}; + +/* PCI Device ID table for EDAC PCI controller */ +static const struct pci_device_id amd8111_edac_pci_tbl[] = { + { + PCI_VEND_DEV(AMD, 8111_PCI), + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0, + .class_mask = 0, + .driver_data = PCI_BRIDGE, + }, + { + 0, + } /* table is NULL-terminated */ +}; +MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl); + +static struct pci_driver amd8111_edac_pci_driver = { + .name = "AMD8111_EDAC_PCI", + .probe = amd8111_pci_probe, + .remove = amd8111_pci_remove, + .id_table = amd8111_edac_pci_tbl, +}; + +static int __init amd8111_edac_init(void) +{ + int val; + + printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n"); + printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n"); + + /* Only POLL mode supported so far */ + edac_op_state = EDAC_OPSTATE_POLL; + + val = pci_register_driver(&amd8111_edac_dev_driver); + val |= pci_register_driver(&amd8111_edac_pci_driver); + + return val; +} + +static void __exit amd8111_edac_exit(void) +{ + pci_unregister_driver(&amd8111_edac_pci_driver); + pci_unregister_driver(&amd8111_edac_dev_driver); +} + + +module_init(amd8111_edac_init); +module_exit(amd8111_edac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n"); +MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module"); diff --git a/drivers/edac/amd8111_edac.h b/drivers/edac/amd8111_edac.h new file mode 100644 index 00000000000..35794331deb --- /dev/null +++ b/drivers/edac/amd8111_edac.h @@ -0,0 +1,130 @@ +/* 
+ * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip + * + * Copyright (c) 2008 Wind River Systems, Inc. + * + * Authors: Cao Qingtao <qingtao.cao@windriver.com> + * Benjamin Walsh <benjamin.walsh@windriver.com> + * Hu Yongqi <yongqi.hu@windriver.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _AMD8111_EDAC_H_ +#define _AMD8111_EDAC_H_ + +/************************************************************ + * PCI Bridge Status and Command Register, DevA:0x04 + ************************************************************/ +#define REG_PCI_STSCMD 0x04 +enum pci_stscmd_bits { + PCI_STSCMD_SSE = BIT(30), + PCI_STSCMD_RMA = BIT(29), + PCI_STSCMD_RTA = BIT(28), + PCI_STSCMD_SERREN = BIT(8), + PCI_STSCMD_CLEAR_MASK = (PCI_STSCMD_SSE | + PCI_STSCMD_RMA | + PCI_STSCMD_RTA) +}; + +/************************************************************ + * PCI Bridge Memory Base-Limit Register, DevA:0x1c + ************************************************************/ +#define REG_MEM_LIM 0x1c +enum mem_limit_bits { + MEM_LIMIT_DPE = BIT(31), + MEM_LIMIT_RSE = BIT(30), + MEM_LIMIT_RMA = BIT(29), + MEM_LIMIT_RTA = BIT(28), + MEM_LIMIT_STA = BIT(27), + MEM_LIMIT_MDPE = BIT(24), + MEM_LIMIT_CLEAR_MASK = (MEM_LIMIT_DPE | + MEM_LIMIT_RSE | + MEM_LIMIT_RMA | + MEM_LIMIT_RTA | + MEM_LIMIT_STA | + MEM_LIMIT_MDPE) +}; + +/************************************************************ + * HyperTransport Link Control Register, DevA:0xc4 + ************************************************************/ +#define REG_HT_LINK 0xc4 +enum ht_link_bits { + HT_LINK_LKFAIL = BIT(4), + HT_LINK_CRCFEN = BIT(1), + HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL) +}; + +/************************************************************ + * PCI Bridge Interrupt and Bridge Control, DevA:0x3c + ************************************************************/ +#define REG_PCI_INTBRG_CTRL 0x3c +enum pci_intbrg_ctrl_bits { + PCI_INTBRG_CTRL_DTSERREN = BIT(27), + PCI_INTBRG_CTRL_DTSTAT = BIT(26), + PCI_INTBRG_CTRL_MARSP = BIT(21), + PCI_INTBRG_CTRL_SERREN = BIT(17), + PCI_INTBRG_CTRL_PEREN = BIT(16), + PCI_INTBRG_CTRL_CLEAR_MASK = (PCI_INTBRG_CTRL_DTSTAT), + PCI_INTBRG_CTRL_POLL_MASK = (PCI_INTBRG_CTRL_DTSERREN | + PCI_INTBRG_CTRL_MARSP | + PCI_INTBRG_CTRL_SERREN) +}; + +/************************************************************ + * I/O Control 1 Register, DevB:0x40 + ************************************************************/ +#define REG_IO_CTRL_1 0x40 +enum io_ctrl_1_bits { + IO_CTRL_1_NMIONERR = BIT(7), + IO_CTRL_1_LPC_ERR = BIT(6), + IO_CTRL_1_PW2LPC = BIT(1), + IO_CTRL_1_CLEAR_MASK = (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC) +}; + +/************************************************************ + * Legacy I/O Space Registers + ************************************************************/ +#define REG_AT_COMPAT 0x61 +enum at_compat_bits { + AT_COMPAT_SERR = BIT(7), + AT_COMPAT_IOCHK = BIT(6), + AT_COMPAT_CLRIOCHK = BIT(3), + 
AT_COMPAT_CLRSERR = BIT(2), +}; + +struct amd8111_dev_info { + u16 err_dev; /* PCI Device ID */ + struct pci_dev *dev; + int edac_idx; /* device index */ + char *ctl_name; + struct edac_device_ctl_info *edac_dev; + void (*init)(struct amd8111_dev_info *dev_info); + void (*exit)(struct amd8111_dev_info *dev_info); + void (*check)(struct edac_device_ctl_info *edac_dev); +}; + +struct amd8111_pci_info { + u16 err_dev; /* PCI Device ID */ + struct pci_dev *dev; + int edac_idx; /* pci index */ + const char *ctl_name; + struct edac_pci_ctl_info *edac_dev; + void (*init)(struct amd8111_pci_info *dev_info); + void (*exit)(struct amd8111_pci_info *dev_info); + void (*check)(struct edac_pci_ctl_info *edac_dev); +}; + +#endif /* _AMD8111_EDAC_H_ */ diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c new file mode 100644 index 00000000000..c083b31cac5 --- /dev/null +++ b/drivers/edac/amd8131_edac.c @@ -0,0 +1,379 @@ +/* + * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module + * + * Copyright (c) 2008 Wind River Systems, Inc. + * + * Authors: Cao Qingtao <qingtao.cao@windriver.com> + * Benjamin Walsh <benjamin.walsh@windriver.com> + * Hu Yongqi <yongqi.hu@windriver.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/edac.h> +#include <linux/pci_ids.h> + +#include "edac_core.h" +#include "edac_module.h" +#include "amd8131_edac.h" + +#define AMD8131_EDAC_REVISION " Ver: 1.0.0 " __DATE__ +#define AMD8131_EDAC_MOD_STR "amd8131_edac" + +/* Wrapper functions for accessing PCI configuration space */ +static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32) +{ + int ret; + + ret = pci_read_config_dword(dev, reg, val32); + if (ret != 0) + printk(KERN_ERR AMD8131_EDAC_MOD_STR + " PCI Access Read Error at 0x%x\n", reg); +} + +static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32) +{ + int ret; + + ret = pci_write_config_dword(dev, reg, val32); + if (ret != 0) + printk(KERN_ERR AMD8131_EDAC_MOD_STR + " PCI Access Write Error at 0x%x\n", reg); +} + +static char * const bridge_str[] = { + [NORTH_A] = "NORTH A", + [NORTH_B] = "NORTH B", + [SOUTH_A] = "SOUTH A", + [SOUTH_B] = "SOUTH B", + [NO_BRIDGE] = "NO BRIDGE", +}; + +/* Support up to two AMD8131 chipsets on a platform */ +static struct amd8131_dev_info amd8131_devices[] = { + { + .inst = NORTH_A, + .devfn = DEVFN_PCIX_BRIDGE_NORTH_A, + .ctl_name = "AMD8131_PCIX_NORTH_A", + }, + { + .inst = NORTH_B, + .devfn = DEVFN_PCIX_BRIDGE_NORTH_B, + .ctl_name = "AMD8131_PCIX_NORTH_B", + }, + { + .inst = SOUTH_A, + .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A, + .ctl_name = "AMD8131_PCIX_SOUTH_A", + }, + { + .inst = SOUTH_B, + .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B, + .ctl_name = "AMD8131_PCIX_SOUTH_B", + }, + {.inst = NO_BRIDGE,}, +}; + +static void 
amd8131_pcix_init(struct amd8131_dev_info *dev_info) +{ + u32 val32; + struct pci_dev *dev = dev_info->dev; + + /* First clear error detection flags */ + edac_pci_read_dword(dev, REG_MEM_LIM, &val32); + if (val32 & MEM_LIMIT_MASK) + edac_pci_write_dword(dev, REG_MEM_LIM, val32); + + /* Clear Discard Timer Timedout flag */ + edac_pci_read_dword(dev, REG_INT_CTLR, &val32); + if (val32 & INT_CTLR_DTS) + edac_pci_write_dword(dev, REG_INT_CTLR, val32); + + /* Clear CRC Error flag on link side A */ + edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); + if (val32 & LNK_CTRL_CRCERR_A) + edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); + + /* Clear CRC Error flag on link side B */ + edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); + if (val32 & LNK_CTRL_CRCERR_B) + edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); + + /* + * Then enable all error detections. + * + * Setup Discard Timer Sync Flood Enable, + * System Error Enable and Parity Error Enable. + */ + edac_pci_read_dword(dev, REG_INT_CTLR, &val32); + val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE; + edac_pci_write_dword(dev, REG_INT_CTLR, val32); + + /* Enable overall SERR Error detection */ + edac_pci_read_dword(dev, REG_STS_CMD, &val32); + val32 |= STS_CMD_SERREN; + edac_pci_write_dword(dev, REG_STS_CMD, val32); + + /* Setup CRC Flood Enable for link side A */ + edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); + val32 |= LNK_CTRL_CRCFEN; + edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); + + /* Setup CRC Flood Enable for link side B */ + edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); + val32 |= LNK_CTRL_CRCFEN; + edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); +} + +static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info) +{ + u32 val32; + struct pci_dev *dev = dev_info->dev; + + /* Disable SERR, PERR and DTSE Error detection */ + edac_pci_read_dword(dev, REG_INT_CTLR, &val32); + val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE); + edac_pci_write_dword(dev, REG_INT_CTLR, val32); + + /* Disable overall System Error detection */ + edac_pci_read_dword(dev, REG_STS_CMD, &val32); + val32 &= ~STS_CMD_SERREN; + edac_pci_write_dword(dev, REG_STS_CMD, val32); + + /* Disable CRC Sync Flood on link side A */ + edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); + val32 &= ~LNK_CTRL_CRCFEN; + edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); + + /* Disable CRC Sync Flood on link side B */ + edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); + val32 &= ~LNK_CTRL_CRCFEN; + edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); +} + +static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev) +{ + struct amd8131_dev_info *dev_info = edac_dev->pvt_info; + struct pci_dev *dev = dev_info->dev; + u32 val32; + + /* Check PCI-X Bridge Memory Base-Limit Register for errors */ + edac_pci_read_dword(dev, REG_MEM_LIM, &val32); + if (val32 & MEM_LIMIT_MASK) { + printk(KERN_INFO "Error(s) in mem limit register " + "on %s bridge\n", dev_info->ctl_name); + printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n" + "RTA: %d, STA: %d, MDPE: %d\n", + val32 & MEM_LIMIT_DPE, + val32 & MEM_LIMIT_RSE, + val32 & MEM_LIMIT_RMA, + val32 & MEM_LIMIT_RTA, + val32 & MEM_LIMIT_STA, + val32 & MEM_LIMIT_MDPE); + + val32 |= MEM_LIMIT_MASK; + edac_pci_write_dword(dev, REG_MEM_LIM, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } + + /* Check if Discard Timer timed out */ + edac_pci_read_dword(dev, REG_INT_CTLR, &val32); + if (val32 & INT_CTLR_DTS) { + printk(KERN_INFO "Error(s) in interrupt and control register " + "on %s bridge\n", 
dev_info->ctl_name); + printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS); + + val32 |= INT_CTLR_DTS; + edac_pci_write_dword(dev, REG_INT_CTLR, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } + + /* Check if CRC error happens on link side A */ + edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32); + if (val32 & LNK_CTRL_CRCERR_A) { + printk(KERN_INFO "Error(s) in link conf and control register " + "on %s bridge\n", dev_info->ctl_name); + printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A); + + val32 |= LNK_CTRL_CRCERR_A; + edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } + + /* Check if CRC error happens on link side B */ + edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32); + if (val32 & LNK_CTRL_CRCERR_B) { + printk(KERN_INFO "Error(s) in link conf and control register " + "on %s bridge\n", dev_info->ctl_name); + printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B); + + val32 |= LNK_CTRL_CRCERR_B; + edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32); + + edac_pci_handle_npe(edac_dev, edac_dev->ctl_name); + } +} + +static struct amd8131_info amd8131_chipset = { + .err_dev = PCI_DEVICE_ID_AMD_8131_APIC, + .devices = amd8131_devices, + .init = amd8131_pcix_init, + .exit = amd8131_pcix_exit, + .check = amd8131_pcix_check, +}; + +/* + * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID, + * so amd8131_probe() would be called by kernel 4 times, with different + * address of pci_dev for each of them each time. + */ +static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct amd8131_dev_info *dev_info; + + for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE; + dev_info++) + if (dev_info->devfn == dev->devfn) + break; + + if (dev_info->inst == NO_BRIDGE) /* should never happen */ + return -ENODEV; + + /* + * We can't call pci_get_device() as we are used to do because + * there are 4 of them but pci_dev_get() instead. + */ + dev_info->dev = pci_dev_get(dev); + + if (pci_enable_device(dev_info->dev)) { + pci_dev_put(dev_info->dev); + printk(KERN_ERR "failed to enable:" + "vendor %x, device %x, devfn %x, name %s\n", + PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev, + dev_info->devfn, dev_info->ctl_name); + return -ENODEV; + } + + /* + * we do not allocate extra private structure for + * edac_pci_ctl_info, but make use of existing + * one instead. 
+ */ + dev_info->edac_idx = edac_pci_alloc_index(); + dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name); + if (!dev_info->edac_dev) + return -ENOMEM; + + dev_info->edac_dev->pvt_info = dev_info; + dev_info->edac_dev->dev = &dev_info->dev->dev; + dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR; + dev_info->edac_dev->ctl_name = dev_info->ctl_name; + dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id; + + if (edac_op_state == EDAC_OPSTATE_POLL) + dev_info->edac_dev->edac_check = amd8131_chipset.check; + + if (amd8131_chipset.init) + amd8131_chipset.init(dev_info); + + if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) { + printk(KERN_ERR "failed edac_pci_add_device() for %s\n", + dev_info->ctl_name); + edac_pci_free_ctl_info(dev_info->edac_dev); + return -ENODEV; + } + + printk(KERN_INFO "added one device on AMD8131 " + "vendor %x, device %x, devfn %x, name %s\n", + PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev, + dev_info->devfn, dev_info->ctl_name); + + return 0; +} + +static void amd8131_remove(struct pci_dev *dev) +{ + struct amd8131_dev_info *dev_info; + + for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE; + dev_info++) + if (dev_info->devfn == dev->devfn) + break; + + if (dev_info->inst == NO_BRIDGE) /* should never happen */ + return; + + if (dev_info->edac_dev) { + edac_pci_del_device(dev_info->edac_dev->dev); + edac_pci_free_ctl_info(dev_info->edac_dev); + } + + if (amd8131_chipset.exit) + amd8131_chipset.exit(dev_info); + + pci_dev_put(dev_info->dev); +} + +static const struct pci_device_id amd8131_edac_pci_tbl[] = { + { + PCI_VEND_DEV(AMD, 8131_BRIDGE), + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0, + .class_mask = 0, + .driver_data = 0, + }, + { + 0, + } /* table is NULL-terminated */ +}; +MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl); + +static struct pci_driver amd8131_edac_driver = { + .name = AMD8131_EDAC_MOD_STR, + .probe = amd8131_probe, + .remove = amd8131_remove, + .id_table = amd8131_edac_pci_tbl, +}; + +static int __init amd8131_edac_init(void) +{ + printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n"); + printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n"); + + /* Only POLL mode supported so far */ + edac_op_state = EDAC_OPSTATE_POLL; + + return pci_register_driver(&amd8131_edac_driver); +} + +static void __exit amd8131_edac_exit(void) +{ + pci_unregister_driver(&amd8131_edac_driver); +} + +module_init(amd8131_edac_init); +module_exit(amd8131_edac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n"); +MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module"); diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h new file mode 100644 index 00000000000..60e0d1c72de --- /dev/null +++ b/drivers/edac/amd8131_edac.h @@ -0,0 +1,119 @@ +/* + * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip + * + * Copyright (c) 2008 Wind River Systems, Inc. + * + * Authors: Cao Qingtao <qingtao.cao@windriver.com> + * Benjamin Walsh <benjamin.walsh@windriver.com> + * Hu Yongqi <yongqi.hu@windriver.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _AMD8131_EDAC_H_ +#define _AMD8131_EDAC_H_ + +#define DEVFN_PCIX_BRIDGE_NORTH_A 8 +#define DEVFN_PCIX_BRIDGE_NORTH_B 16 +#define DEVFN_PCIX_BRIDGE_SOUTH_A 24 +#define DEVFN_PCIX_BRIDGE_SOUTH_B 32 + +/************************************************************ + * PCI-X Bridge Status and Command Register, DevA:0x04 + ************************************************************/ +#define REG_STS_CMD 0x04 +enum sts_cmd_bits { + STS_CMD_SSE = BIT(30), + STS_CMD_SERREN = BIT(8) +}; + +/************************************************************ + * PCI-X Bridge Interrupt and Bridge Control Register, + ************************************************************/ +#define REG_INT_CTLR 0x3c +enum int_ctlr_bits { + INT_CTLR_DTSE = BIT(27), + INT_CTLR_DTS = BIT(26), + INT_CTLR_SERR = BIT(17), + INT_CTLR_PERR = BIT(16) +}; + +/************************************************************ + * PCI-X Bridge Memory Base-Limit Register, DevA:0x1C + ************************************************************/ +#define REG_MEM_LIM 0x1c +enum mem_limit_bits { + MEM_LIMIT_DPE = BIT(31), + MEM_LIMIT_RSE = BIT(30), + MEM_LIMIT_RMA = BIT(29), + MEM_LIMIT_RTA = BIT(28), + MEM_LIMIT_STA = BIT(27), + MEM_LIMIT_MDPE = BIT(24), + MEM_LIMIT_MASK = MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA| + MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE +}; + +/************************************************************ + * Link Configuration And Control Register, side A + ************************************************************/ +#define REG_LNK_CTRL_A 0xc4 + +/************************************************************ + * Link Configuration And Control Register, side B + ************************************************************/ +#define REG_LNK_CTRL_B 0xc8 + +enum lnk_ctrl_bits { + LNK_CTRL_CRCERR_A = BIT(9), + LNK_CTRL_CRCERR_B = BIT(8), + LNK_CTRL_CRCFEN = BIT(1) +}; + +enum pcix_bridge_inst { + NORTH_A = 0, + NORTH_B = 1, + SOUTH_A = 2, + SOUTH_B = 3, + NO_BRIDGE = 4 +}; + +struct amd8131_dev_info { + int devfn; + enum pcix_bridge_inst inst; + struct pci_dev *dev; + int edac_idx; /* pci device index */ + char *ctl_name; + struct edac_pci_ctl_info *edac_dev; +}; + +/* + * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC + * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are + * four PCIX Bridges on ATCA-6101 altogether. + * + * These PCIX Bridges share the same PCI Device ID and are all of + * Function Zero, they could be discrimated by their pci_dev->devfn. + * They share the same set of init/check/exit methods, and their + * private structures are collected in the devices[] array. + */ +struct amd8131_info { + u16 err_dev; /* PCI Device ID for AMD8131 APIC*/ + struct amd8131_dev_info *devices; + void (*init)(struct amd8131_dev_info *dev_info); + void (*exit)(struct amd8131_dev_info *dev_info); + void (*check)(struct edac_pci_ctl_info *edac_dev); +}; + +#endif /* _AMD8131_EDAC_H_ */ + diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 4b55ec607a8..28f2c3f959b 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -49,6 +49,10 @@ #define edac_printk(level, prefix, fmt, arg...) 
\ printk(level "EDAC " prefix ": " fmt, ##arg) +#define edac_printk_verbose(level, prefix, fmt, arg...) \ + printk(level "EDAC " prefix ": " "in %s, line at %d: " fmt, \ + __FILE__, __LINE__, ##arg) + #define edac_mc_printk(mci, level, fmt, arg...) \ printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg) @@ -71,11 +75,20 @@ #ifdef CONFIG_EDAC_DEBUG extern int edac_debug_level; +#ifndef CONFIG_EDAC_DEBUG_VERBOSE #define edac_debug_printk(level, fmt, arg...) \ do { \ if (level <= edac_debug_level) \ edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \ - } while(0) + } while (0) +#else /* CONFIG_EDAC_DEBUG_VERBOSE */ +#define edac_debug_printk(level, fmt, arg...) \ + do { \ + if (level <= edac_debug_level) \ + edac_printk_verbose(KERN_DEBUG, EDAC_DEBUG, fmt, \ + ##arg); \ + } while (0) +#endif #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) @@ -831,6 +844,7 @@ extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci); extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, unsigned long value); +extern int edac_pci_alloc_index(void); extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx); extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev); diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 5d3c8083a40..5b150aea703 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -30,6 +30,7 @@ static DEFINE_MUTEX(edac_pci_ctls_mutex); static LIST_HEAD(edac_pci_list); +static atomic_t pci_indexes = ATOMIC_INIT(0); /* * edac_pci_alloc_ctl_info @@ -318,6 +319,19 @@ void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period); /* + * edac_pci_alloc_index: Allocate a unique PCI index number + * + * Return: + * allocated index number + * + */ +int edac_pci_alloc_index(void) +{ + return atomic_inc_return(&pci_indexes) - 1; +} +EXPORT_SYMBOL_GPL(edac_pci_alloc_index); + +/* * edac_pci_add_device: Insert the 'edac_dev' structure into the * edac_pci global list and create sysfs entries associated with * edac_pci structure. diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c new file mode 100644 index 00000000000..11f2172aa1e --- /dev/null +++ b/drivers/edac/ppc4xx_edac.c @@ -0,0 +1,1448 @@ +/* + * Copyright (c) 2008 Nuovation System Designs, LLC + * Grant Erickson <gerickson@nuovations.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + */ + +#include <linux/edac.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/types.h> + +#include <asm/dcr.h> + +#include "edac_core.h" +#include "ppc4xx_edac.h" + +/* + * This file implements a driver for monitoring and handling events + * associated with the IMB DDR2 ECC controller found in the AMCC/IBM + * 405EX[r], 440SP, 440SPe, 460EX, 460GT and 460SX. + * + * As realized in the 405EX[r], this controller features: + * + * - Support for registered- and non-registered DDR1 and DDR2 memory. + * - 32-bit or 16-bit memory interface with optional ECC. + * + * o ECC support includes: + * + * - 4-bit SEC/DED + * - Aligned-nibble error detect + * - Bypass mode + * + * - Two (2) memory banks/ranks. 
+ * - Up to 1 GiB per bank/rank in 32-bit mode and up to 512 MiB per + * bank/rank in 16-bit mode. + * + * As realized in the 440SP and 440SPe, this controller changes/adds: + * + * - 64-bit or 32-bit memory interface with optional ECC. + * + * o ECC support includes: + * + * - 8-bit SEC/DED + * - Aligned-nibble error detect + * - Bypass mode + * + * - Up to 4 GiB per bank/rank in 64-bit mode and up to 2 GiB + * per bank/rank in 32-bit mode. + * + * As realized in the 460EX and 460GT, this controller changes/adds: + * + * - 64-bit or 32-bit memory interface with optional ECC. + * + * o ECC support includes: + * + * - 8-bit SEC/DED + * - Aligned-nibble error detect + * - Bypass mode + * + * - Four (4) memory banks/ranks. + * - Up to 16 GiB per bank/rank in 64-bit mode and up to 8 GiB + * per bank/rank in 32-bit mode. + * + * At present, this driver has ONLY been tested against the controller + * realization in the 405EX[r] on the AMCC Kilauea and Haleakala + * boards (256 MiB w/o ECC memory soldered onto the board) and a + * proprietary board based on those designs (128 MiB ECC memory, also + * soldered onto the board). + * + * Dynamic feature detection and handling needs to be added for the + * other realizations of this controller listed above. + * + * Eventually, this driver will likely be adapted to the above variant + * realizations of this controller as well as broken apart to handle + * the other known ECC-capable controllers prevalent in other 4xx + * processors: + * + * - IBM SDRAM (405GP, 405CR and 405EP) "ibm,sdram-4xx" + * - IBM DDR1 (440GP, 440GX, 440EP and 440GR) "ibm,sdram-4xx-ddr" + * - Denali DDR1/DDR2 (440EPX and 440GRX) "denali,sdram-4xx-ddr2" + * + * For this controller, unfortunately, correctable errors report + * nothing more than the beat/cycle and byte/lane the correction + * occurred on and the check bit group that covered the error. + * + * In contrast, uncorrectable errors also report the failing address, + * the bus master and the transaction direction (i.e. read or write). + * + * Regardless of whether the error is a CE or a UE, we report the + * following pieces of information in the driver-unique message to the + * EDAC subsystem: + * + * - Device tree path + * - Bank(s) + * - Check bit error group + * - Beat(s)/lane(s) + */ + +/* Preprocessor Definitions */ + +#define EDAC_OPSTATE_INT_STR "interrupt" +#define EDAC_OPSTATE_POLL_STR "polled" +#define EDAC_OPSTATE_UNKNOWN_STR "unknown" + +#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" +#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0 " __DATE__ + +#define PPC4XX_EDAC_MESSAGE_SIZE 256 + +/* + * Kernel logging without an EDAC instance + */ +#define ppc4xx_edac_printk(level, fmt, arg...) \ + edac_printk(level, "PPC4xx MC", fmt, ##arg) + +/* + * Kernel logging with an EDAC instance + */ +#define ppc4xx_edac_mc_printk(level, mci, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "PPC4xx", fmt, ##arg) + +/* + * Macros to convert bank configuration size enumerations into MiB and + * page values. + */ +#define SDRAM_MBCF_SZ_MiB_MIN 4 +#define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \ + << (SDRAM_MBCF_SZ_DECODE(n))) +#define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \ + << (20 - PAGE_SHIFT + \ + SDRAM_MBCF_SZ_DECODE(n))) + +/* + * The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are + * indirectly accessed and have a base and length defined by the + * device tree.
The base can be anything; however, we expect the + * length to be precisely two registers, the first for the address + * window and the second for the data window. + */ +#define SDRAM_DCR_RESOURCE_LEN 2 +#define SDRAM_DCR_ADDR_OFFSET 0 +#define SDRAM_DCR_DATA_OFFSET 1 + +/* + * Device tree interrupt indices + */ +#define INTMAP_ECCDED_INDEX 0 /* Double-bit Error Detect */ +#define INTMAP_ECCSEC_INDEX 1 /* Single-bit Error Correct */ + +/* Type Definitions */ + +/* + * PPC4xx SDRAM memory controller private instance data + */ +struct ppc4xx_edac_pdata { + dcr_host_t dcr_host; /* Indirect DCR address/data window mapping */ + struct { + int sec; /* Single-bit correctable error IRQ assigned */ + int ded; /* Double-bit detectable error IRQ assigned */ + } irqs; +}; + +/* + * Various status data gathered and manipulated when checking and + * reporting ECC status. + */ +struct ppc4xx_ecc_status { + u32 ecces; + u32 besr; + u32 bearh; + u32 bearl; + u32 wmirq; +}; + +/* Function Prototypes */ + +static int ppc4xx_edac_probe(struct of_device *device, + const struct of_device_id *device_id); +static int ppc4xx_edac_remove(struct of_device *device); + +/* Global Variables */ + +/* + * Device tree node type and compatible tuples this driver can match + * on. + */ +static struct of_device_id ppc4xx_edac_match[] = { + { + .compatible = "ibm,sdram-4xx-ddr2" + }, + { } +}; + +static struct of_platform_driver ppc4xx_edac_driver = { + .match_table = ppc4xx_edac_match, + .probe = ppc4xx_edac_probe, + .remove = ppc4xx_edac_remove, + .driver = { + .owner = THIS_MODULE, + .name = PPC4XX_EDAC_MODULE_NAME + } +}; + +/* + * TODO: The row and channel parameters likely need to be dynamically + * set based on the aforementioned variant controller realizations. + */ +static const unsigned ppc4xx_edac_nr_csrows = 2; +static const unsigned ppc4xx_edac_nr_chans = 1; + +/* + * Strings associated with PLB master IDs capable of being posted in + * SDRAM_BESR or SDRAM_WMIRQ on uncorrectable ECC errors. + */ +static const char * const ppc4xx_plb_masters[9] = { + [SDRAM_PLB_M0ID_ICU] = "ICU", + [SDRAM_PLB_M0ID_PCIE0] = "PCI-E 0", + [SDRAM_PLB_M0ID_PCIE1] = "PCI-E 1", + [SDRAM_PLB_M0ID_DMA] = "DMA", + [SDRAM_PLB_M0ID_DCU] = "DCU", + [SDRAM_PLB_M0ID_OPB] = "OPB", + [SDRAM_PLB_M0ID_MAL] = "MAL", + [SDRAM_PLB_M0ID_SEC] = "SEC", + [SDRAM_PLB_M0ID_AHB] = "AHB" +}; + +/** + * mfsdram - read and return controller register data + * @dcr_host: A pointer to the DCR mapping. + * @idcr_n: The indirect DCR register to read. + * + * This routine reads and returns the data associated with the + * controller's specified indirect DCR register. + * + * Returns the read data. + */ +static inline u32 +mfsdram(const dcr_host_t *dcr_host, unsigned int idcr_n) +{ + return __mfdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET, + dcr_host->base + SDRAM_DCR_DATA_OFFSET, + idcr_n); +} + +/** + * mtsdram - write controller register data + * @dcr_host: A pointer to the DCR mapping. + * @idcr_n: The indirect DCR register to write. + * @value: The data to write. + * + * This routine writes the provided data to the controller's specified + * indirect DCR register. + */ +static inline void +mtsdram(const dcr_host_t *dcr_host, unsigned int idcr_n, u32 value) +{ + return __mtdcri(dcr_host->base + SDRAM_DCR_ADDR_OFFSET, + dcr_host->base + SDRAM_DCR_DATA_OFFSET, + idcr_n, + value); +} + +/** + * ppc4xx_edac_check_bank_error - check a bank for an ECC bank error + * @status: A pointer to the ECC status structure to check for an + * ECC bank error. 
+ * @bank: The bank to check for an ECC error. + * + * This routine determines whether the specified bank has an ECC + * error. + * + * Returns true if the specified bank has an ECC error; otherwise, + * false. + */ +static bool +ppc4xx_edac_check_bank_error(const struct ppc4xx_ecc_status *status, + unsigned int bank) +{ + switch (bank) { + case 0: + return status->ecces & SDRAM_ECCES_BK0ER; + case 1: + return status->ecces & SDRAM_ECCES_BK1ER; + default: + return false; + } +} + +/** + * ppc4xx_edac_generate_bank_message - generate interpreted bank status message + * @mci: A pointer to the EDAC memory controller instance associated + * with the bank message being generated. + * @status: A pointer to the ECC status structure to generate the + * message from. + * @buffer: A pointer to the buffer in which to generate the + * message. + * @size: The size, in bytes, of space available in buffer. + * + * This routine generates to the provided buffer the portion of the + * driver-unique report message associated with the ECCES[BKNER] + * field of the specified ECC status. + * + * Returns the number of characters generated on success; otherwise, < + * 0 on error. + */ +static int +ppc4xx_edac_generate_bank_message(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status, + char *buffer, + size_t size) +{ + int n, total = 0; + unsigned int row, rows; + + n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + for (rows = 0, row = 0; row < mci->nr_csrows; row++) { + if (ppc4xx_edac_check_bank_error(status, row)) { + n = snprintf(buffer, size, "%s%u", + (rows++ ? ", " : ""), row); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + } + } + + n = snprintf(buffer, size, "%s; ", rows ? "" : "None"); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + fail: + return total; +} + +/** + * ppc4xx_edac_generate_checkbit_message - generate interpreted checkbit message + * @mci: A pointer to the EDAC memory controller instance associated + * with the checkbit message being generated. + * @status: A pointer to the ECC status structure to generate the + * message from. + * @buffer: A pointer to the buffer in which to generate the + * message. + * @size: The size, in bytes, of space available in buffer. + * + * This routine generates to the provided buffer the portion of the + * driver-unique report message associated with the ECCES[CKBER] + * field of the specified ECC status. + * + * Returns the number of characters generated on success; otherwise, < + * 0 on error.
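+ * + * For example, assuming a 32-bit memory interface, a CKBER field that decodes to SDRAM_ECCES_CKBER_32_ECC_0_3 produces the fragment "Checkbit Error: ECC0:3" in the code below.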
+ */ +static int +ppc4xx_edac_generate_checkbit_message(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status, + char *buffer, + size_t size) +{ + const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; + const char *ckber = NULL; + + switch (status->ecces & SDRAM_ECCES_CKBER_MASK) { + case SDRAM_ECCES_CKBER_NONE: + ckber = "None"; + break; + case SDRAM_ECCES_CKBER_32_ECC_0_3: + ckber = "ECC0:3"; + break; + case SDRAM_ECCES_CKBER_32_ECC_4_8: + switch (mfsdram(&pdata->dcr_host, SDRAM_MCOPT1) & + SDRAM_MCOPT1_WDTH_MASK) { + case SDRAM_MCOPT1_WDTH_16: + ckber = "ECC0:3"; + break; + case SDRAM_MCOPT1_WDTH_32: + ckber = "ECC4:8"; + break; + default: + ckber = "Unknown"; + break; + } + break; + case SDRAM_ECCES_CKBER_32_ECC_0_8: + ckber = "ECC0:8"; + break; + default: + ckber = "Unknown"; + break; + } + + return snprintf(buffer, size, "Checkbit Error: %s", ckber); +} + +/** + * ppc4xx_edac_generate_lane_message - generate interpreted byte lane message + * @mci: A pointer to the EDAC memory controller instance associated + * with the byte lane message being generated. + * @status: A pointer to the ECC status structure to generate the + * message from. + * @buffer: A pointer to the buffer in which to generate the + * message. + * @size: The size, in bytes, of space available in buffer. + * + * This routine generates to the provided buffer the portion of the + * driver-unique report message associated with the ECCES[BNCE] + * field of the specified ECC status. + * + * Returns the number of characters generated on success; otherwise, < + * 0 on error. + */ +static int +ppc4xx_edac_generate_lane_message(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status, + char *buffer, + size_t size) +{ + int n, total = 0; + unsigned int lane, lanes; + const unsigned int first_lane = 0; + const unsigned int lane_count = 16; + + n = snprintf(buffer, size, "; Byte Lane Errors: "); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + for (lanes = 0, lane = first_lane; lane < lane_count; lane++) { + if ((status->ecces & SDRAM_ECCES_BNCE_ENCODE(lane)) != 0) { + n = snprintf(buffer, size, + "%s%u", + (lanes++ ? ", " : ""), lane); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + } + } + + n = snprintf(buffer, size, "%s; ", lanes ? "" : "None"); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + fail: + return total; +} + +/** + * ppc4xx_edac_generate_ecc_message - generate interpreted ECC status message + * @mci: A pointer to the EDAC memory controller instance associated + * with the ECCES message being generated. + * @status: A pointer to the ECC status structure to generate the + * message from. + * @buffer: A pointer to the buffer in which to generate the + * message. + * @size: The size, in bytes, of space available in buffer. + * + * This routine generates to the provided buffer the portion of the + * driver-unique report message associated with the ECCES register of + * the specified ECC status. + * + * Returns the number of characters generated on success; otherwise, < + * 0 on error.
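+ * + * The bank, checkbit and lane fragments are concatenated in that order, so a complete ECC portion might read, for example: "<device>: Banks: 0; Checkbit Error: None; Byte Lane Errors: 5; " (the device name and values here are illustrative only).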
+ */ +static int +ppc4xx_edac_generate_ecc_message(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status, + char *buffer, + size_t size) +{ + int n, total = 0; + + n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size); + + if (n < 0 || n >= size) + goto fail; + + buffer += n; + size -= n; + total += n; + + fail: + return total; +} + +/** + * ppc4xx_edac_generate_plb_message - generate interpreted PLB status message + * @mci: A pointer to the EDAC memory controller instance associated + * with the PLB message being generated. + * @status: A pointer to the ECC status structure to generate the + * message from. + * @buffer: A pointer to the buffer in which to generate the + * message. + * @size: The size, in bytes, of space available in buffer. + * + * This routine generates to the provided buffer the portion of the + * driver-unique report message associated with the PLB-related BESR + * and/or WMIRQ registers of the specified ECC status. + * + * Returns the number of characters generated on success; otherwise, < + * 0 on error. + */ +static int +ppc4xx_edac_generate_plb_message(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status, + char *buffer, + size_t size) +{ + unsigned int master; + bool read; + + if ((status->besr & SDRAM_BESR_MASK) == 0) + return 0; + + if ((status->besr & SDRAM_BESR_M0ET_MASK) == SDRAM_BESR_M0ET_NONE) + return 0; + + read = ((status->besr & SDRAM_BESR_M0RW_MASK) == SDRAM_BESR_M0RW_READ); + + master = SDRAM_BESR_M0ID_DECODE(status->besr); + + return snprintf(buffer, size, + "%s error w/ PLB master %u \"%s\"; ", + (read ? "Read" : "Write"), + master, + (((master >= SDRAM_PLB_M0ID_FIRST) && + (master <= SDRAM_PLB_M0ID_LAST)) ? + ppc4xx_plb_masters[master] : "UNKNOWN")); +} + +/** + * ppc4xx_edac_generate_message - generate interpreted status message + * @mci: A pointer to the EDAC memory controller instance associated + * with the driver-unique message being generated. + * @status: A pointer to the ECC status structure to generate the + * message from. + * @buffer: A pointer to the buffer in which to generate the + * message. + * @size: The size, in bytes, of space available in buffer. + * + * This routine generates to the provided buffer the driver-unique + * EDAC report message from the specified ECC status. + */ +static void +ppc4xx_edac_generate_message(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status, + char *buffer, + size_t size) +{ + int n; + + if (buffer == NULL || size == 0) + return; + + n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size); + + if (n < 0 || n >= size) + return; + + buffer += n; + size -= n; + + ppc4xx_edac_generate_plb_message(mci, status, buffer, size); +} + +#ifdef DEBUG +/** + * ppc4xx_ecc_dump_status - dump controller ECC status registers + * @mci: A pointer to the EDAC memory controller instance + * associated with the status being dumped. + * @status: A pointer to the ECC status structure to generate the + * dump from. + * + * This routine dumps to the kernel log buffer the raw and + * interpreted forms of the specified ECC status.
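+ * + * When DEBUG is defined, ppc4xx_edac_check() below invokes this dump on every 30th check so that the polled status can be sampled without flooding the kernel log.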
+ */ +static void +ppc4xx_ecc_dump_status(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status) +{ + char message[PPC4XX_EDAC_MESSAGE_SIZE]; + + ppc4xx_edac_generate_message(mci, status, message, sizeof(message)); + + ppc4xx_edac_mc_printk(KERN_INFO, mci, + "\n" + "\tECCES: 0x%08x\n" + "\tWMIRQ: 0x%08x\n" + "\tBESR: 0x%08x\n" + "\tBEAR: 0x%08x%08x\n" + "\t%s\n", + status->ecces, + status->wmirq, + status->besr, + status->bearh, + status->bearl, + message); +} +#endif /* DEBUG */ + +/** + * ppc4xx_ecc_get_status - get controller ECC status + * @mci: A pointer to the EDAC memory controller instance + * associated with the status being retrieved. + * @status: A pointer to the ECC status structure to populate the + * ECC status with. + * + * This routine reads and masks, as appropriate, all the relevant + * status registers that deal with ibm,sdram-4xx-ddr2 ECC errors. + * While we read all of them, for correctable errors, we only expect + * to deal with ECCES. For uncorrectable errors, we expect to deal + * with all of them. + */ +static void +ppc4xx_ecc_get_status(const struct mem_ctl_info *mci, + struct ppc4xx_ecc_status *status) +{ + const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; + const dcr_host_t *dcr_host = &pdata->dcr_host; + + status->ecces = mfsdram(dcr_host, SDRAM_ECCES) & SDRAM_ECCES_MASK; + status->wmirq = mfsdram(dcr_host, SDRAM_WMIRQ) & SDRAM_WMIRQ_MASK; + status->besr = mfsdram(dcr_host, SDRAM_BESR) & SDRAM_BESR_MASK; + status->bearl = mfsdram(dcr_host, SDRAM_BEARL); + status->bearh = mfsdram(dcr_host, SDRAM_BEARH); +} + +/** + * ppc4xx_ecc_clear_status - clear controller ECC status + * @mci: A pointer to the EDAC memory controller instance + * associated with the status being cleared. + * @status: A pointer to the ECC status structure containing the + * values to write to clear the ECC status. + * + * This routine clears--by writing the masked (as appropriate) status + * values back to--the status registers that deal with + * ibm,sdram-4xx-ddr2 ECC errors. + */ +static void +ppc4xx_ecc_clear_status(const struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status) +{ + const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; + const dcr_host_t *dcr_host = &pdata->dcr_host; + + mtsdram(dcr_host, SDRAM_ECCES, status->ecces & SDRAM_ECCES_MASK); + mtsdram(dcr_host, SDRAM_WMIRQ, status->wmirq & SDRAM_WMIRQ_MASK); + mtsdram(dcr_host, SDRAM_BESR, status->besr & SDRAM_BESR_MASK); + mtsdram(dcr_host, SDRAM_BEARL, 0); + mtsdram(dcr_host, SDRAM_BEARH, 0); +} + +/** + * ppc4xx_edac_handle_ce - handle controller correctable ECC error (CE) + * @mci: A pointer to the EDAC memory controller instance + * associated with the correctable error being handled and reported. + * @status: A pointer to the ECC status structure associated with + * the correctable error being handled and reported. + * + * This routine handles an ibm,sdram-4xx-ddr2 controller ECC + * correctable error. Per the aforementioned discussion, there's not + * enough status available to use the full EDAC correctable error + * interface, so we just pass driver-unique message to the "no info" + * interface. 
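+ * + * In effect, each bank flagged in ECCES[BKNER] results in one edac_mc_handle_ce_no_info() call carrying the generated driver-unique message.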
+ */ +static void +ppc4xx_edac_handle_ce(struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status) +{ + int row; + char message[PPC4XX_EDAC_MESSAGE_SIZE]; + + ppc4xx_edac_generate_message(mci, status, message, sizeof(message)); + + for (row = 0; row < mci->nr_csrows; row++) + if (ppc4xx_edac_check_bank_error(status, row)) + edac_mc_handle_ce_no_info(mci, message); +} + +/** + * ppc4xx_edac_handle_ue - handle controller uncorrectable ECC error (UE) + * @mci: A pointer to the EDAC memory controller instance + * associated with the uncorrectable error being handled and + * reported. + * @status: A pointer to the ECC status structure associated with + * the uncorrectable error being handled and reported. + * + * This routine handles an ibm,sdram-4xx-ddr2 controller ECC + * uncorrectable error. + */ +static void +ppc4xx_edac_handle_ue(struct mem_ctl_info *mci, + const struct ppc4xx_ecc_status *status) +{ + const u64 bear = ((u64)status->bearh << 32 | status->bearl); + const unsigned long page = bear >> PAGE_SHIFT; + const unsigned long offset = bear & ~PAGE_MASK; + int row; + char message[PPC4XX_EDAC_MESSAGE_SIZE]; + + ppc4xx_edac_generate_message(mci, status, message, sizeof(message)); + + for (row = 0; row < mci->nr_csrows; row++) + if (ppc4xx_edac_check_bank_error(status, row)) + edac_mc_handle_ue(mci, page, offset, row, message); +} + +/** + * ppc4xx_edac_check - check controller for ECC errors + * @mci: A pointer to the EDAC memory controller instance + * associated with the ibm,sdram-4xx-ddr2 controller being + * checked. + * + * This routine is used to check and post ECC errors and is called by + * both the EDAC polling thread and this driver's CE and UE interrupt + * handler. + */ +static void +ppc4xx_edac_check(struct mem_ctl_info *mci) +{ +#ifdef DEBUG + static unsigned int count; +#endif + struct ppc4xx_ecc_status status; + + ppc4xx_ecc_get_status(mci, &status); + +#ifdef DEBUG + if (count++ % 30 == 0) + ppc4xx_ecc_dump_status(mci, &status); +#endif + + if (status.ecces & SDRAM_ECCES_UE) + ppc4xx_edac_handle_ue(mci, &status); + + if (status.ecces & SDRAM_ECCES_CE) + ppc4xx_edac_handle_ce(mci, &status); + + ppc4xx_ecc_clear_status(mci, &status); +} + +/** + * ppc4xx_edac_isr - SEC (CE) and DED (UE) interrupt service routine + * @irq: The virtual interrupt number being serviced. + * @dev_id: A pointer to the EDAC memory controller instance + * associated with the interrupt being handled. + * + * This routine implements the interrupt handler for both correctable + * (CE) and uncorrectable (UE) ECC errors for the ibm,sdram-4xx-ddr2 + * controller. It simply calls through to the same routine used during + * polling to check, report and clear the ECC status. + * + * Unconditionally returns IRQ_HANDLED. + */ +static irqreturn_t +ppc4xx_edac_isr(int irq, void *dev_id) +{ + struct mem_ctl_info *mci = dev_id; + + ppc4xx_edac_check(mci); + + return IRQ_HANDLED; +} + +/** + * ppc4xx_edac_get_dtype - return the controller memory width + * @mcopt1: The 32-bit Memory Controller Option 1 register value + * currently set for the controller, from which the width + * is derived. + * + * This routine returns the EDAC device type width appropriate for the + * current controller configuration. + * + * TODO: This needs to be conditioned dynamically through feature + * flags or some such when other controller variants are supported as + * the 405EX[r] is 16-/32-bit and the others are 32-/64-bit with the + * 16- and 64-bit field definition/value/enumeration (b1) overloaded + * among them. 
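+ * + * For the 405EX[r] decoding below, MCOPT1[WDTH] set to one selects the 16-bit interface (reported as DEV_X2) and cleared to zero selects the 32-bit interface (reported as DEV_X4).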
+ * + * Returns a device type width enumeration. + */ +static enum dev_type __devinit +ppc4xx_edac_get_dtype(u32 mcopt1) +{ + switch (mcopt1 & SDRAM_MCOPT1_WDTH_MASK) { + case SDRAM_MCOPT1_WDTH_16: + return DEV_X2; + case SDRAM_MCOPT1_WDTH_32: + return DEV_X4; + default: + return DEV_UNKNOWN; + } +} + +/** + * ppc4xx_edac_get_mtype - return controller memory type + * @mcopt1: The 32-bit Memory Controller Option 1 register value + * currently set for the controller, from which the memory type + * is derived. + * + * This routine returns the EDAC memory type appropriate for the + * current controller configuration. + * + * Returns a memory type enumeration. + */ +static enum mem_type __devinit +ppc4xx_edac_get_mtype(u32 mcopt1) +{ + bool rden = ((mcopt1 & SDRAM_MCOPT1_RDEN_MASK) == SDRAM_MCOPT1_RDEN); + + switch (mcopt1 & SDRAM_MCOPT1_DDR_TYPE_MASK) { + case SDRAM_MCOPT1_DDR2_TYPE: + return rden ? MEM_RDDR2 : MEM_DDR2; + case SDRAM_MCOPT1_DDR1_TYPE: + return rden ? MEM_RDDR : MEM_DDR; + default: + return MEM_UNKNOWN; + } +} + +/** + * ppc4xx_edac_init_csrows - initialize driver instance rows + * @mci: A pointer to the EDAC memory controller instance + * associated with the ibm,sdram-4xx-ddr2 controller for which + * the csrows (i.e. banks/ranks) are being initialized. + * @mcopt1: The 32-bit Memory Controller Option 1 register value + * currently set for the controller, from which bank width + * and memory type information is derived. + * + * This routine initializes the virtual "chip select rows" associated + * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2 + * controller bank/rank is mapped to a row. + * + * Returns 0 if OK; otherwise, -EINVAL if the memory bank size + * configuration cannot be determined. + */ +static int __devinit +ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) +{ + const struct ppc4xx_edac_pdata *pdata = mci->pvt_info; + int status = 0; + enum mem_type mtype; + enum dev_type dtype; + enum edac_type edac_mode; + int row; + u32 mbxcf, size; + static u32 ppc4xx_last_page; + + /* Establish the memory type and width */ + + mtype = ppc4xx_edac_get_mtype(mcopt1); + dtype = ppc4xx_edac_get_dtype(mcopt1); + + /* Establish EDAC mode */ + + if (mci->edac_cap & EDAC_FLAG_SECDED) + edac_mode = EDAC_SECDED; + else if (mci->edac_cap & EDAC_FLAG_EC) + edac_mode = EDAC_EC; + else + edac_mode = EDAC_NONE; + + /* + * Initialize each chip select row structure, which corresponds + * 1:1 with a controller bank/rank. + */ + + for (row = 0; row < mci->nr_csrows; row++) { + struct csrow_info *csi = &mci->csrows[row]; + + /* + * Get the configuration settings for this + * row/bank/rank and skip disabled banks. + */ + + mbxcf = mfsdram(&pdata->dcr_host, SDRAM_MBXCF(row)); + + if ((mbxcf & SDRAM_MBCF_BE_MASK) != SDRAM_MBCF_BE_ENABLE) + continue; + + /* Map the bank configuration size setting to pages.
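+ * For example, assuming 4 KiB pages (a PAGE_SHIFT of 12), a 256 MiB bank decodes to 6, so SDRAM_MBCF_SZ_TO_PAGES yields 4 << (20 - 12 + 6), or 65536 pages.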
*/ + + size = mbxcf & SDRAM_MBCF_SZ_MASK; + + switch (size) { + case SDRAM_MBCF_SZ_4MB: + case SDRAM_MBCF_SZ_8MB: + case SDRAM_MBCF_SZ_16MB: + case SDRAM_MBCF_SZ_32MB: + case SDRAM_MBCF_SZ_64MB: + case SDRAM_MBCF_SZ_128MB: + case SDRAM_MBCF_SZ_256MB: + case SDRAM_MBCF_SZ_512MB: + case SDRAM_MBCF_SZ_1GB: + case SDRAM_MBCF_SZ_2GB: + case SDRAM_MBCF_SZ_4GB: + case SDRAM_MBCF_SZ_8GB: + csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size); + break; + default: + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Unrecognized memory bank %d " + "size 0x%08x\n", + row, SDRAM_MBCF_SZ_DECODE(size)); + status = -EINVAL; + goto done; + } + + csi->first_page = ppc4xx_last_page; + csi->last_page = csi->first_page + csi->nr_pages - 1; + csi->page_mask = 0; + + /* + * It's unclear exactly what grain should be set to + * here. The SDRAM_ECCES register allows resolution of + * an error down to a nibble which would potentially + * argue for a grain of '1' byte, even though we only + * know the associated address for uncorrectable + * errors. This value is not used at present for + * anything other than error reporting so getting it + * wrong should be of little consequence. Other + * possible values would be the PLB width (16), the + * page size (PAGE_SIZE) or the memory width (2 or 4). + */ + + csi->grain = 1; + + csi->mtype = mtype; + csi->dtype = dtype; + + csi->edac_mode = edac_mode; + + ppc4xx_last_page += csi->nr_pages; + } + + done: + return status; +} + +/** + * ppc4xx_edac_mc_init - initialize driver instance + * @mci: A pointer to the EDAC memory controller instance being + * initialized. + * @op: A pointer to the OpenFirmware device tree node associated + * with the controller this EDAC instance is bound to. + * @match: A pointer to the OpenFirmware device tree match + * information associated with the controller this EDAC instance + * is bound to. + * @dcr_host: A pointer to the DCR data containing the DCR mapping + * for this controller instance. + * @mcopt1: The 32-bit Memory Controller Option 1 register value + * currently set for the controller, from which ECC capabilities + * and scrub mode are derived. + * + * This routine performs initialization of the EDAC memory controller + * instance and related driver-private data associated with the + * ibm,sdram-4xx-ddr2 memory controller the instance is bound to. + * + * Returns 0 if OK; otherwise, < 0 on error. + */ +static int __devinit +ppc4xx_edac_mc_init(struct mem_ctl_info *mci, + struct of_device *op, + const struct of_device_id *match, + const dcr_host_t *dcr_host, + u32 mcopt1) +{ + int status = 0; + const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); + struct ppc4xx_edac_pdata *pdata = NULL; + const struct device_node *np = op->node; + + if (match == NULL) + return -EINVAL; + + /* Initial driver pointers and private data */ + + mci->dev = &op->dev; + + dev_set_drvdata(mci->dev, mci); + + pdata = mci->pvt_info; + + pdata->dcr_host = *dcr_host; + pdata->irqs.sec = NO_IRQ; + pdata->irqs.ded = NO_IRQ; + + /* Initialize controller capabilities and configuration */ + + mci->mtype_cap = (MEM_FLAG_DDR | MEM_FLAG_RDDR | + MEM_FLAG_DDR2 | MEM_FLAG_RDDR2); + + mci->edac_ctl_cap = (EDAC_FLAG_NONE | + EDAC_FLAG_EC | + EDAC_FLAG_SECDED); + + mci->scrub_cap = SCRUB_NONE; + mci->scrub_mode = SCRUB_NONE; + + /* + * Update the actual capabilities based on the MCOPT1[MCHK] + * settings. Scrubbing is only useful if reporting is enabled.
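+ * With MCOPT1[MCHK] set to CHK, errors are checked but not reported, so only EDAC_FLAG_EC is advertised; with CHK_REP, reporting is enabled as well, so EDAC_FLAG_SECDED and software scrubbing (SCRUB_SW_SRC) are advertised too.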
+ */ + + switch (memcheck) { + case SDRAM_MCOPT1_MCHK_CHK: + mci->edac_cap = EDAC_FLAG_EC; + break; + case SDRAM_MCOPT1_MCHK_CHK_REP: + mci->edac_cap = (EDAC_FLAG_EC | EDAC_FLAG_SECDED); + mci->scrub_mode = SCRUB_SW_SRC; + break; + default: + mci->edac_cap = EDAC_FLAG_NONE; + break; + } + + /* Initialize strings */ + + mci->mod_name = PPC4XX_EDAC_MODULE_NAME; + mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION; + mci->ctl_name = match->compatible; + mci->dev_name = np->full_name; + + /* Initialize callbacks */ + + mci->edac_check = ppc4xx_edac_check; + mci->ctl_page_to_phys = NULL; + + /* Initialize chip select rows */ + + status = ppc4xx_edac_init_csrows(mci, mcopt1); + + if (status) + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Failed to initialize rows!\n"); + + return status; +} + +/** + * ppc4xx_edac_register_irq - setup and register controller interrupts + * @op: A pointer to the OpenFirmware device tree node associated + * with the controller this EDAC instance is bound to. + * @mci: A pointer to the EDAC memory controller instance + * associated with the ibm,sdram-4xx-ddr2 controller for which + * interrupts are being registered. + * + * This routine parses the correctable (CE) and uncorrectable error (UE) + * interrupts from the device tree node and maps and assigns them to + * the associated EDAC memory controller instance. + * + * Returns 0 if OK; otherwise, -ENODEV if the interrupts could not be + * mapped and assigned. + */ +static int __devinit +ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci) +{ + int status = 0; + int ded_irq, sec_irq; + struct ppc4xx_edac_pdata *pdata = mci->pvt_info; + struct device_node *np = op->node; + + ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX); + sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX); + + if (ded_irq == NO_IRQ || sec_irq == NO_IRQ) { + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Unable to map interrupts.\n"); + status = -ENODEV; + goto fail; + } + + status = request_irq(ded_irq, + ppc4xx_edac_isr, + IRQF_DISABLED, + "[EDAC] MC ECCDED", + mci); + + if (status < 0) { + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Unable to request irq %d for ECC DED", + ded_irq); + status = -ENODEV; + goto fail1; + } + + status = request_irq(sec_irq, + ppc4xx_edac_isr, + IRQF_DISABLED, + "[EDAC] MC ECCSEC", + mci); + + if (status < 0) { + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Unable to request irq %d for ECC SEC", + sec_irq); + status = -ENODEV; + goto fail2; + } + + ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCDED irq is %d\n", ded_irq); + ppc4xx_edac_mc_printk(KERN_INFO, mci, "ECCSEC irq is %d\n", sec_irq); + + pdata->irqs.ded = ded_irq; + pdata->irqs.sec = sec_irq; + + return 0; + + fail2: + free_irq(sec_irq, mci); + + fail1: + free_irq(ded_irq, mci); + + fail: + return status; +} + +/** + * ppc4xx_edac_map_dcrs - locate and map controller registers + * @np: A pointer to the device tree node containing the DCR + * resources to map. + * @dcr_host: A pointer to the DCR data to populate with the + * DCR mapping. + * + * This routine attempts to locate in the device tree and map the DCR + * register resources associated with the controller's indirect DCR + * address and data windows. + * + * Returns 0 if the DCRs were successfully mapped; otherwise, < 0 on + * error. + */ +static int __devinit +ppc4xx_edac_map_dcrs(const struct device_node *np, dcr_host_t *dcr_host) +{ + unsigned int dcr_base, dcr_len; + + if (np == NULL || dcr_host == NULL) + return -EINVAL; + + /* Get the DCR resource extent and sanity check the values.
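+ * A conforming device tree node supplies a two-register resource: the indirect address window at offset 0 and the data window at offset 1; any other length is rejected below.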
*/ + + dcr_base = dcr_resource_start(np, 0); + dcr_len = dcr_resource_len(np, 0); + + if (dcr_base == 0 || dcr_len == 0) { + ppc4xx_edac_printk(KERN_ERR, + "Failed to obtain DCR property.\n"); + return -ENODEV; + } + + if (dcr_len != SDRAM_DCR_RESOURCE_LEN) { + ppc4xx_edac_printk(KERN_ERR, + "Unexpected DCR length %d, expected %d.\n", + dcr_len, SDRAM_DCR_RESOURCE_LEN); + return -ENODEV; + } + + /* Attempt to map the DCR extent. */ + + *dcr_host = dcr_map(np, dcr_base, dcr_len); + + if (!DCR_MAP_OK(*dcr_host)) { + ppc4xx_edac_printk(KERN_INFO, "Failed to map DCRs.\n"); + return -ENODEV; + } + + return 0; +} + +/** + * ppc4xx_edac_probe - check controller and bind driver + * @op: A pointer to the OpenFirmware device tree node associated + * with the controller being probed for driver binding. + * @match: A pointer to the OpenFirmware device tree match + * information associated with the controller being probed + * for driver binding. + * + * This routine probes a specific ibm,sdram-4xx-ddr2 controller + * instance for binding with the driver. + * + * Returns 0 if the controller instance was successfully bound to the + * driver; otherwise, < 0 on error. + */ +static int __devinit +ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match) +{ + int status = 0; + u32 mcopt1, memcheck; + dcr_host_t dcr_host; + const struct device_node *np = op->node; + struct mem_ctl_info *mci = NULL; + static int ppc4xx_edac_instance; + + /* + * At this point, we only support the controller realized on + * the AMCC PPC 405EX[r]. Reject anything else. + */ + + if (!of_device_is_compatible(np, "ibm,sdram-405ex") && + !of_device_is_compatible(np, "ibm,sdram-405exr")) { + ppc4xx_edac_printk(KERN_NOTICE, + "Only the PPC405EX[r] is supported.\n"); + return -ENODEV; + } + + /* + * Next, get the DCR property and attempt to map it so that we + * can probe the controller. + */ + + status = ppc4xx_edac_map_dcrs(np, &dcr_host); + + if (status) + return status; + + /* + * First determine whether ECC is enabled at all. If not, + * there is no useful checking or monitoring that can be done + * for this controller. + */ + + mcopt1 = mfsdram(&dcr_host, SDRAM_MCOPT1); + memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK); + + if (memcheck == SDRAM_MCOPT1_MCHK_NON) { + ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or " + "ECC is disabled.\n", np->full_name); + status = -ENODEV; + goto done; + } + + /* + * At this point, we know ECC is enabled, allocate an EDAC + * controller instance and perform the appropriate + * initialization. + */ + + mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata), + ppc4xx_edac_nr_csrows, + ppc4xx_edac_nr_chans, + ppc4xx_edac_instance); + + if (mci == NULL) { + ppc4xx_edac_printk(KERN_ERR, "%s: " + "Failed to allocate EDAC MC instance!\n", + np->full_name); + status = -ENOMEM; + goto done; + } + + status = ppc4xx_edac_mc_init(mci, op, match, &dcr_host, mcopt1); + + if (status) { + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Failed to initialize instance!\n"); + goto fail; + } + + /* + * We have a valid, initialized EDAC instance bound to the + * controller. Attempt to register it with the EDAC subsystem + * and, if necessary, register interrupts. 
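+ * Interrupts are requested only when edac_op_state selects interrupt-driven reporting; in the polled case the EDAC core calls ppc4xx_edac_check() periodically instead.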
+ */ + + if (edac_mc_add_mc(mci)) { + ppc4xx_edac_mc_printk(KERN_ERR, mci, + "Failed to add instance!\n"); + status = -ENODEV; + goto fail; + } + + if (edac_op_state == EDAC_OPSTATE_INT) { + status = ppc4xx_edac_register_irq(op, mci); + + if (status) + goto fail1; + } + + ppc4xx_edac_instance++; + + return 0; + + fail1: + edac_mc_del_mc(mci->dev); + + fail: + edac_mc_free(mci); + + done: + return status; +} + +/** + * ppc4xx_edac_remove - unbind driver from controller + * @op: A pointer to the OpenFirmware device tree node associated + * with the controller this EDAC instance is to be unbound/removed + * from. + * + * This routine unbinds the EDAC memory controller instance associated + * with the specified ibm,sdram-4xx-ddr2 controller described by the + * OpenFirmware device tree node passed as a parameter. + * + * Unconditionally returns 0. + */ +static int +ppc4xx_edac_remove(struct of_device *op) +{ + struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); + struct ppc4xx_edac_pdata *pdata = mci->pvt_info; + + if (edac_op_state == EDAC_OPSTATE_INT) { + free_irq(pdata->irqs.sec, mci); + free_irq(pdata->irqs.ded, mci); + } + + dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); + + edac_mc_del_mc(mci->dev); + edac_mc_free(mci); + + return 0; +} + +/** + * ppc4xx_edac_opstate_init - initialize EDAC reporting method + * + * This routine ensures that the EDAC memory controller reporting + * method is mapped to a sane value as the EDAC core defines the value + * to EDAC_OPSTATE_INVAL by default. We don't call the global + * opstate_init as that defaults to polling and we want interrupt as + * the default. + */ +static inline void __init +ppc4xx_edac_opstate_init(void) +{ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_INT: + break; + default: + edac_op_state = EDAC_OPSTATE_INT; + break; + } + + ppc4xx_edac_printk(KERN_INFO, "Reporting type: %s\n", + ((edac_op_state == EDAC_OPSTATE_POLL) ? + EDAC_OPSTATE_POLL_STR : + ((edac_op_state == EDAC_OPSTATE_INT) ? + EDAC_OPSTATE_INT_STR : + EDAC_OPSTATE_UNKNOWN_STR))); +} + +/** + * ppc4xx_edac_init - driver/module insertion entry point + * + * This routine is the driver/module insertion entry point. It + * initializes the EDAC memory controller reporting state and + * registers the driver as an OpenFirmware device tree platform + * driver. + */ +static int __init +ppc4xx_edac_init(void) +{ + ppc4xx_edac_printk(KERN_INFO, PPC4XX_EDAC_MODULE_REVISION "\n"); + + ppc4xx_edac_opstate_init(); + + return of_register_platform_driver(&ppc4xx_edac_driver); +} + +/** + * ppc4xx_edac_exit - driver/module removal entry point + * + * This routine is the driver/module removal entry point. It + * unregisters the driver as an OpenFirmware device tree platform + * driver. 
+ */ +static void __exit +ppc4xx_edac_exit(void) +{ + of_unregister_platform_driver(&ppc4xx_edac_driver); +} + +module_init(ppc4xx_edac_init); +module_exit(ppc4xx_edac_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Grant Erickson <gerickson@nuovations.com>"); +MODULE_DESCRIPTION("EDAC MC Driver for the PPC4xx IBM DDR2 Memory Controller"); +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting State: " "0=" EDAC_OPSTATE_POLL_STR ", 2=" EDAC_OPSTATE_INT_STR); diff --git a/drivers/edac/ppc4xx_edac.h b/drivers/edac/ppc4xx_edac.h new file mode 100644 index 00000000000..d3154764c44 --- /dev/null +++ b/drivers/edac/ppc4xx_edac.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2008 Nuovation System Designs, LLC + * Grant Erickson <gerickson@nuovations.com> + * + * This file defines processor mnemonics for accessing and managing + * the IBM DDR1/DDR2 ECC controller found in the 405EX[r], 440SP, + * 440SPe, 460EX, 460GT and 460SX. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + */ + +#ifndef __PPC4XX_EDAC_H +#define __PPC4XX_EDAC_H + +#include <linux/types.h> + +/* + * Macro for generating register field mnemonics + */ +#define PPC_REG_BITS 32 +#define PPC_REG_VAL(bit, val) ((val) << ((PPC_REG_BITS - 1) - (bit))) +#define PPC_REG_DECODE(bit, val) ((val) >> ((PPC_REG_BITS - 1) - (bit))) + +/* + * IBM 4xx DDR1/DDR2 SDRAM memory controller registers (at least those + * relevant to ECC) + */ +#define SDRAM_BESR 0x00 /* Error status (read/clear) */ +#define SDRAM_BESRT 0x01 /* Error status (test/set) */ +#define SDRAM_BEARL 0x02 /* Error address low */ +#define SDRAM_BEARH 0x03 /* Error address high */ +#define SDRAM_WMIRQ 0x06 /* Write master (read/clear) */ +#define SDRAM_WMIRQT 0x07 /* Write master (test/set) */ +#define SDRAM_MCOPT1 0x20 /* Controller options 1 */ +#define SDRAM_MBXCF_BASE 0x40 /* Bank n configuration base */ +#define SDRAM_MBXCF(n) (SDRAM_MBXCF_BASE + (4 * (n))) +#define SDRAM_MB0CF SDRAM_MBXCF(0) +#define SDRAM_MB1CF SDRAM_MBXCF(1) +#define SDRAM_MB2CF SDRAM_MBXCF(2) +#define SDRAM_MB3CF SDRAM_MBXCF(3) +#define SDRAM_ECCCR 0x98 /* ECC error status */ +#define SDRAM_ECCES SDRAM_ECCCR + +/* + * PLB Master IDs + */ +#define SDRAM_PLB_M0ID_FIRST 0 +#define SDRAM_PLB_M0ID_ICU SDRAM_PLB_M0ID_FIRST +#define SDRAM_PLB_M0ID_PCIE0 1 +#define SDRAM_PLB_M0ID_PCIE1 2 +#define SDRAM_PLB_M0ID_DMA 3 +#define SDRAM_PLB_M0ID_DCU 4 +#define SDRAM_PLB_M0ID_OPB 5 +#define SDRAM_PLB_M0ID_MAL 6 +#define SDRAM_PLB_M0ID_SEC 7 +#define SDRAM_PLB_M0ID_AHB 8 +#define SDRAM_PLB_M0ID_LAST SDRAM_PLB_M0ID_AHB +#define SDRAM_PLB_M0ID_COUNT (SDRAM_PLB_M0ID_LAST - \ + SDRAM_PLB_M0ID_FIRST + 1) + +/* + * Memory Controller Bus Error Status Register + */ +#define SDRAM_BESR_MASK PPC_REG_VAL(7, 0xFF) +#define SDRAM_BESR_M0ID_MASK PPC_REG_VAL(3, 0xF) +#define SDRAM_BESR_M0ID_DECODE(n) PPC_REG_DECODE(3, n) +#define SDRAM_BESR_M0ID_ICU PPC_REG_VAL(3, SDRAM_PLB_M0ID_ICU) +#define SDRAM_BESR_M0ID_PCIE0 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE0) +#define SDRAM_BESR_M0ID_PCIE1 PPC_REG_VAL(3, SDRAM_PLB_M0ID_PCIE1) +#define SDRAM_BESR_M0ID_DMA PPC_REG_VAL(3, SDRAM_PLB_M0ID_DMA) +#define SDRAM_BESR_M0ID_DCU PPC_REG_VAL(3, SDRAM_PLB_M0ID_DCU) +#define SDRAM_BESR_M0ID_OPB PPC_REG_VAL(3, SDRAM_PLB_M0ID_OPB) +#define SDRAM_BESR_M0ID_MAL PPC_REG_VAL(3, SDRAM_PLB_M0ID_MAL) +#define SDRAM_BESR_M0ID_SEC PPC_REG_VAL(3,
SDRAM_PLB_M0ID_SEC) +#define SDRAM_BESR_M0ID_AHB PPC_REG_VAL(3, SDRAM_PLB_M0ID_AHB) +#define SDRAM_BESR_M0ET_MASK PPC_REG_VAL(6, 0x7) +#define SDRAM_BESR_M0ET_NONE PPC_REG_VAL(6, 0) +#define SDRAM_BESR_M0ET_ECC PPC_REG_VAL(6, 1) +#define SDRAM_BESR_M0RW_MASK PPC_REG_VAL(7, 1) +#define SDRAM_BESR_M0RW_WRITE PPC_REG_VAL(7, 0) +#define SDRAM_BESR_M0RW_READ PPC_REG_VAL(7, 1) + +/* + * Memory Controller PLB Write Master Interrupt Register + */ +#define SDRAM_WMIRQ_MASK PPC_REG_VAL(8, 0x1FF) +#define SDRAM_WMIRQ_ENCODE(id) PPC_REG_VAL((id % \ + SDRAM_PLB_M0ID_COUNT), 1) +#define SDRAM_WMIRQ_ICU PPC_REG_VAL(SDRAM_PLB_M0ID_ICU, 1) +#define SDRAM_WMIRQ_PCIE0 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE0, 1) +#define SDRAM_WMIRQ_PCIE1 PPC_REG_VAL(SDRAM_PLB_M0ID_PCIE1, 1) +#define SDRAM_WMIRQ_DMA PPC_REG_VAL(SDRAM_PLB_M0ID_DMA, 1) +#define SDRAM_WMIRQ_DCU PPC_REG_VAL(SDRAM_PLB_M0ID_DCU, 1) +#define SDRAM_WMIRQ_OPB PPC_REG_VAL(SDRAM_PLB_M0ID_OPB, 1) +#define SDRAM_WMIRQ_MAL PPC_REG_VAL(SDRAM_PLB_M0ID_MAL, 1) +#define SDRAM_WMIRQ_SEC PPC_REG_VAL(SDRAM_PLB_M0ID_SEC, 1) +#define SDRAM_WMIRQ_AHB PPC_REG_VAL(SDRAM_PLB_M0ID_AHB, 1) + +/* + * Memory Controller Options 1 Register + */ +#define SDRAM_MCOPT1_MCHK_MASK PPC_REG_VAL(3, 0x3) /* ECC mask */ +#define SDRAM_MCOPT1_MCHK_NON PPC_REG_VAL(3, 0x0) /* No ECC gen */ +#define SDRAM_MCOPT1_MCHK_GEN PPC_REG_VAL(3, 0x2) /* ECC gen */ +#define SDRAM_MCOPT1_MCHK_CHK PPC_REG_VAL(3, 0x1) /* ECC gen and chk */ +#define SDRAM_MCOPT1_MCHK_CHK_REP PPC_REG_VAL(3, 0x3) /* ECC gen/chk/rpt */ +#define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3) +#define SDRAM_MCOPT1_RDEN_MASK PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM mask */ +#define SDRAM_MCOPT1_RDEN PPC_REG_VAL(4, 0x1) /* Rgstrd DIMM enbl */ +#define SDRAM_MCOPT1_WDTH_MASK PPC_REG_VAL(7, 0x1) /* Width mask */ +#define SDRAM_MCOPT1_WDTH_32 PPC_REG_VAL(7, 0x0) /* 32 bits */ +#define SDRAM_MCOPT1_WDTH_16 PPC_REG_VAL(7, 0x1) /* 16 bits */ +#define SDRAM_MCOPT1_DDR_TYPE_MASK PPC_REG_VAL(11, 0x1) /* DDR type mask */ +#define SDRAM_MCOPT1_DDR1_TYPE PPC_REG_VAL(11, 0x0) /* DDR1 type */ +#define SDRAM_MCOPT1_DDR2_TYPE PPC_REG_VAL(11, 0x1) /* DDR2 type */ + +/* + * Memory Bank 0 - n Configuration Register + */ +#define SDRAM_MBCF_BA_MASK PPC_REG_VAL(12, 0x1FFF) +#define SDRAM_MBCF_SZ_MASK PPC_REG_VAL(19, 0xF) +#define SDRAM_MBCF_SZ_DECODE(mbxcf) PPC_REG_DECODE(19, mbxcf) +#define SDRAM_MBCF_SZ_4MB PPC_REG_VAL(19, 0x0) +#define SDRAM_MBCF_SZ_8MB PPC_REG_VAL(19, 0x1) +#define SDRAM_MBCF_SZ_16MB PPC_REG_VAL(19, 0x2) +#define SDRAM_MBCF_SZ_32MB PPC_REG_VAL(19, 0x3) +#define SDRAM_MBCF_SZ_64MB PPC_REG_VAL(19, 0x4) +#define SDRAM_MBCF_SZ_128MB PPC_REG_VAL(19, 0x5) +#define SDRAM_MBCF_SZ_256MB PPC_REG_VAL(19, 0x6) +#define SDRAM_MBCF_SZ_512MB PPC_REG_VAL(19, 0x7) +#define SDRAM_MBCF_SZ_1GB PPC_REG_VAL(19, 0x8) +#define SDRAM_MBCF_SZ_2GB PPC_REG_VAL(19, 0x9) +#define SDRAM_MBCF_SZ_4GB PPC_REG_VAL(19, 0xA) +#define SDRAM_MBCF_SZ_8GB PPC_REG_VAL(19, 0xB) +#define SDRAM_MBCF_AM_MASK PPC_REG_VAL(23, 0xF) +#define SDRAM_MBCF_AM_MODE0 PPC_REG_VAL(23, 0x0) +#define SDRAM_MBCF_AM_MODE1 PPC_REG_VAL(23, 0x1) +#define SDRAM_MBCF_AM_MODE2 PPC_REG_VAL(23, 0x2) +#define SDRAM_MBCF_AM_MODE3 PPC_REG_VAL(23, 0x3) +#define SDRAM_MBCF_AM_MODE4 PPC_REG_VAL(23, 0x4) +#define SDRAM_MBCF_AM_MODE5 PPC_REG_VAL(23, 0x5) +#define SDRAM_MBCF_AM_MODE6 PPC_REG_VAL(23, 0x6) +#define SDRAM_MBCF_AM_MODE7 PPC_REG_VAL(23, 0x7) +#define SDRAM_MBCF_AM_MODE8 PPC_REG_VAL(23, 0x8) +#define SDRAM_MBCF_AM_MODE9 PPC_REG_VAL(23, 0x9) +#define SDRAM_MBCF_BE_MASK PPC_REG_VAL(31, 0x1) +#define 
SDRAM_MBCF_BE_DISABLE PPC_REG_VAL(31, 0x0) +#define SDRAM_MBCF_BE_ENABLE PPC_REG_VAL(31, 0x1) + +/* + * ECC Error Status + */ +#define SDRAM_ECCES_MASK PPC_REG_VAL(21, 0x3FFFFF) +#define SDRAM_ECCES_BNCE_MASK PPC_REG_VAL(15, 0xFFFF) +#define SDRAM_ECCES_BNCE_ENCODE(lane) PPC_REG_VAL(((lane) & 0xF), 1) +#define SDRAM_ECCES_CKBER_MASK PPC_REG_VAL(17, 0x3) +#define SDRAM_ECCES_CKBER_NONE PPC_REG_VAL(17, 0) +#define SDRAM_ECCES_CKBER_16_ECC_0_3 PPC_REG_VAL(17, 2) +#define SDRAM_ECCES_CKBER_32_ECC_0_3 PPC_REG_VAL(17, 1) +#define SDRAM_ECCES_CKBER_32_ECC_4_8 PPC_REG_VAL(17, 2) +#define SDRAM_ECCES_CKBER_32_ECC_0_8 PPC_REG_VAL(17, 3) +#define SDRAM_ECCES_CE PPC_REG_VAL(18, 1) +#define SDRAM_ECCES_UE PPC_REG_VAL(19, 1) +#define SDRAM_ECCES_BKNER_MASK PPC_REG_VAL(21, 0x3) +#define SDRAM_ECCES_BK0ER PPC_REG_VAL(20, 1) +#define SDRAM_ECCES_BK1ER PPC_REG_VAL(21, 1) + +#endif /* __PPC4XX_EDAC_H */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 42fb2fd24c0..51a8d4103be 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -69,20 +69,24 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label) * those calls have no teeth) we can't avoid autorequesting. This nag * message should motivate switching to explicit requests... so should * the weaker cleanup after faults, compared to gpio_request(). + * + * NOTE: the autorequest mechanism is going away; at this point it's + * only "legal" in the sense that (old) code using it won't break yet, + * but instead only triggers a WARN() stack dump. */ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset) { - if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { - struct gpio_chip *chip = desc->chip; - int gpio = chip->base + offset; + const struct gpio_chip *chip = desc->chip; + const int gpio = chip->base + offset; + if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0, + "autorequest GPIO-%d\n", gpio)) { if (!try_module_get(chip->owner)) { pr_err("GPIO-%d: module can't be gotten \n", gpio); clear_bit(FLAG_REQUESTED, &desc->flags); /* lose */ return -EIO; } - pr_warning("GPIO-%d autorequested\n", gpio); desc_set_label(desc, "[auto]"); /* caller must chip->request() w/o spinlock */ if (chip->request) @@ -438,6 +442,7 @@ int gpio_export(unsigned gpio, bool direction_may_change) unsigned long flags; struct gpio_desc *desc; int status = -EINVAL; + char *ioname = NULL; /* can't export until sysfs is available ... */ if (!gpio_class.p) { @@ -461,11 +466,14 @@ int gpio_export(unsigned gpio, bool direction_may_change) } spin_unlock_irqrestore(&gpio_lock, flags); + if (desc->chip->names && desc->chip->names[gpio - desc->chip->base]) + ioname = desc->chip->names[gpio - desc->chip->base]; + if (status == 0) { struct device *dev; dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), - desc, "gpio%d", gpio); + desc, ioname ? 
ioname : "gpio%d", gpio); if (dev) { if (direction_may_change) status = sysfs_create_group(&dev->kobj, @@ -513,6 +521,7 @@ void gpio_unexport(unsigned gpio) mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; + if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *dev = NULL; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 1c3a8c55714..a04639dc633 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -42,6 +42,26 @@ static struct drm_display_mode std_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; +static void drm_mode_validate_flag(struct drm_connector *connector, + int flags) +{ + struct drm_display_mode *mode, *t; + + if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE)) + return; + + list_for_each_entry_safe(mode, t, &connector->modes, head) { + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && + !(flags & DRM_MODE_FLAG_INTERLACE)) + mode->status = MODE_NO_INTERLACE; + if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) && + !(flags & DRM_MODE_FLAG_DBLSCAN)) + mode->status = MODE_NO_DBLESCAN; + } + + return; +} + /** * drm_helper_probe_connector_modes - get complete set of display modes * @dev: DRM device @@ -72,6 +92,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; int count = 0; + int mode_flags = 0; DRM_DEBUG("%s\n", drm_get_connector_name(connector)); /* set all modes to the unverified state */ @@ -96,6 +117,13 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, if (maxX && maxY) drm_mode_validate_size(dev, &connector->modes, maxX, maxY, 0); + + if (connector->interlace_allowed) + mode_flags |= DRM_MODE_FLAG_INTERLACE; + if (connector->doublescan_allowed) + mode_flags |= DRM_MODE_FLAG_DBLSCAN; + drm_mode_validate_flag(connector, mode_flags); + list_for_each_entry_safe(mode, t, &connector->modes, head) { if (mode->status == MODE_OK) mode->status = connector_funcs->mode_valid(connector, @@ -885,7 +913,6 @@ bool drm_helper_plugged_event(struct drm_device *dev) /** * drm_initial_config - setup a sane initial connector configuration * @dev: DRM device - * @can_grow: this configuration is growable * * LOCKING: * Called at init time, must take mode config lock. @@ -897,7 +924,7 @@ bool drm_helper_plugged_event(struct drm_device *dev) * RETURNS: * Zero if everything went ok, nonzero otherwise. 
*/ -bool drm_helper_initial_config(struct drm_device *dev, bool can_grow) +bool drm_helper_initial_config(struct drm_device *dev) { struct drm_connector *connector; int count = 0; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c67400067b8..ca9c6165671 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -125,10 +125,8 @@ static bool edid_is_valid(struct edid *edid) DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); goto bad; } - if (edid->revision > 3) { - DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision); - goto bad; - } + if (edid->revision > 4) + DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); for (i = 0; i < EDID_LENGTH; i++) csum += raw_edid[i]; @@ -162,7 +160,7 @@ static bool edid_vendor(struct edid *edid, char *vendor) edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; - edid_vendor[2] = (edid->mfg_id[2] & 0x1f) + '@'; + edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; return !strncmp(edid_vendor, vendor, 3); } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c1173d8c458..4984aa89cf3 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) struct drm_local_map *map = NULL; struct drm_gem_object *obj; struct drm_hash_item *hash; - unsigned long prot; int ret = 0; mutex_lock(&dev->struct_mutex); @@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = obj->dev->driver->gem_vm_ops; vma->vm_private_data = map->handle; /* FIXME: use pgprot_writecombine when available */ - prot = pgprot_val(vma->vm_page_prot); -#ifdef CONFIG_X86 - prot |= _PAGE_CACHE_WC; -#endif - vma->vm_page_prot = __pgprot(prot); + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); /* Take a ref for this mapping of the object, so that the fault * handler can dereference the mmap offset's pointer to the object. diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 5de573a981c..bc0c6849360 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -451,6 +451,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); } +EXPORT_SYMBOL(drm_sysfs_hotplug_event); /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 85549f615b1..c23b3a95b7c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -922,7 +922,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, * Some of the preallocated space is taken by the GTT * and popup. GTT is 1K per MB of aperture size, and popup is 4K. */ - if (IS_G4X(dev)) + if (IS_G4X(dev) || IS_IGD(dev)) overhead = 4096; else overhead = (*aperture_size / 1024) + 4096; @@ -1030,13 +1030,6 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto destroy_ringbuffer; - /* FIXME: re-add hotplug support */ -#if 0 - ret = drm_hotplug_init(dev); - if (ret) - goto destroy_ringbuffer; -#endif - /* Always safe in the mode setting case. 
*/ /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = 1; @@ -1049,7 +1042,7 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_modeset_init(dev); - drm_helper_initial_config(dev, false); + drm_helper_initial_config(dev); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c1685d0c704..317b1223e09 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -159,6 +159,9 @@ typedef struct drm_i915_private { u32 irq_mask_reg; u32 pipestat[2]; + u32 hotplug_supported_mask; + struct work_struct hotplug_work; + int tex_lru_log_granularity; int allow_batchbuffer; struct mem_block *agp_heap; @@ -297,6 +300,7 @@ typedef struct drm_i915_private { * * A reference is held on the buffer while on this list. */ + spinlock_t active_list_lock; struct list_head active_list; /** @@ -810,6 +814,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) +#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e0389ad1477..1449b452cc6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1072,6 +1072,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) case -EAGAIN: return VM_FAULT_OOM; case -EFAULT: + case -EINVAL: return VM_FAULT_SIGBUS; default: return VM_FAULT_NOPAGE; @@ -1324,8 +1325,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) obj_priv->active = 1; } /* Move from whatever list we were on to the tail of execution. */ + spin_lock(&dev_priv->mm.active_list_lock); list_move_tail(&obj_priv->list, &dev_priv->mm.active_list); + spin_unlock(&dev_priv->mm.active_list_lock); obj_priv->last_rendering_seqno = seqno; } @@ -1467,6 +1470,7 @@ i915_gem_retire_request(struct drm_device *dev, /* Move any buffers on the active list that are no longer referenced * by the ringbuffer to the flushing/inactive lists as appropriate. */ + spin_lock(&dev_priv->mm.active_list_lock); while (!list_empty(&dev_priv->mm.active_list)) { struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; @@ -1481,7 +1485,7 @@ i915_gem_retire_request(struct drm_device *dev, * this seqno. 
*/ if (obj_priv->last_rendering_seqno != request->seqno) - return; + goto out; #if WATCH_LRU DRM_INFO("%s: retire %d moves to inactive list %p\n", @@ -1493,6 +1497,8 @@ i915_gem_retire_request(struct drm_device *dev, else i915_gem_object_move_to_inactive(obj); } +out: + spin_unlock(&dev_priv->mm.active_list_lock); } /** @@ -1990,20 +1996,23 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) int regnum = obj_priv->fence_reg; uint32_t val; uint32_t pitch_val; + uint32_t fence_size_bits; - if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || + if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || (obj_priv->gtt_offset & (obj->size - 1))) { - WARN(1, "%s: object 0x%08x not 1M or size aligned\n", + WARN(1, "%s: object 0x%08x not 512K or size aligned\n", __func__, obj_priv->gtt_offset); return; } pitch_val = (obj_priv->stride / 128) - 1; - + WARN_ON(pitch_val & ~0x0000000f); val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I830_FENCE_SIZE_BITS(obj->size); + fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); + WARN_ON(fence_size_bits & ~0x00000f00); + val |= fence_size_bits; val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; @@ -2194,7 +2203,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) return -EBUSY; if (alignment == 0) alignment = i915_gem_get_gtt_alignment(obj); - if (alignment & (PAGE_SIZE - 1)) { + if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return -EINVAL; } @@ -2211,15 +2220,20 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } } if (obj_priv->gtt_space == NULL) { + bool lists_empty; + /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
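[Editorial sketch] Note how the retire hunk above converts an early return into "goto out" once the walk runs under the spinlock, so the unlock cannot be skipped. A sketch of that discipline with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct list_head link;
	unsigned int seqno;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_active);

static void demo_retire(unsigned int seqno)
{
	spin_lock(&demo_lock);
	while (!list_empty(&demo_active)) {
		struct demo_obj *obj = list_first_entry(&demo_active,
							struct demo_obj, link);

		if (obj->seqno != seqno)
			goto out;	/* a bare "return" would leak the lock */
		list_del_init(&obj->link);
	}
out:
	spin_unlock(&demo_lock);
}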
*/ #if WATCH_LRU DRM_INFO("%s: GTT full, evicting something\n", __func__); #endif - if (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list)) { + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + if (lists_empty) { DRM_ERROR("GTT full, but LRU list empty\n"); return -ENOMEM; } @@ -3675,6 +3689,7 @@ i915_gem_idle(struct drm_device *dev) i915_gem_retire_requests(dev); + spin_lock(&dev_priv->mm.active_list_lock); if (!dev_priv->mm.wedged) { /* Active and flushing should now be empty as we've * waited for a sequence higher than any pending execbuffer @@ -3701,6 +3716,7 @@ i915_gem_idle(struct drm_device *dev) obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; i915_gem_object_move_to_inactive(obj_priv->obj); } + spin_unlock(&dev_priv->mm.active_list_lock); while (!list_empty(&dev_priv->mm.flushing_list)) { struct drm_i915_gem_object *obj_priv; @@ -3949,7 +3965,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, if (ret != 0) return ret; + spin_lock(&dev_priv->mm.active_list_lock); BUG_ON(!list_empty(&dev_priv->mm.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->mm.request_list)); @@ -3993,6 +4012,7 @@ i915_gem_load(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + spin_lock_init(&dev_priv->mm.active_list_lock); INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 131c088f8c8..8d0b943e2c5 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -105,12 +105,14 @@ i915_dump_lru(struct drm_device *dev, const char *where) struct drm_i915_gem_object *obj_priv; DRM_INFO("active list %s {\n", where); + spin_lock(&dev_priv->mm.active_list_lock); list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { DRM_INFO(" %p: %08x\n", obj_priv, obj_priv->last_rendering_seqno); } + spin_unlock(&dev_priv->mm.active_list_lock); DRM_INFO("}\n"); DRM_INFO("flushing list %s {\n", where); list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 455ec970b38..a1ac0c5e730 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -69,10 +69,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; + spinlock_t *lock = NULL; switch (list) { case ACTIVE_LIST: seq_printf(m, "Active:\n"); + lock = &dev_priv->mm.active_list_lock; + spin_lock(lock); head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: @@ -104,6 +107,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) seq_printf(m, " (fence: %d\n", obj_priv->fence_reg); seq_printf(m, "\n"); } + + if (lock) + spin_unlock(lock); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 4cce1aef438..6be3f927c86 100644 --- 
a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -216,6 +216,22 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) else tile_width = 512; + /* check maximum stride & object size */ + if (IS_I965G(dev)) { + /* i965 stores the end address of the gtt mapping in the fence + * reg, so dont bother to check the size */ + if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) + return false; + } else if (IS_I9XX(dev)) { + if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL || + size > (I830_FENCE_MAX_SIZE_VAL << 20)) + return false; + } else { + if (stride / 128 > I830_FENCE_MAX_PITCH_VAL || + size > (I830_FENCE_MAX_SIZE_VAL << 19)) + return false; + } + /* 965+ just needs multiples of tile width */ if (IS_I965G(dev)) { if (stride & (tile_width - 1)) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 87b6b603469..ee7ce7b78cf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -48,10 +48,6 @@ /** Interrupts that we mask and unmask at runtime. */ #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) -/** These are all of the interrupts used by the driver */ -#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \ - I915_INTERRUPT_ENABLE_VAR) - #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ PIPE_VBLANK_INTERRUPT_STATUS) @@ -187,6 +183,19 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) return I915_READ(reg); } +/* + * Handle hotplug events outside the interrupt handler proper. + */ +static void i915_hotplug_work_func(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + hotplug_work); + struct drm_device *dev = dev_priv->dev; + + /* Just fire off a uevent and let userspace tell us what to do */ + drm_sysfs_hotplug_event(dev); +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -244,6 +253,20 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) ret = IRQ_HANDLED; + /* Consume port. 
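[Editorial sketch] The i915_irq.c hunk above adds i915_hotplug_work_func() so the interrupt handler (continued below) only acknowledges the hardware and schedules the heavy lifting; the uevent is sent from process context, where sleeping is allowed. A minimal sketch of the deferral pattern with hypothetical demo_* names:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct hotplug_work;
};

/* Process context: safe to sleep, e.g. to send a uevent. */
static void demo_hotplug_work_func(struct work_struct *work)
{
	struct demo_priv *priv =
		container_of(work, struct demo_priv, hotplug_work);

	pr_debug("hotplug event for %p\n", priv);
	/* ... notify userspace, re-probe connectors ... */
}

static irqreturn_t demo_irq_handler(int irq, void *arg)
{
	struct demo_priv *priv = arg;

	/* Ack the hardware first, then defer the slow path. */
	schedule_work(&priv->hotplug_work);
	return IRQ_HANDLED;
}

static void demo_irq_init(struct demo_priv *priv)
{
	INIT_WORK(&priv->hotplug_work, demo_hotplug_work_func);
}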
Then clear IIR or we'll miss events */ + if ((I915_HAS_HOTPLUG(dev)) && + (iir & I915_DISPLAY_PORT_INTERRUPT)) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + + DRM_DEBUG("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_status & dev_priv->hotplug_supported_mask) + schedule_work(&dev_priv->hotplug_work); + + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + I915_WRITE(IIR, iir); new_iir = I915_READ(IIR); /* Flush posted writes */ @@ -528,17 +551,24 @@ void i915_driver_irq_preinstall(struct drm_device * dev) atomic_set(&dev_priv->irq_received, 0); + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + } + I915_WRITE(HWSTAM, 0xeffe); I915_WRITE(PIPEASTAT, 0); I915_WRITE(PIPEBSTAT, 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); (void) I915_READ(IER); + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); } int i915_driver_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; @@ -550,13 +580,35 @@ int i915_driver_irq_postinstall(struct drm_device *dev) dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; + if (I915_HAS_HOTPLUG(dev)) { + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); + + /* Leave other bits alone */ + hotplug_en |= HOTPLUG_EN_MASK; + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + + dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | + TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | + SDVOB_HOTPLUG_INT_STATUS; + if (IS_G4X(dev)) { + dev_priv->hotplug_supported_mask |= + HDMIB_HOTPLUG_INT_STATUS | + HDMIC_HOTPLUG_INT_STATUS | + HDMID_HOTPLUG_INT_STATUS; + } + /* Enable in IER... 
*/ + enable_mask |= I915_DISPLAY_PORT_INTERRUPT; + /* and unmask in IMR */ + i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); + } + /* Disable pipe interrupt enables, clear pending pipe status */ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); /* Clear pending interrupt status */ I915_WRITE(IIR, I915_READ(IIR)); - I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); + I915_WRITE(IER, enable_mask); I915_WRITE(IMR, dev_priv->irq_mask_reg); (void) I915_READ(IER); @@ -575,6 +627,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev) dev_priv->vblank_pipe = 0; + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + } + I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(PIPEASTAT, 0); I915_WRITE(PIPEBSTAT, 0); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 377cc588f5e..e805b590ae7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -190,6 +190,8 @@ #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) +#define I830_FENCE_MAX_PITCH_VAL 0x10 +#define I830_FENCE_MAX_SIZE_VAL (1<<8) #define I915_FENCE_START_MASK 0x0ff00000 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) @@ -198,6 +200,7 @@ #define I965_FENCE_PITCH_SHIFT 2 #define I965_FENCE_TILING_Y_SHIFT 1 #define I965_FENCE_REG_VALID (1<<0) +#define I965_FENCE_MAX_PITCH_VAL 0x0400 /* * Instruction and interrupt control regs @@ -648,6 +651,14 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ +#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f +#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ + HDMIC_HOTPLUG_INT_EN | \ + HDMID_HOTPLUG_INT_EN | \ + SDVOB_HOTPLUG_INT_EN | \ + SDVOC_HOTPLUG_INT_EN | \ + TV_HOTPLUG_INT_EN | \ + CRT_HOTPLUG_INT_EN) #define PORT_HOTPLUG_STAT 0x61114 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2b6d44381c3..9bdd959260a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -41,7 +41,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) temp = I915_READ(ADPA); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); - temp &= ~ADPA_DAC_ENABLE; + temp |= ADPA_DAC_ENABLE; switch(mode) { case DRM_MODE_DPMS_ON: @@ -158,7 +158,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) else tries = 1; hotplug_en = I915_READ(PORT_HOTPLUG_EN); - hotplug_en &= ~(CRT_HOTPLUG_MASK); + hotplug_en &= CRT_FORCE_HOTPLUG_MASK; hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; if (IS_GM45(dev)) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d9c50ff94d7..64773ce5296 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -636,7 +636,7 @@ void intel_wait_for_vblank(struct drm_device *dev) { /* Wait for 20ms, i.e. one cycle at 50hz. */ - udelay(20000); + mdelay(20); } static int @@ -1106,6 +1106,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; } + /* SDVO TV has fixed PLL values depend on its clock range, + this mirrors vbios setting. 
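[Editorial sketch] The intel_display.c change above (udelay(20000) to mdelay(20)) reflects that udelay() is only meant for short waits; many implementations overflow or are capped above a few milliseconds. A short sketch of the rule of thumb:

#include <linux/delay.h>

static void demo_wait_for_vblank(void)
{
	/* ~20 ms, one frame at 50 Hz. Too long for udelay(); use
	 * mdelay() when spinning is required, or msleep() in contexts
	 * that may sleep.
	 */
	mdelay(20);
}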
*/ + if (is_sdvo && is_tv) { + if (adjusted_mode->clock >= 100000 + && adjusted_mode->clock < 140500) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 3; + clock.m1 = 16; + clock.m2 = 8; + } else if (adjusted_mode->clock >= 140500 + && adjusted_mode->clock <= 200000) { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 6; + clock.m1 = 12; + clock.m2 = 8; + } + } + if (IS_IGD(dev)) fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; else diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index e42019e5d66..07d7ec97616 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -76,6 +76,7 @@ int intel_ddc_get_modes(struct intel_output *intel_output) drm_mode_connector_update_edid_property(&intel_output->base, edid); ret = drm_add_edid_modes(&intel_output->base, edid); + intel_output->base.display_info.raw_edid = NULL; kfree(edid); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index fbe6f3931b1..7b31f55f55c 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -273,20 +273,20 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int i; - DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); + printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); for (i = 0; i < args_len; i++) - printk("%02X ", ((u8 *)args)[i]); + printk(KERN_DEBUG "%02X ", ((u8 *)args)[i]); for (; i < 8; i++) - printk(" "); + printk(KERN_DEBUG " "); for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { if (cmd == sdvo_cmd_names[i].cmd) { - printk("(%s)", sdvo_cmd_names[i].name); + printk(KERN_DEBUG "(%s)", sdvo_cmd_names[i].name); break; } } if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) - printk("(%02X)",cmd); - printk("\n"); + printk(KERN_DEBUG "(%02X)", cmd); + printk(KERN_DEBUG "\n"); } #else #define intel_sdvo_debug_write(o, c, a, l) @@ -323,17 +323,18 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output, u8 status) { struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + int i; - DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv)); + printk(KERN_DEBUG "%s: R: ", SDVO_NAME(sdvo_priv)); for (i = 0; i < response_len; i++) - printk("%02X ", ((u8 *)response)[i]); + printk(KERN_DEBUG "%02X ", ((u8 *)response)[i]); for (; i < 8; i++) - printk(" "); + printk(KERN_DEBUG " "); if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) - printk("(%s)", cmd_status_names[status]); + printk(KERN_DEBUG "(%s)", cmd_status_names[status]); else - printk("(??? %d)", status); - printk("\n"); + printk(KERN_DEBUG "(??? 
%d)", status); + printk(KERN_DEBUG "\n"); } #else #define intel_sdvo_debug_response(o, r, l, s) @@ -588,9 +589,12 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, struct intel_sdvo_preferred_input_timing_args args; uint8_t status; + memset(&args, 0, sizeof(args)); args.clock = clock; args.width = width; args.height = height; + args.interlace = 0; + args.scaled = 0; intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); status = intel_sdvo_read_response(output, NULL, 0); @@ -683,7 +687,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | ((v_blank_len >> 8) & 0xf); - dtd->part2.h_sync_off = h_sync_offset; + dtd->part2.h_sync_off = h_sync_offset & 0xff; dtd->part2.h_sync_width = h_sync_len & 0xff; dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | (v_sync_len & 0xf); @@ -705,27 +709,10 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, struct intel_sdvo_dtd *dtd) { - uint16_t width, height; - uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; - uint16_t h_sync_offset, v_sync_offset; - - width = mode->crtc_hdisplay; - height = mode->crtc_vdisplay; - - /* do some mode translations */ - h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; - h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; - - v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; - v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; - - h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; - v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; - mode->hdisplay = dtd->part1.h_active; mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; - mode->hsync_start += (dtd->part2.sync_off_width_high & 0xa0) << 2; + mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; mode->htotal = mode->hdisplay + dtd->part1.h_blank; @@ -735,7 +722,7 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; mode->vsync_start = mode->vdisplay; mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; - mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0a) << 2; + mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; mode->vsync_end = mode->vsync_start + (dtd->part2.v_sync_off_width & 0xf); @@ -745,7 +732,7 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->clock = dtd->part1.clock * 10; - mode->flags &= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); if (dtd->part2.dtd_flags & 0x2) mode->flags |= DRM_MODE_FLAG_PHSYNC; if (dtd->part2.dtd_flags & 0x4) @@ -924,6 +911,27 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output, SDVO_HBUF_TX_VSYNC); } +static void intel_sdvo_set_tv_format(struct intel_output *output) +{ + struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_sdvo_tv_format *format, unset; + u8 status; + + format = &sdvo_priv->tv_format; + memset(&unset, 0, sizeof(unset)); + if (memcmp(format, &unset, sizeof(*format))) { + DRM_DEBUG("%s: Choosing default TV format of 
NTSC-M\n", + SDVO_NAME(sdvo_priv)); + format->ntsc_m = 1; + intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, format, + sizeof(*format)); + status = intel_sdvo_read_response(output, NULL, 0); + if (status != SDVO_CMD_STATUS_SUCCESS) + DRM_DEBUG("%s: Failed to set TV format\n", + SDVO_NAME(sdvo_priv)); + } +} + static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -968,6 +976,12 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + drm_mode_set_crtcinfo(adjusted_mode, 0); + + mode->clock = adjusted_mode->clock; + + adjusted_mode->clock *= + intel_sdvo_get_pixel_multiplier(mode); } else { return false; } @@ -1012,7 +1026,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, sdvox |= SDVO_AUDIO_ENABLE; } - intel_sdvo_get_dtd_from_mode(&input_dtd, mode); + /* We have tried to get input timing in mode_fixup, and filled into + adjusted_mode */ + if (sdvo_priv->is_tv) + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); + else + intel_sdvo_get_dtd_from_mode(&input_dtd, mode); /* If it's a TV, we already set the output timing in mode_fixup. * Otherwise, the output timing is equal to the input timing. @@ -1027,6 +1046,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the input timing to the screen. Assume always input 0. */ intel_sdvo_set_target_input(output, true, false); + if (sdvo_priv->is_tv) + intel_sdvo_set_tv_format(output); + /* We would like to use intel_sdvo_create_preferred_input_timing() to * provide the device with a timing it can support, if it supports that * feature. However, presumably we would need to adjust the CRTC to @@ -1395,7 +1417,7 @@ static void intel_sdvo_check_tv_format(struct intel_output *output) { struct intel_sdvo_priv *dev_priv = output->dev_priv; - struct intel_sdvo_tv_format format, unset; + struct intel_sdvo_tv_format format; uint8_t status; intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0); @@ -1403,15 +1425,7 @@ intel_sdvo_check_tv_format(struct intel_output *output) if (status != SDVO_CMD_STATUS_SUCCESS) return; - memset(&unset, 0, sizeof(unset)); - if (memcmp(&format, &unset, sizeof(format))) { - DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n", - SDVO_NAME(dev_priv)); - - format.ntsc_m = true; - intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, NULL, 0); - status = intel_sdvo_read_response(output, NULL, 0); - } + memcpy(&dev_priv->tv_format, &format, sizeof(format)); } /* @@ -1420,68 +1434,70 @@ intel_sdvo_check_tv_format(struct intel_output *output) * XXX: all 60Hz refresh? 
*/ struct drm_display_mode sdvo_tv_modes[] = { - { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815680, 321, 384, 416, - 200, 0, 232, 201, 233, 4196112, 0, + { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, + 416, 0, 200, 201, 232, 233, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814080, 321, 384, 416, - 240, 0, 272, 241, 273, 4196112, 0, + { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384, + 416, 0, 240, 241, 272, 273, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910080, 401, 464, 496, - 300, 0, 332, 301, 333, 4196112, 0, + { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464, + 496, 0, 300, 301, 332, 333, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913280, 641, 704, 736, - 350, 0, 382, 351, 383, 4196112, 0, + { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704, + 736, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736, - 400, 0, 432, 401, 433, 4196112, 0, + { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704, + 736, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736, - 400, 0, 432, 401, 433, 4196112, 0, + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704, + 736, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624000, 705, 768, 800, - 480, 0, 512, 481, 513, 4196112, 0, + { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768, + 800, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232000, 705, 768, 800, - 576, 0, 608, 577, 609, 4196112, 0, + { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768, + 800, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751680, 721, 784, 816, - 350, 0, 382, 351, 383, 4196112, 0, + { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784, + 816, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199680, 721, 784, 816, - 400, 0, 432, 401, 433, 4196112, 0, + { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784, + 816, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116480, 721, 784, 816, - 480, 0, 512, 481, 513, 4196112, 0, + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784, + 816, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054080, 721, 784, 816, - 540, 0, 572, 541, 573, 4196112, 0, + { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784, + 816, 0, 540, 541, 572, 573, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816640, 721, 784, 816, - 576, 0, 608, 577, 609, 4196112, 0, + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784, + 816, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570560, 769, 832, 864, - 576, 0, 608, 577, 609, 4196112, 0, + { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 
832, + 864, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030080, 801, 864, 896, - 600, 0, 632, 601, 633, 4196112, 0, + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864, + 896, 0, 600, 601, 632, 633, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581760, 833, 896, 928, - 624, 0, 656, 625, 657, 4196112, 0, + { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896, + 928, 0, 624, 625, 656, 657, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707040, 921, 984, 1016, - 766, 0, 798, 767, 799, 4196112, 0, + { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984, + 1016, 0, 766, 767, 798, 799, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827200, 1025, 1088, 1120, - 768, 0, 800, 769, 801, 4196112, 0, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088, + 1120, 0, 768, 769, 800, 801, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, - { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265920, 1281, 1344, 1376, - 1024, 0, 1056, 1025, 1057, 4196112, 0, + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344, + 1376, 0, 1024, 1025, 1056, 1057, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_output *output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = output->dev_priv; + struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0; uint8_t status; int i = 0; @@ -1491,15 +1507,22 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) /* Read the list of supported input resolutions for the selected TV * format. 
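[Editorial sketch] The get_tv_modes() hunk continuing below stops linking entries of the static sdvo_tv_modes[] table directly into the connector's probed-mode list and duplicates them first; a static node linked into a per-connector list would be corrupted on the next probe or by a second connector. A sketch of the pattern, assuming the DRM helpers of this era (drm_mode_duplicate(), drm_mode_probed_add()):

#include "drmP.h"	/* drm_mode_duplicate(), drm_mode_probed_add() */

static void demo_add_static_modes(struct drm_connector *connector,
				  struct drm_display_mode *table,
				  int count, u32 supported)
{
	int i;

	for (i = 0; i < count; i++) {
		struct drm_display_mode *nmode;

		if (!(supported & (1 << i)))
			continue;

		/* Give each connector its own copy of the template. */
		nmode = drm_mode_duplicate(connector->dev, &table[i]);
		if (nmode)
			drm_mode_probed_add(connector, nmode);
	}
}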
*/ + memset(&tv_res, 0, sizeof(tv_res)); + memcpy(&tv_res, &sdvo_priv->tv_format, sizeof(tv_res)); intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, - NULL, 0); + &tv_res, sizeof(tv_res)); status = intel_sdvo_read_response(output, &reply, 3); if (status != SDVO_CMD_STATUS_SUCCESS) return; for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) - if (reply & (1 << i)) - drm_mode_probed_add(connector, &sdvo_tv_modes[i]); + if (reply & (1 << i)) { + struct drm_display_mode *nmode; + nmode = drm_mode_duplicate(connector->dev, + &sdvo_tv_modes[i]); + if (nmode) + drm_mode_probed_add(connector, nmode); + } } static int intel_sdvo_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 1117b9c151a..193938b7d7f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -100,6 +100,9 @@ struct intel_sdvo_preferred_input_timing_args { u16 clock; u16 width; u16 height; + u8 interlace:1; + u8 scaled:1; + u8 pad:6; } __attribute__((packed)); /* I2C registers for SDVO */ diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index ceca9471a75..d2c32983242 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1570,33 +1570,49 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop struct drm_device *dev = connector->dev; struct intel_output *intel_output = to_intel_output(connector); struct intel_tv_priv *tv_priv = intel_output->dev_priv; + struct drm_encoder *encoder = &intel_output->enc; + struct drm_crtc *crtc = encoder->crtc; int ret = 0; + bool changed = false; ret = drm_connector_property_set_value(connector, property, val); if (ret < 0) goto out; - if (property == dev->mode_config.tv_left_margin_property) + if (property == dev->mode_config.tv_left_margin_property && + tv_priv->margin[TV_MARGIN_LEFT] != val) { tv_priv->margin[TV_MARGIN_LEFT] = val; - else if (property == dev->mode_config.tv_right_margin_property) + changed = true; + } else if (property == dev->mode_config.tv_right_margin_property && + tv_priv->margin[TV_MARGIN_RIGHT] != val) { tv_priv->margin[TV_MARGIN_RIGHT] = val; - else if (property == dev->mode_config.tv_top_margin_property) + changed = true; + } else if (property == dev->mode_config.tv_top_margin_property && + tv_priv->margin[TV_MARGIN_TOP] != val) { tv_priv->margin[TV_MARGIN_TOP] = val; - else if (property == dev->mode_config.tv_bottom_margin_property) + changed = true; + } else if (property == dev->mode_config.tv_bottom_margin_property && + tv_priv->margin[TV_MARGIN_BOTTOM] != val) { tv_priv->margin[TV_MARGIN_BOTTOM] = val; - else if (property == dev->mode_config.tv_mode_property) { + changed = true; + } else if (property == dev->mode_config.tv_mode_property) { if (val >= NUM_TV_MODES) { ret = -EINVAL; goto out; } + if (!strcmp(tv_priv->tv_format, tv_modes[val].name)) + goto out; + tv_priv->tv_format = tv_modes[val].name; - intel_tv_mode_set(&intel_output->enc, NULL, NULL); + changed = true; } else { ret = -EINVAL; goto out; } - intel_tv_mode_set(&intel_output->enc, NULL, NULL); + if (changed && crtc) + drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, + crtc->y, crtc->fb); out: return ret; } diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 9d14eee3ed0..bc9d09dfa8e 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -388,17 +388,17 @@ static void 
r600_cp_load_microcode(drm_radeon_private_t *dev_priv) DRM_INFO("Loading RS780 CP Microcode\n"); for (i = 0; i < PM4_UCODE_SIZE; i++) { RADEON_WRITE(R600_CP_ME_RAM_DATA, - RV670_cp_microcode[i][0]); + RS780_cp_microcode[i][0]); RADEON_WRITE(R600_CP_ME_RAM_DATA, - RV670_cp_microcode[i][1]); + RS780_cp_microcode[i][1]); RADEON_WRITE(R600_CP_ME_RAM_DATA, - RV670_cp_microcode[i][2]); + RS780_cp_microcode[i][2]); } RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); DRM_INFO("Loading RS780 PFP Microcode\n"); for (i = 0; i < PFP_UCODE_SIZE; i++) - RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); + RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); } RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index 81e6ebf323e..55cd0fa6833 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c @@ -381,7 +381,7 @@ static void hgpk_disconnect(struct psmouse *psmouse) static void hgpk_recalib_work(struct work_struct *work) { - struct delayed_work *w = container_of(work, struct delayed_work, work); + struct delayed_work *w = to_delayed_work(work); struct hgpk_data *priv = container_of(w, struct hgpk_data, recalib_wq); struct psmouse *psmouse = priv->psmouse; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 5f3bff43462..0b92b2f6ea6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -165,7 +165,7 @@ config SGI_XP depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 - select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP + select SGI_GRU if X86_64 && SMP ---help--- An SGI machine can be divided into multiple Single System Images which act independently of each other and have @@ -189,7 +189,7 @@ config HP_ILO config SGI_GRU tristate "SGI GRU driver" - depends on (X86_UV || IA64_SGI_UV || IA64_GENERIC) && SMP + depends on X86_UV && SMP default n select MMU_NOTIFIER ---help--- diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index d4775528abc..d184dfab963 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -53,6 +53,7 @@ struct at24_data { struct at24_platform_data chip; + struct memory_accessor macc; bool use_smbus; /* @@ -225,14 +226,11 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, return status; } -static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, +static ssize_t at24_read(struct at24_data *at24, char *buf, loff_t off, size_t count) { - struct at24_data *at24; ssize_t retval = 0; - at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); - if (unlikely(!count)) return count; @@ -262,12 +260,14 @@ static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, return retval; } +static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct at24_data *at24; -/* - * REVISIT: export at24_bin{read,write}() to let other kernel code use - * eeprom data. For example, it might hold a board's Ethernet address, or - * board-specific calibration data generated on the manufacturing floor. 
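[Editorial sketch] The hgpk.c change above swaps an open-coded container_of() for the to_delayed_work() helper. A minimal sketch of a delayed-work handler recovering its private data, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_data {
	struct delayed_work recalib_wq;
};

static void demo_recalib_work(struct work_struct *work)
{
	/* to_delayed_work() hides the container_of() from work_struct
	 * to delayed_work; one more container_of() reaches the driver
	 * data embedding it.
	 */
	struct delayed_work *dw = to_delayed_work(work);
	struct demo_data *priv = container_of(dw, struct demo_data,
					      recalib_wq);

	pr_debug("recalibrating %p\n", priv);
}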
- */ + at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); + return at24_read(at24, buf, off, count); +} /* @@ -347,14 +347,11 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf, return -ETIMEDOUT; } -static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr, +static ssize_t at24_write(struct at24_data *at24, char *buf, loff_t off, size_t count) { - struct at24_data *at24; ssize_t retval = 0; - at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); - if (unlikely(!count)) return count; @@ -384,6 +381,39 @@ static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr, return retval; } +static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct at24_data *at24; + + at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); + return at24_write(at24, buf, off, count); +} + +/*-------------------------------------------------------------------------*/ + +/* + * This lets other kernel code access the eeprom data. For example, it + * might hold a board's Ethernet address, or board-specific calibration + * data generated on the manufacturing floor. + */ + +static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf, + off_t offset, size_t count) +{ + struct at24_data *at24 = container_of(macc, struct at24_data, macc); + + return at24_read(at24, buf, offset, count); +} + +static ssize_t at24_macc_write(struct memory_accessor *macc, char *buf, + off_t offset, size_t count) +{ + struct at24_data *at24 = container_of(macc, struct at24_data, macc); + + return at24_write(at24, buf, offset, count); +} + /*-------------------------------------------------------------------------*/ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -413,6 +443,9 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) * is recommended anyhow. */ chip.page_size = 1; + + chip.setup = NULL; + chip.context = NULL; } if (!is_power_of_2(chip.byte_len)) @@ -463,6 +496,8 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->bin.read = at24_bin_read; at24->bin.size = chip.byte_len; + at24->macc.read = at24_macc_read; + writable = !(chip.flags & AT24_FLAG_READONLY); if (writable) { if (!use_smbus || i2c_check_functionality(client->adapter, @@ -470,6 +505,8 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) unsigned write_max = chip.page_size; + at24->macc.write = at24_macc_write; + at24->bin.write = at24_bin_write; at24->bin.attr.mode |= S_IWUSR; @@ -520,6 +557,10 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->write_max, use_smbus ? 
", use_smbus" : ""); + /* export data to kernel code */ + if (chip.setup) + chip.setup(&at24->macc, chip.context); + return 0; err_clients: diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 290dbe99647..6bc0dac5c1e 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -30,6 +30,7 @@ struct at25_data { struct spi_device *spi; + struct memory_accessor mem; struct mutex lock; struct spi_eeprom chip; struct bin_attribute bin; @@ -75,6 +76,13 @@ at25_ee_read( struct spi_transfer t[2]; struct spi_message m; + if (unlikely(offset >= at25->bin.size)) + return 0; + if ((offset + count) > at25->bin.size) + count = at25->bin.size - offset; + if (unlikely(!count)) + return count; + cp = command; *cp++ = AT25_READ; @@ -127,13 +135,6 @@ at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr, dev = container_of(kobj, struct device, kobj); at25 = dev_get_drvdata(dev); - if (unlikely(off >= at25->bin.size)) - return 0; - if ((off + count) > at25->bin.size) - count = at25->bin.size - off; - if (unlikely(!count)) - return count; - return at25_ee_read(at25, buf, off, count); } @@ -146,6 +147,13 @@ at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count) unsigned buf_size; u8 *bounce; + if (unlikely(off >= at25->bin.size)) + return -EFBIG; + if ((off + count) > at25->bin.size) + count = at25->bin.size - off; + if (unlikely(!count)) + return count; + /* Temp buffer starts with command and address */ buf_size = at25->chip.page_size; if (buf_size > io_limit) @@ -253,18 +261,31 @@ at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr, dev = container_of(kobj, struct device, kobj); at25 = dev_get_drvdata(dev); - if (unlikely(off >= at25->bin.size)) - return -EFBIG; - if ((off + count) > at25->bin.size) - count = at25->bin.size - off; - if (unlikely(!count)) - return count; - return at25_ee_write(at25, buf, off, count); } /*-------------------------------------------------------------------------*/ +/* Let in-kernel code access the eeprom data. */ + +static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf, + off_t offset, size_t count) +{ + struct at25_data *at25 = container_of(mem, struct at25_data, mem); + + return at25_ee_read(at25, buf, offset, count); +} + +static ssize_t at25_mem_write(struct memory_accessor *mem, char *buf, + off_t offset, size_t count) +{ + struct at25_data *at25 = container_of(mem, struct at25_data, mem); + + return at25_ee_write(at25, buf, offset, count); +} + +/*-------------------------------------------------------------------------*/ + static int at25_probe(struct spi_device *spi) { struct at25_data *at25 = NULL; @@ -317,6 +338,10 @@ static int at25_probe(struct spi_device *spi) at25->addrlen = addrlen; /* Export the EEPROM bytes through sysfs, since that's convenient. + * And maybe to other kernel code; it might hold a board's Ethernet + * address, or board-specific calibration data generated on the + * manufacturing floor. + * * Default to root-only access to the data; EEPROMs often hold data * that's sensitive for read and/or write, like ethernet addresses, * security codes, board-specific manufacturing calibrations, etc. 
@@ -324,17 +349,22 @@ static int at25_probe(struct spi_device *spi) at25->bin.attr.name = "eeprom"; at25->bin.attr.mode = S_IRUSR; at25->bin.read = at25_bin_read; + at25->mem.read = at25_mem_read; at25->bin.size = at25->chip.byte_len; if (!(chip->flags & EE_READONLY)) { at25->bin.write = at25_bin_write; at25->bin.attr.mode |= S_IWUSR; + at25->mem.write = at25_mem_write; } err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin); if (err) goto fail; + if (chip->setup) + chip->setup(&at25->mem, chip->context); + dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n", (at25->bin.size < 1024) ? at25->bin.size diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile index 9e9170b3599..bcd8136d2f9 100644 --- a/drivers/misc/sgi-gru/Makefile +++ b/drivers/misc/sgi-gru/Makefile @@ -3,5 +3,5 @@ ifdef CONFIG_SGI_GRU_DEBUG endif obj-$(CONFIG_SGI_GRU) := gru.o -gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o +gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h index 48762e7b98b..3fde33c1e8f 100644 --- a/drivers/misc/sgi-gru/gru_instructions.h +++ b/drivers/misc/sgi-gru/gru_instructions.h @@ -19,8 +19,11 @@ #ifndef __GRU_INSTRUCTIONS_H__ #define __GRU_INSTRUCTIONS_H__ -#define gru_flush_cache_hook(p) -#define gru_emulator_wait_hook(p, w) +extern int gru_check_status_proc(void *cb); +extern int gru_wait_proc(void *cb); +extern void gru_wait_abort_proc(void *cb); + + /* * Architecture dependent functions @@ -29,16 +32,16 @@ #if defined(CONFIG_IA64) #include <linux/compiler.h> #include <asm/intrinsics.h> -#define __flush_cache(p) ia64_fc(p) +#define __flush_cache(p) ia64_fc((unsigned long)p) /* Use volatile on IA64 to ensure ordering via st4.rel */ -#define gru_ordered_store_int(p,v) \ +#define gru_ordered_store_int(p, v) \ do { \ barrier(); \ *((volatile int *)(p)) = v; /* force st.rel */ \ } while (0) #elif defined(CONFIG_X86_64) #define __flush_cache(p) clflush(p) -#define gru_ordered_store_int(p,v) \ +#define gru_ordered_store_int(p, v) \ do { \ barrier(); \ *(int *)p = v; \ @@ -558,20 +561,19 @@ extern int gru_get_cb_exception_detail(void *cb, #define GRU_EXC_STR_SIZE 256 -extern int gru_check_status_proc(void *cb); -extern int gru_wait_proc(void *cb); -extern void gru_wait_abort_proc(void *cb); /* * Control block definition for checking status */ struct gru_control_block_status { unsigned int icmd :1; - unsigned int unused1 :31; + unsigned int ima :3; + unsigned int reserved0 :4; + unsigned int unused1 :24; unsigned int unused2 :24; unsigned int istatus :2; unsigned int isubstatus :4; - unsigned int inused3 :2; + unsigned int unused3 :2; }; /* Get CB status */ diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 3ee698ad859..ab118558552 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -32,6 +32,7 @@ #include <linux/device.h> #include <linux/io.h> #include <linux/uaccess.h> +#include <linux/security.h> #include <asm/pgtable.h> #include "gru.h" #include "grutables.h" @@ -266,6 +267,44 @@ err: return 1; } +static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr, + int write, int atomic, unsigned long *gpa, int *pageshift) +{ + struct mm_struct *mm = gts->ts_mm; + struct vm_area_struct *vma; + unsigned long paddr; + int ret, ps; + + vma = find_vma(mm, vaddr); + if (!vma) + goto inval; + + /* + * Atomic lookup is faster & usually works 
even if called in non-atomic + * context. + */ + rmb(); /* Must/check ms_range_active before loading PTEs */ + ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps); + if (ret) { + if (atomic) + goto upm; + if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps)) + goto inval; + } + if (is_gru_paddr(paddr)) + goto inval; + paddr = paddr & ~((1UL << ps) - 1); + *gpa = uv_soc_phys_ram_to_gpa(paddr); + *pageshift = ps; + return 0; + +inval: + return -1; +upm: + return -2; +} + + /* * Drop a TLB entry into the GRU. The fault is described by info in an TFH. * Input: @@ -280,10 +319,8 @@ static int gru_try_dropin(struct gru_thread_state *gts, struct gru_tlb_fault_handle *tfh, unsigned long __user *cb) { - struct mm_struct *mm = gts->ts_mm; - struct vm_area_struct *vma; - int pageshift, asid, write, ret; - unsigned long paddr, gpa, vaddr; + int pageshift = 0, asid, write, ret, atomic = !cb; + unsigned long gpa = 0, vaddr = 0; /* * NOTE: The GRU contains magic hardware that eliminates races between @@ -317,28 +354,19 @@ static int gru_try_dropin(struct gru_thread_state *gts, if (atomic_read(>s->ts_gms->ms_range_active)) goto failactive; - vma = find_vma(mm, vaddr); - if (!vma) + ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift); + if (ret == -1) goto failinval; + if (ret == -2) + goto failupm; - /* - * Atomic lookup is faster & usually works even if called in non-atomic - * context. - */ - rmb(); /* Must/check ms_range_active before loading PTEs */ - ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift); - if (ret) { - if (!cb) + if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) { + gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift); + if (atomic || !gru_update_cch(gts, 0)) { + gts->ts_force_cch_reload = 1; goto failupm; - if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, - &pageshift)) - goto failinval; + } } - if (is_gru_paddr(paddr)) - goto failinval; - - paddr = paddr & ~((1UL << pageshift) - 1); - gpa = uv_soc_phys_ram_to_gpa(paddr); gru_cb_set_istatus_active(cb); tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write, GRU_PAGESIZE(pageshift)); @@ -368,6 +396,7 @@ failupm: failfmm: /* FMM state on UPM call */ + gru_flush_cache(tfh); STAT(tlb_dropin_fail_fmm); gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); return 0; @@ -448,6 +477,7 @@ irqreturn_t gru_intr(int irq, void *dev_id) up_read(>s->ts_mm->mmap_sem); } else { tfh_user_polling_mode(tfh); + STAT(intr_mm_lock_failed); } } return IRQ_HANDLED; @@ -497,10 +527,8 @@ int gru_handle_user_call_os(unsigned long cb) if (!gts) return -EINVAL; - if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { - ret = -EINVAL; + if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) goto exit; - } /* * If force_unload is set, the UPM TLB fault is phony. The task @@ -508,6 +536,20 @@ int gru_handle_user_call_os(unsigned long cb) * unload the context. The task will page fault and assign a new * context. */ + if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 && + gts->ts_blade != uv_numa_blade_id()) { + STAT(call_os_offnode_reference); + gts->ts_force_unload = 1; + } + + /* + * CCH may contain stale data if ts_force_cch_reload is set. 
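[Editorial sketch] gru_vtop(), factored out above, reports failures as distinct sentinel values (-1 for an invalid address, -2 when a sleeping lookup is needed) and lets the caller map each to its own recovery path. A self-contained sketch of that convention; the stub lookup and the error mapping are illustrative only:

#include <linux/errno.h>

#define DEMO_VTOP_INVAL	(-1)	/* bad address: fail the fault */
#define DEMO_VTOP_UPM	(-2)	/* needs a sleeping lookup: retry later */

/* Trivial stand-in for the real page-table walk. */
static int demo_vtop(unsigned long vaddr, int atomic,
		     unsigned long *gpa, int *pageshift)
{
	if (!vaddr)
		return DEMO_VTOP_INVAL;
	if (atomic)
		return DEMO_VTOP_UPM;
	*gpa = vaddr;		/* identity mapping, illustration only */
	*pageshift = 12;
	return 0;
}

static int demo_dropin(unsigned long vaddr, int atomic)
{
	unsigned long gpa;
	int pageshift;

	switch (demo_vtop(vaddr, atomic, &gpa, &pageshift)) {
	case DEMO_VTOP_INVAL:
		return -EFAULT;		/* the failinval path above */
	case DEMO_VTOP_UPM:
		return -EAGAIN;		/* the failupm path above */
	default:
		/* Round down to the page base, as gru_vtop() does. */
		gpa &= ~((1UL << pageshift) - 1);
		return 0;
	}
}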
+ */ + if (gts->ts_gru && gts->ts_force_cch_reload) { + gru_update_cch(gts, 0); + gts->ts_force_cch_reload = 0; + } + ret = -EAGAIN; cbrnum = thread_cbr_number(gts, ucbnum); if (gts->ts_force_unload) { @@ -541,11 +583,13 @@ int gru_get_exception_detail(unsigned long arg) if (!gts) return -EINVAL; - if (gts->ts_gru) { - ucbnum = get_cb_number((void *)excdet.cb); + ucbnum = get_cb_number((void *)excdet.cb); + if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { + ret = -EINVAL; + } else if (gts->ts_gru) { cbrnum = thread_cbr_number(gts, ucbnum); cbe = get_cbe_by_index(gts->ts_gru, cbrnum); - prefetchw(cbe); /* Harmless on hardware, required for emulator */ + prefetchw(cbe);/* Harmless on hardware, required for emulator */ excdet.opc = cbe->opccpy; excdet.exopc = cbe->exopccpy; excdet.ecause = cbe->ecause; @@ -567,6 +611,31 @@ int gru_get_exception_detail(unsigned long arg) /* * User request to unload a context. Content is saved for possible reload. */ +static int gru_unload_all_contexts(void) +{ + struct gru_thread_state *gts; + struct gru_state *gru; + int gid, ctxnum; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + foreach_gid(gid) { + gru = GID_TO_GRU(gid); + spin_lock(&gru->gs_lock); + for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { + gts = gru->gs_gts[ctxnum]; + if (gts && mutex_trylock(>s->ts_ctxlock)) { + spin_unlock(&gru->gs_lock); + gru_unload_context(gts, 1); + gru_unlock_gts(gts); + spin_lock(&gru->gs_lock); + } + } + spin_unlock(&gru->gs_lock); + } + return 0; +} + int gru_user_unload_context(unsigned long arg) { struct gru_thread_state *gts; @@ -578,6 +647,9 @@ int gru_user_unload_context(unsigned long arg) gru_dbg(grudev, "gseg 0x%lx\n", req.gseg); + if (!req.gseg) + return gru_unload_all_contexts(); + gts = gru_find_lock_gts(req.gseg); if (!gts) return -EINVAL; @@ -609,7 +681,7 @@ int gru_user_flush_tlb(unsigned long arg) if (!gts) return -EINVAL; - gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len); + gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len); gru_unlock_gts(gts); return 0; diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index c67e4e8bd62..3e6e42d2f01 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -45,7 +45,9 @@ #include <asm/uv/uv_mmrs.h> struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; -unsigned long gru_start_paddr, gru_end_paddr __read_mostly; +unsigned long gru_start_paddr __read_mostly; +unsigned long gru_end_paddr __read_mostly; +unsigned int gru_max_gids __read_mostly; struct gru_stats_s gru_stats; /* Guaranteed user available resources on each node */ @@ -101,7 +103,7 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) return -EPERM; if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || - vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) + vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) return -EINVAL; vma->vm_flags |= @@ -273,8 +275,11 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, gru->gs_blade_id = bid; gru->gs_cbr_map = (GRU_CBR_AU == 64) ? 
~0 : (1UL << GRU_CBR_AU) - 1; gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; + gru->gs_asid_limit = MAX_ASID; gru_tgh_flush_init(gru); - gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n", + if (gru->gs_gid >= gru_max_gids) + gru_max_gids = gru->gs_gid + 1; + gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, gru->gs_gru_base_paddr); gru_kservices_init(gru); @@ -295,7 +300,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) for_each_online_node(nid) { bid = uv_node_to_blade_id(nid); pnode = uv_node_to_pnode(nid); - if (gru_base[bid]) + if (bid < 0 || gru_base[bid]) continue; page = alloc_pages_node(nid, GFP_KERNEL, order); if (!page) @@ -308,11 +313,11 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) dsrbytes = 0; cbrs = 0; for (gru = gru_base[bid]->bs_grus, chip = 0; - chip < GRU_CHIPLETS_PER_BLADE; + chip < GRU_CHIPLETS_PER_BLADE; chip++, gru++) { paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); - gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip); + gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip); n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; cbrs = max(cbrs, n); n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; @@ -370,26 +375,26 @@ static int __init gru_init(void) void *gru_start_vaddr; if (!is_uv_system()) - return 0; + return -ENODEV; #if defined CONFIG_IA64 gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ #else gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & 0x7fffffffffffUL; - #endif gru_start_vaddr = __va(gru_start_paddr); - gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE; + gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", gru_start_paddr, gru_end_paddr); irq = get_base_irq(); for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { ret = request_irq(irq + chip, gru_intr, 0, id, NULL); - /* TODO: fix irq handling on x86. For now ignore failures because + /* TODO: fix irq handling on x86. For now ignore failure because * interrupts are not required & not yet fully supported */ if (ret) { - printk("!!!WARNING: GRU ignoring request failure!!!\n"); + printk(KERN_WARNING + "!!!WARNING: GRU ignoring request failure!!!\n"); ret = 0; } if (ret) { @@ -435,7 +440,7 @@ exit1: static void __exit gru_exit(void) { - int i, bid; + int i, bid, gid; int order = get_order(sizeof(struct gru_state) * GRU_CHIPLETS_PER_BLADE); @@ -445,6 +450,9 @@ static void __exit gru_exit(void) for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) free_irq(IRQ_GRU + i, NULL); + foreach_gid(gid) + gru_kservices_exit(GID_TO_GRU(gid)); + for (bid = 0; bid < GRU_MAX_BLADES; bid++) free_pages((unsigned long)gru_base[bid], order); @@ -469,7 +477,11 @@ struct vm_operations_struct gru_vm_ops = { .fault = gru_fault, }; +#ifndef MODULE fs_initcall(gru_init); +#else +module_init(gru_init); +#endif module_exit(gru_exit); module_param(gru_options, ulong, 0644); diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c new file mode 100644 index 00000000000..9b7ccb32869 --- /dev/null +++ b/drivers/misc/sgi-gru/gruhandles.c @@ -0,0 +1,183 @@ +/* + * GRU KERNEL MCS INSTRUCTIONS + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include "gru.h" +#include "grulib.h" +#include "grutables.h" + +/* 10 sec */ +#ifdef CONFIG_IA64 +#include <asm/processor.h> +#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) +#else +#include <asm/tsc.h> +#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) +#endif + +/* Extract the status field from a kernel handle */ +#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) + +struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + +static void update_mcs_stats(enum mcs_op op, unsigned long clks) +{ + atomic_long_inc(&mcs_op_statistics[op].count); + atomic_long_add(clks, &mcs_op_statistics[op].total); + if (mcs_op_statistics[op].max < clks) + mcs_op_statistics[op].max = clks; +} + +static void start_instruction(void *h) +{ + unsigned long *w0 = h; + + wmb(); /* setting CMD bit must be last */ + *w0 = *w0 | 1; + gru_flush_cache(h); +} + +static int wait_instruction_complete(void *h, enum mcs_op opc) +{ + int status; + cycles_t start_time = get_cycles(); + + while (1) { + cpu_relax(); + status = GET_MSEG_HANDLE_STATUS(h); + if (status != CCHSTATUS_ACTIVE) + break; + if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) + panic("GRU %p is malfunctioning\n", h); + } + if (gru_options & OPT_STATS) + update_mcs_stats(opc, get_cycles() - start_time); + return status; +} + +int cch_allocate(struct gru_context_configuration_handle *cch, + int asidval, int sizeavail, unsigned long cbrmap, + unsigned long dsrmap) +{ + int i; + + for (i = 0; i < 8; i++) { + cch->asid[i] = (asidval++); + cch->sizeavail[i] = sizeavail; + } + cch->dsr_allocation_map = dsrmap; + cch->cbr_allocation_map = cbrmap; + cch->opc = CCHOP_ALLOCATE; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_allocate); +} + +int cch_start(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_START; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_start); +} + +int cch_interrupt(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_INTERRUPT; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_interrupt); +} + +int cch_deallocate(struct gru_context_configuration_handle *cch) +{ + cch->opc = CCHOP_DEALLOCATE; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_deallocate); +} + +int cch_interrupt_sync(struct gru_context_configuration_handle + *cch) +{ + cch->opc = CCHOP_INTERRUPT_SYNC; + start_instruction(cch); + return wait_instruction_complete(cch, cchop_interrupt_sync); +} + +int tgh_invalidate(struct gru_tlb_global_handle *tgh, + unsigned long vaddr, unsigned long vaddrmask, + int asid, int pagesize, int global, int n, + unsigned short ctxbitmap) +{ + tgh->vaddr = vaddr; + tgh->asid = asid; + tgh->pagesize = pagesize; + tgh->n = 
n; + tgh->global = global; + tgh->vaddrmask = vaddrmask; + tgh->ctxbitmap = ctxbitmap; + tgh->opc = TGHOP_TLBINV; + start_instruction(tgh); + return wait_instruction_complete(tgh, tghop_invalidate); +} + +void tfh_write_only(struct gru_tlb_fault_handle *tfh, + unsigned long pfn, unsigned long vaddr, + int asid, int dirty, int pagesize) +{ + tfh->fillasid = asid; + tfh->fillvaddr = vaddr; + tfh->pfn = pfn; + tfh->dirty = dirty; + tfh->pagesize = pagesize; + tfh->opc = TFHOP_WRITE_ONLY; + start_instruction(tfh); +} + +void tfh_write_restart(struct gru_tlb_fault_handle *tfh, + unsigned long paddr, int gaa, + unsigned long vaddr, int asid, int dirty, + int pagesize) +{ + tfh->fillasid = asid; + tfh->fillvaddr = vaddr; + tfh->pfn = paddr >> GRU_PADDR_SHIFT; + tfh->gaa = gaa; + tfh->dirty = dirty; + tfh->pagesize = pagesize; + tfh->opc = TFHOP_WRITE_RESTART; + start_instruction(tfh); +} + +void tfh_restart(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_RESTART; + start_instruction(tfh); +} + +void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_USER_POLLING_MODE; + start_instruction(tfh); +} + +void tfh_exception(struct gru_tlb_fault_handle *tfh) +{ + tfh->opc = TFHOP_EXCEPTION; + start_instruction(tfh); +} + diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h index b63018d60fe..1ed74d7508c 100644 --- a/drivers/misc/sgi-gru/gruhandles.h +++ b/drivers/misc/sgi-gru/gruhandles.h @@ -489,170 +489,28 @@ enum gru_cbr_state { * 64m 26 8 * ... */ -#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6) +#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6) #define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) /* minimum TLB purge count to ensure a full purge */ #define GRUMAXINVAL 1024UL - -/* Extract the status field from a kernel handle */ -#define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) - -static inline void start_instruction(void *h) -{ - unsigned long *w0 = h; - - wmb(); /* setting CMD bit must be last */ - *w0 = *w0 | 1; - gru_flush_cache(h); -} - -static inline int wait_instruction_complete(void *h) -{ - int status; - - do { - cpu_relax(); - barrier(); - status = GET_MSEG_HANDLE_STATUS(h); - } while (status == CCHSTATUS_ACTIVE); - return status; -} - -#if defined CONFIG_IA64 -static inline void cch_allocate_set_asids( - struct gru_context_configuration_handle *cch, int asidval) -{ - int i; - - for (i = 0; i <= RGN_HPAGE; i++) { /* assume HPAGE is last region */ - cch->asid[i] = (asidval++); -#if 0 - /* ZZZ hugepages not supported yet */ - if (i == RGN_HPAGE) - cch->sizeavail[i] = GRU_SIZEAVAIL(hpage_shift); - else -#endif - cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT); - } -} -#elif defined CONFIG_X86_64 -static inline void cch_allocate_set_asids( - struct gru_context_configuration_handle *cch, int asidval) -{ - int i; - - for (i = 0; i < 8; i++) { - cch->asid[i] = asidval++; - cch->sizeavail[i] = GRU_SIZEAVAIL(PAGE_SHIFT) | - GRU_SIZEAVAIL(21); - } -} -#endif - -static inline int cch_allocate(struct gru_context_configuration_handle *cch, - int asidval, unsigned long cbrmap, - unsigned long dsrmap) -{ - cch_allocate_set_asids(cch, asidval); - cch->dsr_allocation_map = dsrmap; - cch->cbr_allocation_map = cbrmap; - cch->opc = CCHOP_ALLOCATE; - start_instruction(cch); - return wait_instruction_complete(cch); -} - -static inline int cch_start(struct gru_context_configuration_handle *cch) -{ - cch->opc = CCHOP_START; - start_instruction(cch); - return wait_instruction_complete(cch); -} 
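[Editorial sketch] Compare the unbounded status poll being deleted above with its replacement in gruhandles.c earlier in this diff: the new wait_instruction_complete() bounds the spin with a cycle-counter deadline. A minimal sketch of that guard; the busy bit, return code, and panic-versus-return choice are simplified:

#include <linux/timex.h>	/* get_cycles(), cycles_t */
#include <asm/processor.h>	/* cpu_relax() */

#define DEMO_BUSY 0x1u

static int demo_wait_idle(volatile unsigned int *status, cycles_t timeout)
{
	cycles_t start = get_cycles();

	while (*status & DEMO_BUSY) {
		cpu_relax();
		if (get_cycles() - start > timeout)
			return -1;	/* the GRU code panics instead */
	}
	return 0;
}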
- -static inline int cch_interrupt(struct gru_context_configuration_handle *cch) -{ - cch->opc = CCHOP_INTERRUPT; - start_instruction(cch); - return wait_instruction_complete(cch); -} - -static inline int cch_deallocate(struct gru_context_configuration_handle *cch) -{ - cch->opc = CCHOP_DEALLOCATE; - start_instruction(cch); - return wait_instruction_complete(cch); -} - -static inline int cch_interrupt_sync(struct gru_context_configuration_handle - *cch) -{ - cch->opc = CCHOP_INTERRUPT_SYNC; - start_instruction(cch); - return wait_instruction_complete(cch); -} - -static inline int tgh_invalidate(struct gru_tlb_global_handle *tgh, - unsigned long vaddr, unsigned long vaddrmask, - int asid, int pagesize, int global, int n, - unsigned short ctxbitmap) -{ - tgh->vaddr = vaddr; - tgh->asid = asid; - tgh->pagesize = pagesize; - tgh->n = n; - tgh->global = global; - tgh->vaddrmask = vaddrmask; - tgh->ctxbitmap = ctxbitmap; - tgh->opc = TGHOP_TLBINV; - start_instruction(tgh); - return wait_instruction_complete(tgh); -} - -static inline void tfh_write_only(struct gru_tlb_fault_handle *tfh, - unsigned long pfn, unsigned long vaddr, - int asid, int dirty, int pagesize) -{ - tfh->fillasid = asid; - tfh->fillvaddr = vaddr; - tfh->pfn = pfn; - tfh->dirty = dirty; - tfh->pagesize = pagesize; - tfh->opc = TFHOP_WRITE_ONLY; - start_instruction(tfh); -} - -static inline void tfh_write_restart(struct gru_tlb_fault_handle *tfh, - unsigned long paddr, int gaa, - unsigned long vaddr, int asid, int dirty, - int pagesize) -{ - tfh->fillasid = asid; - tfh->fillvaddr = vaddr; - tfh->pfn = paddr >> GRU_PADDR_SHIFT; - tfh->gaa = gaa; - tfh->dirty = dirty; - tfh->pagesize = pagesize; - tfh->opc = TFHOP_WRITE_RESTART; - start_instruction(tfh); -} - -static inline void tfh_restart(struct gru_tlb_fault_handle *tfh) -{ - tfh->opc = TFHOP_RESTART; - start_instruction(tfh); -} - -static inline void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh) -{ - tfh->opc = TFHOP_USER_POLLING_MODE; - start_instruction(tfh); -} - -static inline void tfh_exception(struct gru_tlb_fault_handle *tfh) -{ - tfh->opc = TFHOP_EXCEPTION; - start_instruction(tfh); -} +int cch_allocate(struct gru_context_configuration_handle *cch, + int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap); + +int cch_start(struct gru_context_configuration_handle *cch); +int cch_interrupt(struct gru_context_configuration_handle *cch); +int cch_deallocate(struct gru_context_configuration_handle *cch); +int cch_interrupt_sync(struct gru_context_configuration_handle *cch); +int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, + unsigned long vaddrmask, int asid, int pagesize, int global, int n, + unsigned short ctxbitmap); +void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn, + unsigned long vaddr, int asid, int dirty, int pagesize); +void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, + int gaa, unsigned long vaddr, int asid, int dirty, int pagesize); +void tfh_restart(struct gru_tlb_fault_handle *tfh); +void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh); +void tfh_exception(struct gru_tlb_fault_handle *tfh); #endif /* __GRUHANDLES_H__ */ diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 880c55dfb66..d8bd7d84a7c 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -52,8 +52,10 @@ */ /* Blade percpu resources PERMANENTLY reserved for kernel use */ -#define GRU_NUM_KERNEL_CBR 1 +#define 
GRU_NUM_KERNEL_CBR 1 #define GRU_NUM_KERNEL_DSR_BYTES 256 +#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \ + GRU_CACHE_LINE_BYTES) #define KERNEL_CTXNUM 15 /* GRU instruction attributes for all instructions */ @@ -94,7 +96,6 @@ struct message_header { char fill; }; -#define QLINES(mq) ((mq) + offsetof(struct message_queue, qlines)) #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) @@ -122,7 +123,7 @@ int gru_get_cb_exception_detail(void *cb, struct gru_control_block_extended *cbe; cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); - prefetchw(cbe); /* Harmless on hardware, required for emulator */ + prefetchw(cbe); /* Harmless on hardware, required for emulator */ excdet->opc = cbe->opccpy; excdet->exopc = cbe->exopccpy; excdet->ecause = cbe->ecause; @@ -250,7 +251,8 @@ static inline void restore_present2(void *p, int val) * Create a message queue. * qlines - message queue size in cache lines. Includes 2-line header. */ -int gru_create_message_queue(void *p, unsigned int bytes) +int gru_create_message_queue(struct gru_message_queue_desc *mqd, + void *p, unsigned int bytes, int nasid, int vector, int apicid) { struct message_queue *mq = p; unsigned int qlines; @@ -265,6 +267,12 @@ int gru_create_message_queue(void *p, unsigned int bytes) mq->hstatus[0] = 0; mq->hstatus[1] = 1; mq->head = gru_mesq_head(2, qlines / 2 + 1); + mqd->mq = mq; + mqd->mq_gpa = uv_gpa(mq); + mqd->qlines = qlines; + mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid); + mqd->interrupt_vector = vector; + mqd->interrupt_apicid = apicid; return 0; } EXPORT_SYMBOL_GPL(gru_create_message_queue); @@ -277,8 +285,8 @@ EXPORT_SYMBOL_GPL(gru_create_message_queue); * -1 - if mesq sent successfully but queue not full * >0 - unexpected error. MQE_xxx returned */ -static int send_noop_message(void *cb, - unsigned long mq, void *mesg) +static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd, + void *mesg) { const struct message_header noop_header = { .present = MQS_NOOP, .lines = 1}; @@ -289,7 +297,7 @@ static int send_noop_message(void *cb, STAT(mesq_noop); save_mhdr = *mhdr; *mhdr = noop_header; - gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA); + gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA); ret = gru_wait(cb); if (ret) { @@ -313,7 +321,7 @@ static int send_noop_message(void *cb, break; case CBSS_PUT_NACKED: STAT(mesq_noop_put_nacked); - m = mq + (gru_get_amo_value_head(cb) << 6); + m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1, IMA); if (gru_wait(cb) == CBS_IDLE) @@ -333,30 +341,20 @@ static int send_noop_message(void *cb, /* * Handle a gru_mesq full. */ -static int send_message_queue_full(void *cb, - unsigned long mq, void *mesg, int lines) +static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd, + void *mesg, int lines) { union gru_mesqhead mqh; unsigned int limit, head; unsigned long avalue; - int half, qlines, save; + int half, qlines; /* Determine if switching to first/second half of q */ avalue = gru_get_amo_value(cb); head = gru_get_amo_value_head(cb); limit = gru_get_amo_value_limit(cb); - /* - * Fetch "qlines" from the queue header. Since the queue may be - * in memory that can't be accessed using socket addresses, use - * the GRU to access the data. Use DSR space from the message. 
- */ - save = *(int *)mesg; - gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA); - if (gru_wait(cb) != CBS_IDLE) - goto cberr; - qlines = *(int *)mesg; - *(int *)mesg = save; + qlines = mqd->qlines; half = (limit != qlines); if (half) @@ -365,7 +363,7 @@ static int send_message_queue_full(void *cb, mqh = gru_mesq_head(2, qlines / 2 + 1); /* Try to get lock for switching head pointer */ - gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA); + gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA); if (gru_wait(cb) != CBS_IDLE) goto cberr; if (!gru_get_amo_value(cb)) { @@ -375,8 +373,8 @@ static int send_message_queue_full(void *cb, /* Got the lock. Send optional NOP if queue not full, */ if (head != limit) { - if (send_noop_message(cb, mq, mesg)) { - gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), + if (send_noop_message(cb, mqd, mesg)) { + gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA); if (gru_wait(cb) != CBS_IDLE) goto cberr; @@ -387,14 +385,16 @@ static int send_message_queue_full(void *cb, } /* Then flip queuehead to other half of queue. */ - gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA); + gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue, + IMA); if (gru_wait(cb) != CBS_IDLE) goto cberr; /* If not successfully in swapping queue head, clear the hstatus lock */ if (gru_get_amo_value(cb) != avalue) { STAT(mesq_qf_switch_head_failed); - gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA); + gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, + IMA); if (gru_wait(cb) != CBS_IDLE) goto cberr; } @@ -404,15 +404,25 @@ cberr: return MQE_UNEXPECTED_CB_ERR; } +/* + * Send a cross-partition interrupt to the SSI that contains the target + * message queue. Normally, the interrupt is automatically delivered by hardware + * but some error conditions require explicit delivery. + */ +static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd) +{ + if (mqd->interrupt_vector) + uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid, + mqd->interrupt_vector); +} + /* * Handle a gru_mesq failure. Some of these failures are software recoverable * or retryable. */ -static int send_message_failure(void *cb, - unsigned long mq, - void *mesg, - int lines) +static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, + void *mesg, int lines) { int substatus, ret = 0; unsigned long m; @@ -429,7 +439,7 @@ static int send_message_failure(void *cb, break; case CBSS_QLIMIT_REACHED: STAT(mesq_send_qlimit_reached); - ret = send_message_queue_full(cb, mq, mesg, lines); + ret = send_message_queue_full(cb, mqd, mesg, lines); break; case CBSS_AMO_NACKED: STAT(mesq_send_amo_nacked); @@ -437,12 +447,14 @@ static int send_message_failure(void *cb, break; case CBSS_PUT_NACKED: STAT(mesq_send_put_nacked); - m =mq + (gru_get_amo_value_head(cb) << 6); + m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); - if (gru_wait(cb) == CBS_IDLE) + if (gru_wait(cb) == CBS_IDLE) { ret = MQE_OK; - else + send_message_queue_interrupt(mqd); + } else { ret = MQE_UNEXPECTED_CB_ERR; + } break; default: BUG(); @@ -452,12 +464,12 @@ static int send_message_failure(void *cb, /* * Send a message to a message queue - * cb GRU control block to use to send message - * mq message queue + * mqd message queue descriptor * mesg message. 
Must be vaddr within a GSEG * bytes message size (<= 2 CL) */ -int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes) +int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg, + unsigned int bytes) { struct message_header *mhdr; void *cb; @@ -481,10 +493,10 @@ int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes) do { ret = MQE_OK; - gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA); + gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA); istatus = gru_wait(cb); if (istatus != CBS_IDLE) - ret = send_message_failure(cb, mq, dsr, clines); + ret = send_message_failure(cb, mqd, dsr, clines); } while (ret == MQIE_AGAIN); gru_free_cpu_resources(cb, dsr); @@ -497,9 +509,9 @@ EXPORT_SYMBOL_GPL(gru_send_message_gpa); /* * Advance the receive pointer for the queue to the next message. */ -void gru_free_message(void *rmq, void *mesg) +void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg) { - struct message_queue *mq = rmq; + struct message_queue *mq = mqd->mq; struct message_header *mhdr = mq->next; void *next, *pnext; int half = -1; @@ -529,16 +541,16 @@ EXPORT_SYMBOL_GPL(gru_free_message); * present. User must call next_message() to move to next message. * rmq message queue */ -void *gru_get_next_message(void *rmq) +void *gru_get_next_message(struct gru_message_queue_desc *mqd) { - struct message_queue *mq = rmq; + struct message_queue *mq = mqd->mq; struct message_header *mhdr = mq->next; int present = mhdr->present; /* skip NOOP messages */ STAT(mesq_receive); while (present == MQS_NOOP) { - gru_free_message(rmq, mhdr); + gru_free_message(mqd, mhdr); mhdr = mq->next; present = mhdr->present; } @@ -576,7 +588,7 @@ int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr)) return MQE_BUG_NO_RESOURCES; gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr), - XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA); + XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA); ret = gru_wait(cb); gru_free_cpu_resources(cb, dsr); return ret; @@ -611,7 +623,7 @@ static int quicktest(struct gru_state *gru) if (word0 != word1 || word0 != MAGIC) { printk - ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n", + ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n", gru->gs_gid, word1, MAGIC); BUG(); /* ZZZ should not be fatal */ } @@ -660,15 +672,15 @@ int gru_kservices_init(struct gru_state *gru) cch->tlb_int_enable = 0; cch->tfm_done_bit_enable = 0; cch->unmap_enable = 1; - err = cch_allocate(cch, 0, cbr_map, dsr_map); + err = cch_allocate(cch, 0, 0, cbr_map, dsr_map); if (err) { gru_dbg(grudev, - "Unable to allocate kernel CCH: gru %d, err %d\n", + "Unable to allocate kernel CCH: gid %d, err %d\n", gru->gs_gid, err); BUG(); } if (cch_start(cch)) { - gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n", + gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n", gru->gs_gid, err); BUG(); } @@ -678,3 +690,22 @@ int gru_kservices_init(struct gru_state *gru) quicktest(gru); return 0; } + +void gru_kservices_exit(struct gru_state *gru) +{ + struct gru_context_configuration_handle *cch; + struct gru_blade_state *bs; + + bs = gru->gs_blade; + if (gru != &bs->bs_grus[1]) + return; + + cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM); + lock_cch_handle(cch); + if (cch_interrupt_sync(cch)) + BUG(); + if (cch_deallocate(cch)) + BUG(); + unlock_cch_handle(cch); +} + diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h index 
eb17e0a3ac6..747ed315d56 100644 --- a/drivers/misc/sgi-gru/grukservices.h +++ b/drivers/misc/sgi-gru/grukservices.h @@ -41,6 +41,15 @@ * - gru_create_message_queue() needs interrupt vector info */ +struct gru_message_queue_desc { + void *mq; /* message queue vaddress */ + unsigned long mq_gpa; /* global address of mq */ + int qlines; /* queue size in CL */ + int interrupt_vector; /* interrupt vector */ + int interrupt_pnode; /* pnode for interrupt */ + int interrupt_apicid; /* lapicid for interrupt */ +}; + /* * Initialize a user allocated chunk of memory to be used as * a message queue. The caller must ensure that the queue is @@ -51,14 +60,19 @@ * to manage the queue. * * Input: - * p pointer to user allocated memory. + * mqd pointer to message queue descriptor + * p pointer to user allocated mesq memory. * bytes size of message queue in bytes + * vector interrupt vector (zero if no interrupts) + * nasid nasid of blade where interrupt is delivered + * apicid apicid of cpu for interrupt * * Errors: * 0 OK * >0 error */ -extern int gru_create_message_queue(void *p, unsigned int bytes); +extern int gru_create_message_queue(struct gru_message_queue_desc *mqd, + void *p, unsigned int bytes, int nasid, int vector, int apicid); /* * Send a message to a message queue. @@ -68,7 +82,7 @@ extern int gru_create_message_queue(void *p, unsigned int bytes); * * * Input: - * xmq message queue - must be a UV global physical address + * mqd pointer to message queue descriptor * mesg pointer to message. Must be 64-bit aligned * bytes size of message in bytes * @@ -77,8 +91,8 @@ extern int gru_create_message_queue(void *p, unsigned int bytes); * >0 Send failure - see error codes below * */ -extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg, - unsigned int bytes); +extern int gru_send_message_gpa(struct gru_message_queue_desc *mqd, + void *mesg, unsigned int bytes); /* Status values for gru_send_message() */ #define MQE_OK 0 /* message sent successfully */ @@ -94,10 +108,11 @@ extern int gru_send_message_gpa(unsigned long mq_gpa, void *mesg, * API extensions may allow for out-of-order freeing. * * Input - * mq message queue + * mqd pointer to message queue descriptor * mesq message being freed */ -extern void gru_free_message(void *mq, void *mesq); +extern void gru_free_message(struct gru_message_queue_desc *mqd, + void *mesq); /* * Get next message from message queue. Returns pointer to @@ -106,13 +121,13 @@ extern void gru_free_message(void *mq, void *mesq); * in order to move the queue pointers to next message. * * Input - * mq message queue + * mqd pointer to message queue descriptor * * Output: * p pointer to message * NULL no message available */ -extern void *gru_get_next_message(void *mq); +extern void *gru_get_next_message(struct gru_message_queue_desc *mqd); /* diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 3d2fc216bae..ec3f7a17d22 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -76,10 +76,9 @@ int gru_cpu_fault_map_id(void) /* Hit the asid limit. 
Start over */ static int gru_wrap_asid(struct gru_state *gru) { - gru_dbg(grudev, "gru %p\n", gru); + gru_dbg(grudev, "gid %d\n", gru->gs_gid); STAT(asid_wrap); gru->gs_asid_gen++; - gru_flush_all_tlb(gru); return MIN_ASID; } @@ -88,19 +87,21 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid) { int i, gid, inuse_asid, limit; - gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); + gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); STAT(asid_next); limit = MAX_ASID; if (asid >= limit) asid = gru_wrap_asid(gru); + gru_flush_all_tlb(gru); gid = gru->gs_gid; again: for (i = 0; i < GRU_NUM_CCH; i++) { if (!gru->gs_gts[i]) continue; inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; - gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n", - gru, inuse_asid, i, gru->gs_gts[i]); + gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n", + gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms, + inuse_asid, i); if (inuse_asid == asid) { asid += ASID_INC; if (asid >= limit) { @@ -120,8 +121,8 @@ again: } gru->gs_asid_limit = limit; gru->gs_asid = asid; - gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid, - limit); + gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid, + asid, limit); return asid; } @@ -130,14 +131,12 @@ static int gru_assign_asid(struct gru_state *gru) { int asid; - spin_lock(&gru->gs_asid_lock); gru->gs_asid += ASID_INC; asid = gru->gs_asid; if (asid >= gru->gs_asid_limit) asid = gru_reset_asid_limit(gru, asid); - spin_unlock(&gru->gs_asid_lock); - gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); + gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); return asid; } @@ -215,17 +214,20 @@ static int check_gru_resources(struct gru_state *gru, int cbr_au_count, * TLB management requires tracking all GRU chiplets that have loaded a GSEG * context. 
*/ -static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms, - int ctxnum) +static int gru_load_mm_tracker(struct gru_state *gru, + struct gru_thread_state *gts) { + struct gru_mm_struct *gms = gts->ts_gms; struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; - unsigned short ctxbitmap = (1 << ctxnum); + unsigned short ctxbitmap = (1 << gts->ts_ctxnum); int asid; spin_lock(&gms->ms_asid_lock); asid = asids->mt_asid; - if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) { + spin_lock(&gru->gs_asid_lock); + if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen != + gru->gs_asid_gen)) { asid = gru_assign_asid(gru); asids->mt_asid = asid; asids->mt_asid_gen = gru->gs_asid_gen; @@ -233,6 +235,7 @@ static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms, } else { STAT(asid_reuse); } + spin_unlock(&gru->gs_asid_lock); BUG_ON(asids->mt_ctxbitmap & ctxbitmap); asids->mt_ctxbitmap |= ctxbitmap; @@ -241,24 +244,28 @@ static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms, spin_unlock(&gms->ms_asid_lock); gru_dbg(grudev, - "gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n", - gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]); + "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n", + gru->gs_gid, gts, gms, gts->ts_ctxnum, asid, + gms->ms_asidmap[0]); return asid; } static void gru_unload_mm_tracker(struct gru_state *gru, - struct gru_mm_struct *gms, int ctxnum) + struct gru_thread_state *gts) { + struct gru_mm_struct *gms = gts->ts_gms; struct gru_mm_tracker *asids; unsigned short ctxbitmap; asids = &gms->ms_asids[gru->gs_gid]; - ctxbitmap = (1 << ctxnum); + ctxbitmap = (1 << gts->ts_ctxnum); spin_lock(&gms->ms_asid_lock); + spin_lock(&gru->gs_asid_lock); BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); asids->mt_ctxbitmap ^= ctxbitmap; - gru_dbg(grudev, "gru %x, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", - gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]); + gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", + gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); + spin_unlock(&gru->gs_asid_lock); spin_unlock(&gms->ms_asid_lock); } @@ -319,6 +326,7 @@ static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, gts->ts_vma = vma; gts->ts_tlb_int_select = -1; gts->ts_gms = gru_register_mmu_notifier(); + gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT); if (!gts->ts_gms) goto err; @@ -399,7 +407,7 @@ static void gru_free_gru_context(struct gru_thread_state *gts) struct gru_state *gru; gru = gts->ts_gru; - gru_dbg(grudev, "gts %p, gru %p\n", gts, gru); + gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid); spin_lock(&gru->gs_lock); gru->gs_gts[gts->ts_ctxnum] = NULL; @@ -408,6 +416,7 @@ static void gru_free_gru_context(struct gru_thread_state *gts) __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); gts->ts_ctxnum = NULLCTX; gts->ts_gru = NULL; + gts->ts_blade = -1; spin_unlock(&gru->gs_lock); gts_drop(gts); @@ -432,8 +441,8 @@ static inline long gru_copy_handle(void *d, void *s) return GRU_HANDLE_BYTES; } -static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap, - unsigned long length) +static void gru_prefetch_context(void *gseg, void *cb, void *cbe, + unsigned long cbrmap, unsigned long length) { int i, scr; @@ -500,12 +509,12 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate) zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); + gru_dbg(grudev, 
"gts %p\n", gts); lock_cch_handle(cch); if (cch_interrupt_sync(cch)) BUG(); - gru_dbg(grudev, "gts %p\n", gts); - gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); + gru_unload_mm_tracker(gru, gts); if (savestate) gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, gts->ts_cbr_map, @@ -534,7 +543,7 @@ static void gru_load_context(struct gru_thread_state *gts) cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); lock_cch_handle(cch); - asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); + asid = gru_load_mm_tracker(gru, gts); cch->tfm_fault_bit_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); @@ -544,7 +553,8 @@ static void gru_load_context(struct gru_thread_state *gts) cch->tlb_int_select = gts->ts_tlb_int_select; } cch->tfm_done_bit_enable = 0; - err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map); + err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map, + gts->ts_dsr_map); if (err) { gru_dbg(grudev, "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", @@ -565,11 +575,12 @@ static void gru_load_context(struct gru_thread_state *gts) /* * Update fields in an active CCH: * - retarget interrupts on local blade + * - update sizeavail mask * - force a delayed context unload by clearing the CCH asids. This * forces TLB misses for new GRU instructions. The context is unloaded * when the next TLB miss occurs. */ -static int gru_update_cch(struct gru_thread_state *gts, int int_select) +int gru_update_cch(struct gru_thread_state *gts, int force_unload) { struct gru_context_configuration_handle *cch; struct gru_state *gru = gts->ts_gru; @@ -583,9 +594,11 @@ static int gru_update_cch(struct gru_thread_state *gts, int int_select) goto exit; if (cch_interrupt(cch)) BUG(); - if (int_select >= 0) { - gts->ts_tlb_int_select = int_select; - cch->tlb_int_select = int_select; + if (!force_unload) { + for (i = 0; i < 8; i++) + cch->sizeavail[i] = gts->ts_sizeavail; + gts->ts_tlb_int_select = gru_cpu_fault_map_id(); + cch->tlb_int_select = gru_cpu_fault_map_id(); } else { for (i = 0; i < 8; i++) cch->asid[i] = 0; @@ -617,7 +630,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts) gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, gru_cpu_fault_map_id()); - return gru_update_cch(gts, gru_cpu_fault_map_id()); + return gru_update_cch(gts, 0); } @@ -688,7 +701,7 @@ static void gru_steal_context(struct gru_thread_state *gts) STAT(steal_context_failed); } gru_dbg(grudev, - "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;" + "stole gid %d, ctxnum %d from gts %p. 
Need cb %d, ds %d;" " avail cb %ld, ds %ld\n", gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), hweight64(gru->gs_dsr_map)); @@ -727,6 +740,7 @@ again: } reserve_gru_resources(gru, gts); gts->ts_gru = gru; + gts->ts_blade = gru->gs_blade_id; gts->ts_ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH); @@ -737,7 +751,7 @@ again: STAT(assign_context); gru_dbg(grudev, - "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n", + "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n", gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, gts->ts_gru->gs_gid, gts->ts_ctxnum, gts->ts_cbr_au_count, gts->ts_dsr_au_count); @@ -773,8 +787,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; again: - preempt_disable(); mutex_lock(>s->ts_ctxlock); + preempt_disable(); if (gts->ts_gru) { if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { STAT(migrated_nopfn_unload); diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index 73b0ca061bb..ee74821b171 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -62,7 +62,9 @@ static int statistics_show(struct seq_file *s, void *p) printstat(s, asid_wrap); printstat(s, asid_reuse); printstat(s, intr); + printstat(s, intr_mm_lock_failed); printstat(s, call_os); + printstat(s, call_os_offnode_reference); printstat(s, call_os_check_for_bug); printstat(s, call_os_wait_queue); printstat(s, user_flush_tlb); @@ -120,6 +122,30 @@ static ssize_t statistics_write(struct file *file, const char __user *userbuf, return count; } +static int mcs_statistics_show(struct seq_file *s, void *p) +{ + int op; + unsigned long total, count, max; + static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt", + "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"}; + + for (op = 0; op < mcsop_last; op++) { + count = atomic_long_read(&mcs_op_statistics[op].count); + total = atomic_long_read(&mcs_op_statistics[op].total); + max = mcs_op_statistics[op].max; + seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, + count ? total / count : 0, max); + } + return 0; +} + +static ssize_t mcs_statistics_write(struct file *file, + const char __user *userbuf, size_t count, loff_t *data) +{ + memset(mcs_op_statistics, 0, sizeof(mcs_op_statistics)); + return count; +} + static int options_show(struct seq_file *s, void *p) { seq_printf(s, "0x%lx\n", gru_options); @@ -135,6 +161,7 @@ static ssize_t options_write(struct file *file, const char __user *userbuf, if (copy_from_user (buf, userbuf, count < sizeof(buf) ? 
count : sizeof(buf))) return -EFAULT; + buf[count - 1] = '\0'; if (!strict_strtoul(buf, 10, &val)) gru_options = val; @@ -199,7 +226,7 @@ static void seq_stop(struct seq_file *file, void *data) static void *seq_start(struct seq_file *file, loff_t *gid) { - if (*gid < GRU_MAX_GRUS) + if (*gid < gru_max_gids) return gid; return NULL; } @@ -207,7 +234,7 @@ static void *seq_start(struct seq_file *file, loff_t *gid) static void *seq_next(struct seq_file *file, void *data, loff_t *gid) { (*gid)++; - if (*gid < GRU_MAX_GRUS) + if (*gid < gru_max_gids) return gid; return NULL; } @@ -231,6 +258,11 @@ static int statistics_open(struct inode *inode, struct file *file) return single_open(file, statistics_show, NULL); } +static int mcs_statistics_open(struct inode *inode, struct file *file) +{ + return single_open(file, mcs_statistics_show, NULL); +} + static int options_open(struct inode *inode, struct file *file) { return single_open(file, options_show, NULL); @@ -255,6 +287,14 @@ static const struct file_operations statistics_fops = { .release = single_release, }; +static const struct file_operations mcs_statistics_fops = { + .open = mcs_statistics_open, + .read = seq_read, + .write = mcs_statistics_write, + .llseek = seq_lseek, + .release = single_release, +}; + static const struct file_operations options_fops = { .open = options_open, .read = seq_read, @@ -283,6 +323,7 @@ static struct proc_entry { struct proc_dir_entry *entry; } proc_files[] = { {"statistics", 0644, &statistics_fops}, + {"mcs_statistics", 0644, &mcs_statistics_fops}, {"debug_options", 0644, &options_fops}, {"cch_status", 0444, &cch_fops}, {"gru_status", 0444, &gru_fops}, diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index a78f70deeb5..bf1eeb7553e 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -153,6 +153,7 @@ extern struct gru_stats_s gru_stats; extern struct gru_blade_state *gru_base[]; extern unsigned long gru_start_paddr, gru_end_paddr; +extern unsigned int gru_max_gids; #define GRU_MAX_BLADES MAX_NUMNODES #define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) @@ -184,7 +185,9 @@ struct gru_stats_s { atomic_long_t asid_wrap; atomic_long_t asid_reuse; atomic_long_t intr; + atomic_long_t intr_mm_lock_failed; atomic_long_t call_os; + atomic_long_t call_os_offnode_reference; atomic_long_t call_os_check_for_bug; atomic_long_t call_os_wait_queue; atomic_long_t user_flush_tlb; @@ -237,6 +240,17 @@ struct gru_stats_s { }; +enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync, + cchop_deallocate, tghop_invalidate, mcsop_last}; + +struct mcs_op_statistic { + atomic_long_t count; + atomic_long_t total; + unsigned long max; +}; + +extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; + #define OPT_DPRINT 1 #define OPT_STATS 2 #define GRU_QUICKLOOK 4 @@ -278,13 +292,12 @@ struct gru_stats_s { /* Generate a GRU asid value from a GRU base asid & a virtual address. 
*/ #if defined CONFIG_IA64 #define VADDR_HI_BIT 64 -#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) #elif defined CONFIG_X86_64 #define VADDR_HI_BIT 48 -#define GRUREGION(addr) (0) /* ZZZ could do better */ #else #error "Unsupported architecture" #endif +#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) #define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) /*------------------------------------------------------------------------------ @@ -297,12 +310,12 @@ struct gru_state; * This structure is pointed to from the mmstruct via the notifier pointer. * There is one of these per address space. */ -struct gru_mm_tracker { - unsigned int mt_asid_gen; /* ASID wrap count */ - int mt_asid; /* current base ASID for gru */ - unsigned short mt_ctxbitmap; /* bitmap of contexts using +struct gru_mm_tracker { /* pack to reduce size */ + unsigned int mt_asid_gen:24; /* ASID wrap count */ + unsigned int mt_asid:24; /* current base ASID for gru */ + unsigned short mt_ctxbitmap:16;/* bitmap of contexts using asid */ -}; +} __attribute__ ((packed)); struct gru_mm_struct { struct mmu_notifier ms_notifier; @@ -348,6 +361,7 @@ struct gru_thread_state { long ts_user_options;/* misc user option flags */ pid_t ts_tgid_owner; /* task that is using the context - for migration */ + unsigned short ts_sizeavail; /* Pagesizes in use */ int ts_tsid; /* thread that owns the structure */ int ts_tlb_int_select;/* target cpu if interrupts @@ -359,6 +373,9 @@ struct gru_thread_state { required for context */ unsigned char ts_cbr_au_count;/* Number of CBR resources required for context */ + char ts_blade; /* If >= 0, migrate context if + ref from different blade */ + char ts_force_cch_reload; char ts_force_unload;/* force context to be unloaded after migration */ char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each @@ -392,12 +409,12 @@ struct gru_state { gru segments (64) */ void *gs_gru_base_vaddr; /* Virtual address of gru segments (64) */ - unsigned char gs_gid; /* unique GRU number */ + unsigned short gs_gid; /* unique GRU number */ + unsigned short gs_blade_id; /* blade of GRU */ unsigned char gs_tgh_local_shift; /* used to pick TGH for local flush */ unsigned char gs_tgh_first_remote; /* starting TGH# for remote flush */ - unsigned short gs_blade_id; /* blade of GRU */ spinlock_t gs_asid_lock; /* lock used for assigning asids */ spinlock_t gs_lock; /* lock used for @@ -492,6 +509,10 @@ struct gru_blade_state { (i) < GRU_CHIPLETS_PER_BLADE; \ (i)++, (gru)++) +/* Scan all GRUs */ +#define foreach_gid(gid) \ + for ((gid) = 0; (gid) < gru_max_gids; (gid)++) + /* Scan all active GTSs on a gru. Note: must hold ss_lock to use this macro. 
*/ #define for_each_gts_on_gru(gts, gru, ctxnum) \ for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \ @@ -578,9 +599,11 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, int tsid); extern void gru_unload_context(struct gru_thread_state *gts, int savestate); +extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); extern void gts_drop(struct gru_thread_state *gts); extern void gru_tgh_flush_init(struct gru_state *gru); extern int gru_kservices_init(struct gru_state *gru); +extern void gru_kservices_exit(struct gru_state *gru); extern irqreturn_t gru_intr(int irq, void *dev_id); extern int gru_handle_user_call_os(unsigned long address); extern int gru_user_flush_tlb(unsigned long arg); diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index c84496a7769..1d125091f5e 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c @@ -187,7 +187,7 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", gid, asid, num, asids->mt_ctxbitmap); tgh = get_lock_tgh_handle(gru); - tgh_invalidate(tgh, start, 0, asid, grupagesize, 0, + tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, num - 1, asids->mt_ctxbitmap); get_unlock_tgh_handle(tgh); } else { @@ -210,11 +210,10 @@ void gru_flush_all_tlb(struct gru_state *gru) { struct gru_tlb_global_handle *tgh; - gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid); + gru_dbg(grudev, "gid %d\n", gru->gs_gid); tgh = get_lock_tgh_handle(gru); - tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0); + tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff); get_unlock_tgh_handle(tgh); - preempt_enable(); } /* diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 275b78896a7..114444cfd49 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -92,7 +92,9 @@ struct xpc_rsvd_page { u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ union { unsigned long vars_pa; /* phys address of struct xpc_vars */ - unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */ + unsigned long activate_gru_mq_desc_gpa; /* phys addr of */ + /* activate mq's */ + /* gru mq descriptor */ } sn; unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */ @@ -189,7 +191,9 @@ struct xpc_gru_mq_uv { int irq; /* irq raised when message is received in mq */ int mmr_blade; /* blade where watchlist was allocated from */ unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */ + unsigned long mmr_value; /* value of irq mmr located on mmr_blade */ int watchlist_num; /* number of watchlist allocated by BIOS */ + void *gru_mq_desc; /* opaque structure used by the GRU driver */ }; /* @@ -197,6 +201,7 @@ struct xpc_gru_mq_uv { * heartbeat, partition active state, and channel state. This is UV only. 
*/ struct xpc_activate_mq_msghdr_uv { + unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ short partid; /* sender's partid */ u8 act_state; /* sender's act_state at time msg sent */ u8 type; /* message's type */ @@ -232,7 +237,7 @@ struct xpc_activate_mq_msg_heartbeat_req_uv { struct xpc_activate_mq_msg_activate_req_uv { struct xpc_activate_mq_msghdr_uv hdr; unsigned long rp_gpa; - unsigned long activate_mq_gpa; + unsigned long activate_gru_mq_desc_gpa; }; struct xpc_activate_mq_msg_deactivate_req_uv { @@ -263,7 +268,7 @@ struct xpc_activate_mq_msg_chctl_openreply_uv { short ch_number; short remote_nentries; /* ??? Is this needed? What is? */ short local_nentries; /* ??? Is this needed? What is? */ - unsigned long local_notify_mq_gpa; + unsigned long notify_gru_mq_desc_gpa; }; /* @@ -510,8 +515,8 @@ struct xpc_channel_sn2 { }; struct xpc_channel_uv { - unsigned long remote_notify_mq_gpa; /* gru phys address of remote */ - /* partition's notify mq */ + void *cached_notify_gru_mq_desc; /* remote partition's notify mq's */ + /* gru mq descriptor */ struct xpc_send_msg_slot_uv *send_msg_slots; void *recv_msg_slots; /* each slot will hold a xpc_notify_mq_msg_uv */ @@ -682,8 +687,12 @@ struct xpc_partition_sn2 { }; struct xpc_partition_uv { - unsigned long remote_activate_mq_gpa; /* gru phys address of remote */ - /* partition's activate mq */ + unsigned long activate_gru_mq_desc_gpa; /* phys addr of partition's */ + /* activate mq's gru mq */ + /* descriptor */ + void *cached_activate_gru_mq_desc; /* cached copy of partition's */ + /* activate mq's gru mq descriptor */ + struct mutex cached_activate_gru_mq_desc_mutex; spinlock_t flags_lock; /* protect updating of flags */ unsigned int flags; /* general flags */ u8 remote_act_state; /* remote partition's act_state */ @@ -694,8 +703,9 @@ struct xpc_partition_uv { /* struct xpc_partition_uv flags */ -#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001 -#define XPC_P_ENGAGED_UV 0x00000002 +#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001 +#define XPC_P_ENGAGED_UV 0x00000002 +#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000004 /* struct xpc_partition_uv act_state change requests */ @@ -804,6 +814,7 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_disconnect_wait(int); extern int (*xpc_setup_partitions_sn) (void); +extern void (*xpc_teardown_partitions_sn) (void); extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, unsigned long *, size_t *); @@ -846,8 +857,8 @@ extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *, unsigned long *); extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); -extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, - unsigned long); +extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, + unsigned long); extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *, u16, u8, xpc_notify_func, void *); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 45fd653dbe3..99a2534c38a 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -183,6 +183,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, &part->remote_openclose_args[ch_number]; struct xpc_channel *ch = &part->channels[ch_number]; enum xp_retval reason; + enum xp_retval ret; spin_lock_irqsave(&ch->lock, irq_flags); @@ -399,8 +400,13 @@ again: 
DBUG_ON(args->local_nentries == 0); DBUG_ON(args->remote_nentries == 0); + ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa); + if (ret != xpSuccess) { + XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } ch->flags |= XPC_C_ROPENREPLY; - xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa); if (args->local_nentries < ch->remote_nentries) { dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 6576170de96..1ab9fda87fa 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -171,6 +171,7 @@ static struct notifier_block xpc_die_notifier = { }; int (*xpc_setup_partitions_sn) (void); +void (*xpc_teardown_partitions_sn) (void); enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, unsigned long *rp_pa, size_t *len); @@ -217,8 +218,8 @@ void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch, void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, unsigned long *irq_flags); -void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, - unsigned long msgqueue_pa); +enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, + unsigned long msgqueue_pa); enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags, void *payload, u16 payload_size, @@ -998,6 +999,7 @@ xpc_setup_partitions(void) static void xpc_teardown_partitions(void) { + xpc_teardown_partitions_sn(); kfree(xpc_partitions); } diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 2e975762c32..eaaa964942d 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -66,6 +66,12 @@ xpc_setup_partitions_sn_sn2(void) return 0; } +static void +xpc_teardown_partitions_sn_sn2(void) +{ + /* nothing needs to be done */ +} + /* SH_IPI_ACCESS shub register value on startup */ static u64 xpc_sh1_IPI_access_sn2; static u64 xpc_sh2_IPI_access0_sn2; @@ -436,11 +442,12 @@ xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch) XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST); } -static void +static enum xp_retval xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch, unsigned long msgqueue_pa) { ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa; + return xpSuccess; } /* @@ -1737,20 +1744,20 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg_sn2 *msg; - s64 put; + s64 put, remote_nentries = ch->remote_nentries; /* flags are zeroed when the buffer is allocated */ - if (ch_sn2->remote_GP.put < ch->remote_nentries) + if (ch_sn2->remote_GP.put < remote_nentries) return; - put = max(ch_sn2->w_remote_GP.put, ch->remote_nentries); + put = max(ch_sn2->w_remote_GP.put, remote_nentries); do { msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + - (put % ch->remote_nentries) * + (put % remote_nentries) * ch->entry_size); DBUG_ON(!(msg->flags & XPC_M_SN2_READY)); DBUG_ON(!(msg->flags & XPC_M_SN2_DONE)); - DBUG_ON(msg->number != put - ch->remote_nentries); + DBUG_ON(msg->number != put - remote_nentries); msg->flags = 0; } while (++put < ch_sn2->remote_GP.put); } @@ -2315,6 +2322,7 @@ xpc_init_sn2(void) size_t buf_size; xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2; + xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2; xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2; xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; 
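The xpc_uv.c diff below switches every queue reference from a raw global address to the new struct gru_message_queue_desc. For orientation, here is a minimal, hypothetical user of the descriptor API declared in grukservices.h; the buffer handling and the zeroed nasid/vector/apicid arguments (no interrupt delivery) are illustrative assumptions, not code from this commit:

	#include "../sgi-gru/grukservices.h"

	static struct gru_message_queue_desc example_mqd;

	/* Initialize a queue over caller-supplied memory; vector == 0 means
	 * no interrupt is delivered when a message arrives (per the header). */
	static int example_mq_init(void *buf, unsigned int bytes)
	{
		return gru_create_message_queue(&example_mqd, buf, bytes, 0, 0, 0);
	}

	/* Send returns MQE_OK (0) on success, other MQE_* codes on failure. */
	static int example_mq_send(void *msg, unsigned int bytes)
	{
		return gru_send_message_gpa(&example_mqd, msg, bytes);
	}

	/* Receive side: messages must be consumed and freed in order. */
	static void example_mq_drain(void)
	{
		void *msg;

		while ((msg = gru_get_next_message(&example_mqd)) != NULL)
			gru_free_message(&example_mqd, msg);
	}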
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 29c0502a96b..f7fff4727ed 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -31,6 +31,21 @@ #include "../sgi-gru/grukservices.h" #include "xpc.h" +#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV +struct uv_IO_APIC_route_entry { + __u64 vector : 8, + delivery_mode : 3, + dest_mode : 1, + delivery_status : 1, + polarity : 1, + __reserved_1 : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15, + dest : 32; +}; +#endif + static atomic64_t xpc_heartbeat_uv; static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); @@ -56,26 +71,52 @@ xpc_setup_partitions_sn_uv(void) for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { part_uv = &xpc_partitions[partid].sn.uv; + mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex); spin_lock_init(&part_uv->flags_lock); part_uv->remote_act_state = XPC_P_AS_INACTIVE; } return 0; } +static void +xpc_teardown_partitions_sn_uv(void) +{ + short partid; + struct xpc_partition_uv *part_uv; + unsigned long irq_flags; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + + if (part_uv->cached_activate_gru_mq_desc != NULL) { + mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + kfree(part_uv->cached_activate_gru_mq_desc); + part_uv->cached_activate_gru_mq_desc = NULL; + mutex_unlock(&part_uv-> + cached_activate_gru_mq_desc_mutex); + } + } +} + static int xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) { + int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); + #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); if (mq->irq < 0) { dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - mq->irq); + -mq->irq); + return mq->irq; } -#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV - int mmr_pnode; - unsigned long mmr_value; + mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); +#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0) mq->irq = SGI_XPC_ACTIVATE; else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0) @@ -83,10 +124,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) else return -EINVAL; - mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); - mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; - - uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); + mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; + uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); #else #error not a supported configuration #endif @@ -127,7 +166,7 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) return ret; } #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV - ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address), + ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address), mq->order, &mq->mmr_offset); if (ret < 0) { dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n", @@ -168,12 +207,22 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, int pg_order; struct page *page; struct xpc_gru_mq_uv *mq; + struct uv_IO_APIC_route_entry *mmr_value; mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL); if (mq == NULL) { 
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " "a xpc_gru_mq_uv structure\n"); ret = -ENOMEM; + goto out_0; + } + + mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc), + GFP_KERNEL); + if (mq->gru_mq_desc == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() " + "a gru_message_queue_desc structure\n"); + ret = -ENOMEM; goto out_1; } @@ -194,14 +243,6 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, } mq->address = page_address(page); - ret = gru_create_message_queue(mq->address, mq_size); - if (ret != 0) { - dev_err(xpc_part, "gru_create_message_queue() returned " - "error=%d\n", ret); - ret = -EINVAL; - goto out_3; - } - /* enable generation of irq when GRU mq operation occurs to this mq */ ret = xpc_gru_mq_watchlist_alloc_uv(mq); if (ret != 0) @@ -214,10 +255,20 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); if (ret != 0) { dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", - mq->irq, ret); + mq->irq, -ret); goto out_5; } + mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; + ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, + nid, mmr_value->vector, mmr_value->dest); + if (ret != 0) { + dev_err(xpc_part, "gru_create_message_queue() returned " + "error=%d\n", ret); + ret = -EINVAL; + goto out_6; + } + /* allow other partitions to access this GRU mq */ xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); if (xp_ret != xpSuccess) { @@ -237,8 +288,10 @@ out_4: out_3: free_pages((unsigned long)mq->address, pg_order); out_2: - kfree(mq); + kfree(mq->gru_mq_desc); out_1: + kfree(mq); +out_0: return ERR_PTR(ret); } @@ -268,13 +321,14 @@ xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) } static enum xp_retval -xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size) +xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg, + size_t msg_size) { enum xp_retval xp_ret; int ret; while (1) { - ret = gru_send_message_gpa(mq_gpa, msg, msg_size); + ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size); if (ret == MQE_OK) { xp_ret = xpSuccess; break; @@ -421,7 +475,15 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; part->remote_rp_pa = msg->rp_gpa; /* !!! 
_pa is _gpa */ part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; - part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa; + + if (msg->activate_gru_mq_desc_gpa != + part_uv->activate_gru_mq_desc_gpa) { + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + part_uv->activate_gru_mq_desc_gpa = + msg->activate_gru_mq_desc_gpa; + } spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); (*wakeup_hb_checker)++; @@ -498,7 +560,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, args = &part->remote_openclose_args[msg->ch_number]; args->remote_nentries = msg->remote_nentries; args->local_nentries = msg->local_nentries; - args->local_msgqueue_pa = msg->local_notify_mq_gpa; + args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa; spin_lock_irqsave(&part->chctl_lock, irq_flags); part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; @@ -558,9 +620,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) short partid; struct xpc_partition *part; int wakeup_hb_checker = 0; + int part_referenced; while (1) { - msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address); + msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc); if (msg_hdr == NULL) break; @@ -571,14 +634,15 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) partid); } else { part = &xpc_partitions[partid]; - if (xpc_part_ref(part)) { - xpc_handle_activate_mq_msg_uv(part, msg_hdr, - &wakeup_hb_checker); + + part_referenced = xpc_part_ref(part); + xpc_handle_activate_mq_msg_uv(part, msg_hdr, + &wakeup_hb_checker); + if (part_referenced) xpc_part_deref(part); - } } - gru_free_message(xpc_activate_mq_uv->address, msg_hdr); + gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr); } if (wakeup_hb_checker) @@ -588,21 +652,73 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) } static enum xp_retval +xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc, + unsigned long gru_mq_desc_gpa) +{ + enum xp_retval ret; + + ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa, + sizeof(struct gru_message_queue_desc)); + if (ret == xpSuccess) + gru_mq_desc->mq = NULL; + + return ret; +} + +static enum xp_retval xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, int msg_type) { struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct gru_message_queue_desc *gru_mq_desc; + unsigned long irq_flags; + enum xp_retval ret; DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV); msg_hdr->type = msg_type; - msg_hdr->partid = XPC_PARTID(part); + msg_hdr->partid = xp_partition_id; msg_hdr->act_state = part->act_state; msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; + mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); +again: + if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) { + gru_mq_desc = part_uv->cached_activate_gru_mq_desc; + if (gru_mq_desc == NULL) { + gru_mq_desc = kmalloc(sizeof(struct + gru_message_queue_desc), + GFP_KERNEL); + if (gru_mq_desc == NULL) { + ret = xpNoMemory; + goto done; + } + part_uv->cached_activate_gru_mq_desc = gru_mq_desc; + } + + ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc, + part_uv-> + activate_gru_mq_desc_gpa); + if (ret != xpSuccess) + goto done; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + } + /* ??? 
Is holding a spin_lock (ch->lock) during this call a bad idea? */ - return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg, - msg_size); + ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg, + msg_size); + if (ret != xpSuccess) { + smp_rmb(); /* ensure a fresh copy of part_uv->flags */ + if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) + goto again; + } +done: + mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex); + return ret; } static void @@ -620,7 +736,7 @@ static void xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, void *msg, size_t msg_size, int msg_type) { - struct xpc_partition *part = &xpc_partitions[ch->number]; + struct xpc_partition *part = &xpc_partitions[ch->partid]; enum xp_retval ret; ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); @@ -692,7 +808,8 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, static int xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) { - rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address); + rp->sn.activate_gru_mq_desc_gpa = + uv_gpa(xpc_activate_mq_uv->gru_mq_desc); return 0; } @@ -787,7 +904,8 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; - part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa; + part->sn.uv.activate_gru_mq_desc_gpa = + remote_rp->sn.activate_gru_mq_desc_gpa; /* * ??? Is it a good idea to make this conditional on what is @@ -795,7 +913,8 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, */ if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { msg.rp_gpa = uv_gpa(xpc_rsvd_page); - msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa; + msg.activate_gru_mq_desc_gpa = + xpc_rsvd_page->sn.activate_gru_mq_desc_gpa; xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); } @@ -857,7 +976,8 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) if (head->first == NULL) head->last = NULL; } - head->n_entries++; + head->n_entries--; + BUG_ON(head->n_entries < 0); spin_unlock_irqrestore(&head->lock, irq_flags); first->next = NULL; return first; @@ -876,8 +996,7 @@ xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head, else head->first = last; head->last = last; - head->n_entries--; - BUG_ON(head->n_entries < 0); + head->n_entries++; spin_unlock_irqrestore(&head->lock, irq_flags); } @@ -1037,6 +1156,12 @@ xpc_setup_msg_structures_uv(struct xpc_channel *ch) DBUG_ON(ch->flags & XPC_C_SETUP); + ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct + gru_message_queue_desc), + GFP_KERNEL); + if (ch_uv->cached_notify_gru_mq_desc == NULL) + return xpNoMemory; + ret = xpc_allocate_send_msg_slot_uv(ch); if (ret == xpSuccess) { @@ -1060,7 +1185,8 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch) DBUG_ON(!spin_is_locked(&ch->lock)); - ch_uv->remote_notify_mq_gpa = 0; + kfree(ch_uv->cached_notify_gru_mq_desc); + ch_uv->cached_notify_gru_mq_desc = NULL; if (ch->flags & XPC_C_SETUP) { xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); @@ -1111,7 +1237,7 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) msg.ch_number = ch->number; msg.local_nentries = ch->local_nentries; msg.remote_nentries = ch->remote_nentries; - msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv); + msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc); xpc_send_activate_IRQ_ch_uv(ch, 
irq_flags, &msg, sizeof(msg), XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); } @@ -1128,11 +1254,15 @@ xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) xpc_wakeup_channel_mgr(part); } -static void +static enum xp_retval xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, - unsigned long msgqueue_pa) + unsigned long gru_mq_desc_gpa) { - ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa; + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + + DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL); + return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc, + gru_mq_desc_gpa); } static void @@ -1339,7 +1469,8 @@ xpc_handle_notify_IRQ_uv(int irq, void *dev_id) short partid; struct xpc_partition *part; - while ((msg = gru_get_next_message(xpc_notify_mq_uv)) != NULL) { + while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) != + NULL) { partid = msg->hdr.partid; if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { @@ -1354,7 +1485,7 @@ xpc_handle_notify_IRQ_uv(int irq, void *dev_id) } } - gru_free_message(xpc_notify_mq_uv, msg); + gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg); } return IRQ_HANDLED; @@ -1438,7 +1569,8 @@ xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, msg->hdr.msg_slot_number = msg_slot->msg_slot_number; memcpy(&msg->payload, payload, payload_size); - ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size); + ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, + msg_size); if (ret == xpSuccess) goto out_1; @@ -1529,7 +1661,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) msg->hdr.partid = xp_partition_id; msg->hdr.size = 0; /* size of zero indicates this is an ACK */ - ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, + ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, sizeof(struct xpc_notify_mq_msghdr_uv)); if (ret != xpSuccess) XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); @@ -1541,6 +1673,7 @@ int xpc_init_uv(void) { xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv; + xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv; xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv; xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv; xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f062b424704..16899eee397 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -974,7 +974,7 @@ config ENC28J60_WRITEVERIFY config ETHOC tristate "OpenCores 10/100 Mbps Ethernet MAC support" - depends on NET_ETHERNET + depends on NET_ETHERNET && HAS_IOMEM select MII select PHYLIB help @@ -2547,6 +2547,23 @@ config S2IO More specific information on configuring the driver is in <file:Documentation/networking/s2io.txt>. +config VXGE + tristate "Neterion X3100 Series 10GbE PCIe Server Adapter" + depends on PCI && INET + ---help--- + This driver supports Neterion Inc's X3100 Series 10 GbE PCIe + I/O Virtualized Server Adapter. + More specific information on configuring the driver is in + <file:Documentation/networking/vxge.txt>. + +config VXGE_DEBUG_TRACE_ALL + bool "Enable all debug trace statements in driver" + default n + depends on VXGE + ---help--- + Say Y here if you want to enable all the debug trace statements in + the driver. By default only a few debug trace statements are enabled. 
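A Kconfig bool like VXGE_DEBUG_TRACE_ALL typically reaches the driver as a CONFIG_* preprocessor symbol gating its trace mask; a hypothetical sketch of the pattern (the macro name and mask values are invented for illustration, not taken from the vxge sources):

	/* Hypothetical consumer of the Kconfig switch above. */
	#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
	#define VXGE_TRACE_MASK	0xffffffff	/* trace every component */
	#else
	#define VXGE_TRACE_MASK	0x00000001	/* default: only a few trace points */
	#endif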
+ config MYRI10GE tristate "Myricom Myri-10G Ethernet support" depends on PCI && INET diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 98409c9dd44..edc9a0d6171 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -220,6 +220,7 @@ obj-$(CONFIG_R8169) += r8169.o obj-$(CONFIG_AMD8111_ETH) += amd8111e.o obj-$(CONFIG_IBMVETH) += ibmveth.o obj-$(CONFIG_S2IO) += s2io.o +obj-$(CONFIG_VXGE) += vxge/ obj-$(CONFIG_MYRI10GE) += myri10ge/ obj-$(CONFIG_SMC91X) += smc91x.o obj-$(CONFIG_SMC911X) += smc911x.o diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 254ec62b5f5..d8350860c0f 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -559,7 +559,7 @@ static void dm9000_show_carrier(board_info_t *db, static void dm9000_poll_work(struct work_struct *w) { - struct delayed_work *dw = container_of(w, struct delayed_work, work); + struct delayed_work *dw = to_delayed_work(w); board_info_t *db = container_of(dw, board_info_t, phy_poll); struct net_device *ndev = db->ndev; diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index db1e31f9520..33fa9eee4ca 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c @@ -8,7 +8,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include <linux/version.h> #include <linux/io.h> #include <linux/module.h> #include <linux/moduleparam.h> diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 049b0a7e01f..8bbe7f61799 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -129,7 +129,8 @@ static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task struct sk_buff *skb; skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd); - dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE); + dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, + DMA_FROM_DEVICE); kfree_skb(skb); } } @@ -150,7 +151,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk); bd->status = FEC_RX_BUFFER_SIZE; - bd->skb_pa = dma_map_single(&dev->dev, skb->data, + bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); bcom_submit_next_buffer(rxtsk, skb); @@ -270,15 +271,6 @@ static void mpc52xx_fec_phy_stop(struct net_device *dev) phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN); } -static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv, - struct mii_ioctl_data *mii_data, int cmd) -{ - if (!priv->phydev) - return -ENOTSUPP; - - return phy_mii_ioctl(priv->phydev, mii_data, cmd); -} - static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv) { struct mpc52xx_fec __iomem *fec = priv->fec; @@ -370,7 +362,7 @@ static int mpc52xx_fec_close(struct net_device *dev) * invariant will hold if you make sure that the netif_*_queue() * calls are done at the proper times. 
*/ -static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); struct bcom_fec_bd *bd; @@ -378,7 +370,7 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d if (bcom_queue_full(priv->tx_dmatsk)) { if (net_ratelimit()) dev_err(&dev->dev, "transmit queue overrun\n"); - return 1; + return NETDEV_TX_BUSY; } spin_lock_irq(&priv->lock); @@ -388,7 +380,8 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d bcom_prepare_next_buffer(priv->tx_dmatsk); bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC; - bd->skb_pa = dma_map_single(&dev->dev, skb->data, skb->len, DMA_TO_DEVICE); + bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); bcom_submit_next_buffer(priv->tx_dmatsk, skb); @@ -398,7 +391,7 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d spin_unlock_irq(&priv->lock); - return 0; + return NETDEV_TX_OK; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -430,7 +423,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) struct bcom_fec_bd *bd; skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL, (struct bcom_bd **)&bd); - dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_TO_DEVICE); + dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, + DMA_TO_DEVICE); dev_kfree_skb_irq(skb); } @@ -455,7 +449,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, (struct bcom_bd **)&bd); - dma_unmap_single(&dev->dev, bd->skb_pa, rskb->len, DMA_FROM_DEVICE); + dma_unmap_single(dev->dev.parent, bd->skb_pa, rskb->len, + DMA_FROM_DEVICE); /* Test for errors in received frame */ if (status & BCOM_FEC_RX_BD_ERRORS) { @@ -464,7 +459,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) bcom_prepare_next_buffer(priv->rx_dmatsk); bd->status = FEC_RX_BUFFER_SIZE; - bd->skb_pa = dma_map_single(&dev->dev, rskb->data, + bd->skb_pa = dma_map_single(dev->dev.parent, + rskb->data, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); bcom_submit_next_buffer(priv->rx_dmatsk, rskb); @@ -499,7 +495,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) bcom_prepare_next_buffer(priv->rx_dmatsk); bd->status = FEC_RX_BUFFER_SIZE; - bd->skb_pa = dma_map_single(&dev->dev, skb->data, + bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); bcom_submit_next_buffer(priv->rx_dmatsk, skb); @@ -847,12 +843,20 @@ static void mpc52xx_fec_get_drvinfo(struct net_device *dev, static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_gset(priv->phydev, cmd); } static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_sset(priv->phydev, cmd); } @@ -882,9 +886,28 @@ static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); - return mpc52xx_fec_phy_mii_ioctl(priv, if_mii(rq), cmd); + if (!priv->phydev) + return -ENOTSUPP; + + return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); } +static const struct net_device_ops mpc52xx_fec_netdev_ops = { + .ndo_open = mpc52xx_fec_open, 
+ .ndo_stop = mpc52xx_fec_close, + .ndo_start_xmit = mpc52xx_fec_start_xmit, + .ndo_set_multicast_list = mpc52xx_fec_set_multicast_list, + .ndo_set_mac_address = mpc52xx_fec_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mpc52xx_fec_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_tx_timeout = mpc52xx_fec_tx_timeout, + .ndo_get_stats = mpc52xx_fec_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mpc52xx_fec_poll_controller, +#endif +}; + /* ======================================================================== */ /* OF Driver */ /* ======================================================================== */ @@ -929,22 +952,10 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) return -EBUSY; /* Init ether ndev with what we have */ - ndev->open = mpc52xx_fec_open; - ndev->stop = mpc52xx_fec_close; - ndev->hard_start_xmit = mpc52xx_fec_hard_start_xmit; - ndev->do_ioctl = mpc52xx_fec_ioctl; + ndev->netdev_ops = &mpc52xx_fec_netdev_ops; ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops; - ndev->get_stats = mpc52xx_fec_get_stats; - ndev->set_mac_address = mpc52xx_fec_set_mac_address; - ndev->set_multicast_list = mpc52xx_fec_set_multicast_list; - ndev->tx_timeout = mpc52xx_fec_tx_timeout; ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; ndev->base_addr = mem.start; -#ifdef CONFIG_NET_POLL_CONTROLLER - ndev->poll_controller = mpc52xx_fec_poll_controller; -#endif - - priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */ spin_lock_init(&priv->lock);
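The mpc52xx_fec hunks above are part of the tree-wide net_device_ops conversion: the function pointers that used to be assigned one by one on struct net_device move into a single const ops table shared by every instance, and the transmit path now returns the NETDEV_TX_* codes instead of 0/1. A minimal sketch of the pattern, with hypothetical example_* names that are not part of this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_open(struct net_device *ndev) { return 0; }
static int example_close(struct net_device *ndev) { return 0; }

static int example_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	dev_kfree_skb(skb);	/* a real driver would hand the frame to hardware */
	return NETDEV_TX_OK;	/* or NETDEV_TX_BUSY to request a retry; never 0/1 */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_close,
	.ndo_start_xmit	= example_start_xmit,
};

In probe(), the single assignment ndev->netdev_ops = &example_netdev_ops; then replaces the old block of per-field hookups, exactly as the hunk above does for mpc52xx_fec.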
FIRMWARE_1200 "yam/1200.bin" + #define YAM_9600 1 #define YAM_1200 2 @@ -342,9 +345,51 @@ static int fpga_write(int iobase, unsigned char wrd) return 0; } -static unsigned char *add_mcs(unsigned char *bits, int bitrate) +/* + * predef should be 0 for loading user defined mcs + * predef should be YAM_1200 for loading predef 1200 mcs + * predef should be YAM_9600 for loading predef 9600 mcs + */ +static unsigned char *add_mcs(unsigned char *bits, int bitrate, + unsigned int predef) { + const char *fw_name[2] = {FIRMWARE_9600, FIRMWARE_1200}; + const struct firmware *fw; + struct platform_device *pdev; struct yam_mcs *p; + int err; + + switch (predef) { + case 0: + fw = NULL; + break; + case YAM_1200: + case YAM_9600: + predef--; + pdev = platform_device_register_simple("yam", 0, NULL, 0); + if (IS_ERR(pdev)) { + printk(KERN_ERR "yam: Failed to register firmware\n"); + return NULL; + } + err = request_firmware(&fw, fw_name[predef], &pdev->dev); + platform_device_unregister(pdev); + if (err) { + printk(KERN_ERR "Failed to load firmware \"%s\"\n", + fw_name[predef]); + return NULL; + } + if (fw->size != YAM_FPGA_SIZE) { + printk(KERN_ERR "Bogus length %zu in firmware \"%s\"\n", + fw->size, fw_name[predef]); + release_firmware(fw); + return NULL; + } + bits = (unsigned char *)fw->data; + break; + default: + printk(KERN_ERR "yam: Invalid predef number %u\n", predef); + return NULL; + } /* If it already exists, replace the bit data */ p = yam_data; @@ -359,6 +404,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate) /* Allocate a new mcs */ if ((p = kmalloc(sizeof(struct yam_mcs), GFP_KERNEL)) == NULL) { printk(KERN_WARNING "YAM: no memory to allocate mcs\n"); + release_firmware(fw); return NULL; } memcpy(p->bits, bits, YAM_FPGA_SIZE); @@ -366,6 +412,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate) p->next = yam_data; yam_data = p; + release_firmware(fw); return p->bits; } @@ -383,9 +430,11 @@ static unsigned char *get_mcs(int bitrate) /* Load predefined mcs data */ switch (bitrate) { case 1200: - return add_mcs(bits_1200, bitrate); + /* setting predef as YAM_1200 for loading predef 1200 mcs */ + return add_mcs(NULL, bitrate, YAM_1200); default: - return add_mcs(bits_9600, bitrate); + /* setting predef as YAM_9600 for loading predef 9600 mcs */ + return add_mcs(NULL, bitrate, YAM_9600); } } @@ -936,7 +985,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) kfree(ym); return -EINVAL; } - add_mcs(ym->bits, ym->bitrate); + /* setting predef as 0 for loading userdefined mcs data */ + add_mcs(ym->bits, ym->bitrate, 0); kfree(ym); break; @@ -1159,6 +1209,8 @@ static void __exit yam_cleanup_driver(void) MODULE_AUTHOR("Frederic Rible F1OAT frible@teaser.fr"); MODULE_DESCRIPTION("Yam amateur radio modem driver"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_1200); +MODULE_FIRMWARE(FIRMWARE_9600); module_init(yam_init_driver); module_exit(yam_cleanup_driver); diff --git a/drivers/net/hamradio/yam1200.h b/drivers/net/hamradio/yam1200.h deleted file mode 100644 index 53ca8a3903a..00000000000 --- a/drivers/net/hamradio/yam1200.h +++ /dev/null @@ -1,343 +0,0 @@ -/* - * - * File yam1k2b5.mcs converted to h format by mcs2h - * - * (C) F6FBB 1998 - * - * Tue Aug 25 20:24:08 1998 - * - */ - -static unsigned char bits_1200[]= { -0xff,0xf2,0x00,0xa5,0xad,0xff,0xfe,0x9f,0xff,0xef,0xf3,0xcb,0xff,0xdb,0xfc,0xf2, -0xff,0xf6,0xff,0x3c,0xbf,0xfd,0xbf,0xdf,0x6e,0x3f,0x6f,0xf1,0x7d,0xb4,0xfd,0xbf, 
diff --git a/drivers/net/hamradio/yam1200.h b/drivers/net/hamradio/yam1200.h deleted file mode 100644 index 53ca8a3903a..00000000000 --- a/drivers/net/hamradio/yam1200.h +++ /dev/null @@ -1,343 +0,0 @@ -/* - * - * File yam1k2b5.mcs converted to h format by mcs2h - * - * (C) F6FBB 1998 - * - * Tue Aug 25 20:24:08 1998 - * - */ -static unsigned char bits_1200[]= { [... FPGA bitstream bytes elided; the driver now requests this image at runtime as "yam/1200.bin" ...] -};
diff --git a/drivers/net/hamradio/yam9600.h b/drivers/net/hamradio/yam9600.h deleted file mode 100644 index 5ed1fe6ff43..00000000000 --- a/drivers/net/hamradio/yam9600.h +++ /dev/null @@ -1,343 +0,0 @@ -/* - * - * File yam111.mcs converted to h format by mcs2h - * - * (C) F6FBB 1998 - * - * Tue Aug 25 20:23:03 1998 - * - */ -static unsigned char bits_9600[]= { [... FPGA bitstream bytes elided; the driver now requests this image at runtime as "yam/9600.bin" ...] -};
-0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x7f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff, -0xff,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xdf,0xff, -0xff,0xf1,0xfd,0xff,0x7f,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -0xff,0xfe,0xff,0xff,0xff,0xff,0xf0,0x9f,0xff,0xff,0xff,0xfe,0xf7,0xfd,0xff,0xff, -0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbf,0xff,0xff,0xff,0xff,0xff, -0xf1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -0xff,0xff,0xff,0xff,0xff,0xf0,0x6f,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, -0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf1, -0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -0xff,0xff,0xff,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, -0xff,0xfb,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0x6f,0xff,0xfe,0xbf,0xff,0xf1,0xff, -0xf7,0xff,0xff,0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd, -0xff,0xff,0xff,0xf0,0xef,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -0xfb,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0x57,0xff,0xfd,0xbf,0xff,0xf1,0xff,0xef, -0xfe,0xff,0xbf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff, -0xde,0xff,0xf0,0xcf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xf7,0xdb,0xff,0xdb,0xfd, -0xf6,0xff,0xf6,0xff,0x3c,0xbc,0xbc,0xbf,0xdf,0x6f,0xe7,0x2f,0xf1,0x3c,0xbf,0xfd, -0xbf,0xdf,0x6f,0xff,0x6f,0xf7,0xdb,0xff,0xdb,0xfd,0xf6,0xff,0xf6,0xff,0xff,0xff, -0x02,0x01,0xdf,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -0xff,0xff,0xff,0xff,0xff,0xff }; diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c index de2d4862468..f50fac25be4 100644 --- a/drivers/net/igb/e1000_phy.c +++ b/drivers/net/igb/e1000_phy.c @@ -448,8 +448,11 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) goto out; } - /* Wait 15ms for MAC to configure PHY from NVM settings. */ - msleep(15); + /* + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); /* * The NVM settings will configure LPLU in D3 for diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index fb09c8ad9f0..27eae49e79c 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -1419,7 +1419,6 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; - u32 stat_reg = 0; hw->mac.autoneg = false; @@ -1443,18 +1442,11 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ - E1000_CTRL_FD); /* Force Duplex to FULL */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ - if (hw->phy.media_type == e1000_media_type_copper && - hw->phy.type == e1000_phy_m88) + if (hw->phy.type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ - else { - /* Set the ILOS bit on the fiber Nic if half duplex link is - * detected. 
*/ - stat_reg = rd32(E1000_STATUS); - if ((stat_reg & E1000_STATUS_FD) == 0) - ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); - } wr32(E1000_CTRL, ctrl_reg); diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ca842163dce..03aa9593dd9 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -135,8 +135,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int); static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); static void igb_restore_vf_multicasts(struct igb_adapter *adapter); -static int igb_suspend(struct pci_dev *, pm_message_t); #ifdef CONFIG_PM +static int igb_suspend(struct pci_dev *, pm_message_t); static int igb_resume(struct pci_dev *); #endif static void igb_shutdown(struct pci_dev *); @@ -420,6 +420,9 @@ static void igb_free_queues(struct igb_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) netif_napi_del(&adapter->rx_ring[i].napi); + adapter->num_rx_queues = 0; + adapter->num_tx_queues = 0; + kfree(adapter->tx_ring); kfree(adapter->rx_ring); } @@ -1476,9 +1479,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, netdev->name, ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : "unknown"), - ((hw->bus.width == e1000_bus_width_pcie_x4) - ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1) - ? "Width x1" : "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : + "unknown"), netdev->dev_addr); igb_read_part_num(hw, &part_num); @@ -5056,7 +5060,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) return 0; } -static int igb_suspend(struct pci_dev *pdev, pm_message_t state) +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -5115,15 +5119,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) wr32(E1000_WUFC, 0); } - /* make sure adapter isn't asleep if manageability/wol is enabled */ - if (wufc || adapter->en_mng_pt) { - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } else { + *enable_wake = wufc || adapter->en_mng_pt; + if (!*enable_wake) igb_shutdown_fiber_serdes_link_82575(hw); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - } /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ @@ -5131,12 +5129,29 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; } #ifdef CONFIG_PM +static int igb_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __igb_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + static int igb_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -5189,7 +5204,14 @@ static int igb_resume(struct pci_dev *pdev) static void igb_shutdown(struct pci_dev *pdev) { - igb_suspend(pdev, PMSG_SUSPEND); + bool wake; + + __igb_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index ed265a7a898..de4db0dc787 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -411,7 +411,8 @@ static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) /* Decide whether to use autoneg or not. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) + if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber && + (speed == IXGBE_LINK_SPEED_1GB_FULL)) ret_val = ixgbe_fc_autoneg(hw); if (ret_val) diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 8cfd3fd309a..63ab6671d08 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1937,7 +1937,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) /* Decide whether to use autoneg or not. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) + if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber && + (speed == IXGBE_LINK_SPEED_1GB_FULL)) ret_val = ixgbe_fc_autoneg(hw); if (ret_val) diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 7e94d6d399a..24f73e719c3 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -96,14 +96,11 @@ s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) #ifdef DEBUG +extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw); #define hw_dbg(hw, format, arg...) \ -printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg); + printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg) #else -static inline int __attribute__ ((format (printf, 2, 3))) -hw_dbg(struct ixgbe_hw *hw, const char *format, ...) -{ - return 0; -} +#define hw_dbg(hw, format, arg...) 
do {} while (0) #endif #endif /* IXGBE_COMMON */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 0a8731f1f23..bd0a0c27695 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -90,6 +90,8 @@ int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; } + dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; + return 0; } @@ -298,8 +300,10 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != - adapter->dcb_cfg.tc_config[priority].dcb_pfc) + adapter->dcb_cfg.tc_config[priority].dcb_pfc) { adapter->dcb_set_bitmap |= BIT_PFC; + adapter->temp_dcb_cfg.pfc_mode_enable = true; + } } static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 18ecba7f6ec..aafc120f164 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -129,6 +129,15 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) ecmd->advertising |= ADVERTISED_1000baseT_Full; + /* + * It's possible that phy.autoneg_advertised may not be + * set yet. If so display what the default would be - + * both 1G and 10G supported. + */ + if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full))) + ecmd->advertising |= (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full); ecmd->port = PORT_TP; } else if (hw->phy.media_type == ixgbe_media_type_backplane) { @@ -225,7 +234,16 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 
1 : 0); + /* + * Flow Control Autoneg isn't on if + * - we didn't ask for it OR + * - it failed, we know this by tx & rx being off + */ + if (hw->fc.disable_fc_autoneg || + (hw->fc.current_mode == ixgbe_fc_none)) + pause->autoneg = 0; + else + pause->autoneg = 1; if (hw->fc.current_mode == ixgbe_fc_rx_pause) { pause->rx_pause = 1; @@ -243,8 +261,12 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - if ((pause->autoneg == AUTONEG_ENABLE) || - (pause->rx_pause && pause->tx_pause)) + if (pause->autoneg != AUTONEG_ENABLE) + hw->fc.disable_fc_autoneg = true; + else + hw->fc.disable_fc_autoneg = false; + + if (pause->rx_pause && pause->tx_pause) hw->fc.requested_mode = ixgbe_fc_full; else if (pause->rx_pause && !pause->tx_pause) hw->fc.requested_mode = ixgbe_fc_rx_pause; @@ -712,9 +734,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *temp_ring; + struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; int i, err; u32 new_rx_count, new_tx_count; + bool need_update = false; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; @@ -733,80 +756,94 @@ static int ixgbe_set_ringparam(struct net_device *netdev, return 0; } - temp_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct ixgbe_ring), GFP_KERNEL); - if (!temp_ring) - return -ENOMEM; - while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); - if (new_tx_count != adapter->tx_ring->count) { + temp_tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct ixgbe_ring), GFP_KERNEL); + if (!temp_tx_ring) { + err = -ENOMEM; + goto err_setup; + } + + if (new_tx_count != adapter->tx_ring_count) { + memcpy(temp_tx_ring, adapter->tx_ring, + adapter->num_tx_queues * sizeof(struct ixgbe_ring)); for (i = 0; i < adapter->num_tx_queues; i++) { - temp_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); + temp_tx_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(adapter, + &temp_tx_ring[i]); if (err) { while (i) { i--; ixgbe_free_tx_resources(adapter, - &temp_ring[i]); + &temp_tx_ring[i]); } goto err_setup; } - temp_ring[i].v_idx = adapter->tx_ring[i].v_idx; + temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; } - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - ixgbe_reset_interrupt_capability(adapter); - ixgbe_napi_del_all(adapter); - INIT_LIST_HEAD(&netdev->napi_list); - kfree(adapter->tx_ring); - adapter->tx_ring = temp_ring; - temp_ring = NULL; - adapter->tx_ring_count = new_tx_count; + need_update = true; } - temp_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct ixgbe_ring), GFP_KERNEL); - if (!temp_ring) { - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - return -ENOMEM; + temp_rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct ixgbe_ring), GFP_KERNEL); + if ((!temp_rx_ring) && (need_update)) { + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]); + kfree(temp_tx_ring); + err = -ENOMEM; + goto err_setup; } - if (new_rx_count != adapter->rx_ring->count) { + if (new_rx_count != adapter->rx_ring_count) { + memcpy(temp_rx_ring, adapter->rx_ring, + adapter->num_rx_queues * sizeof(struct ixgbe_ring)); for (i = 0; i < adapter->num_rx_queues; i++) { - temp_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); + temp_rx_ring[i].count 
= new_rx_count; + err = ixgbe_setup_rx_resources(adapter, + &temp_rx_ring[i]); if (err) { while (i) { i--; ixgbe_free_rx_resources(adapter, - &temp_ring[i]); + &temp_rx_ring[i]); } goto err_setup; } - temp_ring[i].v_idx = adapter->rx_ring[i].v_idx; + temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; } + need_update = true; + } + + /* if rings need to be updated, here's the place to do it in one shot */ + if (need_update) { if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - ixgbe_reset_interrupt_capability(adapter); - ixgbe_napi_del_all(adapter); - INIT_LIST_HEAD(&netdev->napi_list); - kfree(adapter->rx_ring); - adapter->rx_ring = temp_ring; - temp_ring = NULL; - - adapter->rx_ring_count = new_rx_count; + ixgbe_down(adapter); + + /* tx */ + if (new_tx_count != adapter->tx_ring_count) { + kfree(adapter->tx_ring); + adapter->tx_ring = temp_tx_ring; + temp_tx_ring = NULL; + adapter->tx_ring_count = new_tx_count; + } + + /* rx */ + if (new_rx_count != adapter->rx_ring_count) { + kfree(adapter->rx_ring); + adapter->rx_ring = temp_rx_ring; + temp_rx_ring = NULL; + adapter->rx_ring_count = new_rx_count; + } } /* success! */ err = 0; -err_setup: - ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); + ixgbe_up(adapter); +err_setup: clear_bit(__IXGBE_RESETTING, &adapter->state); return err; } diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 79aa811c403..286ecc0e6ab 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, struct ixgbe_tx_buffer *tx_buffer_info) { - if (tx_buffer_info->dma) { - pci_unmap_page(adapter->pdev, tx_buffer_info->dma, - tx_buffer_info->length, PCI_DMA_TODEVICE); - tx_buffer_info->dma = 0; - } + tx_buffer_info->dma = 0; if (tx_buffer_info->skb) { + skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb, + DMA_TO_DEVICE); dev_kfree_skb_any(tx_buffer_info->skb); tx_buffer_info->skb = NULL; } + tx_buffer_info->time_stamp = 0; /* tx_buffer_info must be completely set up in the transmit path */ } @@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, unsigned int eop) { struct ixgbe_hw *hw = &adapter->hw; - u32 head, tail; /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of eop */ - head = IXGBE_READ_REG(hw, tx_ring->head); - tail = IXGBE_READ_REG(hw, tx_ring->tail); adapter->detect_tx_hung = false; - if ((head != tail) && - tx_ring->tx_buffer_info[eop].time_stamp && + if (tx_ring->tx_buffer_info[eop].time_stamp && time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { /* detected Tx unit hang */ @@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, " time_stamp <%lx>\n" " jiffies <%lx>\n", tx_ring->queue_index, - head, tail, + IXGBE_READ_REG(hw, tx_ring->head), + IXGBE_READ_REG(hw, tx_ring->tail), tx_ring->next_to_use, eop, tx_ring->tx_buffer_info[eop].time_stamp, jiffies); return true; @@ -2934,6 +2930,7 @@ err_tx_ring_allocation: **/ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { + struct ixgbe_hw *hw = &adapter->hw; int err = 0; int vector, v_budget; @@ -2948,12 +2945,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) /* * At the same time, hardware can only support a maximum of - * 
MAX_MSIX_COUNT vectors. With features such as RSS and VMDq, - * we can easily reach upwards of 64 Rx descriptor queues and - * 32 Tx queues. Thus, we cap it off in those rare cases where - * the cpu count also exceeds our vector limit. + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. */ - v_budget = min(v_budget, MAX_MSIX_COUNT); + v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. */ @@ -3169,11 +3166,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* default flow control settings */ - hw->fc.requested_mode = ixgbe_fc_none; + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ hw->fc.high_water = IXGBE_DEFAULT_FCRTH; hw->fc.low_water = IXGBE_DEFAULT_FCRTL; hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = false; /* enable itr by default in dynamic mode */ adapter->itr_setting = 1; @@ -3489,10 +3488,10 @@ err_up: ixgbe_release_hw_control(adapter); ixgbe_free_irq(adapter); err_req_irq: - ixgbe_free_all_rx_resources(adapter); err_setup_rx: - ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); err_setup_tx: + ixgbe_free_all_tx_resources(adapter); ixgbe_reset(adapter); return err; @@ -4163,32 +4162,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, struct sk_buff *skb, unsigned int first) { struct ixgbe_tx_buffer *tx_buffer_info; - unsigned int len = skb->len; + unsigned int len = skb_headlen(skb); unsigned int offset = 0, size, count = 0, i; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; - - len -= skb->data_len; + dma_addr_t *map; i = tx_ring->next_to_use; + if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { + dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); + return 0; + } + + map = skb_shinfo(skb)->dma_maps; + while (len) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; - tx_buffer_info->dma = pci_map_single(adapter->pdev, - skb->data + offset, - size, PCI_DMA_TODEVICE); + tx_buffer_info->dma = map[0] + offset; tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; len -= size; offset += size; count++; - i++; - if (i == tx_ring->count) - i = 0; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } } for (f = 0; f < nr_frags; f++) { @@ -4196,33 +4202,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, frag = &skb_shinfo(skb)->frags[f]; len = frag->size; - offset = frag->page_offset; + offset = 0; while (len) { + i++; + if (i == tx_ring->count) + i = 0; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; - tx_buffer_info->dma = pci_map_page(adapter->pdev, - frag->page, - offset, - size, - PCI_DMA_TODEVICE); + tx_buffer_info->dma = map[f + 1] + offset; tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; len -= size; offset += size; count++; - i++; - if (i == tx_ring->count) - i = 0; } } - if (i == 0) - i = tx_ring->count - 1; - else - i = i - 1; + tx_ring->tx_buffer_info[i].skb = skb; tx_ring->tx_buffer_info[first].next_to_watch = i; @@ -4388,13 +4388,19 @@ 
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	    (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;

-	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-		       ixgbe_tx_map(adapter, tx_ring, skb, first),
-		       skb->len, hdr_len);
+	count = ixgbe_tx_map(adapter, tx_ring, skb, first);

-	netdev->trans_start = jiffies;
+	if (count) {
+		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+			       hdr_len);
+		netdev->trans_start = jiffies;
+		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

-	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+	} else {
+		dev_kfree_skb_any(skb);
+		tx_ring->tx_buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+	}

 	return NETDEV_TX_OK;
 }
@@ -4987,8 +4993,20 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
+
 #endif /* CONFIG_IXGBE_DCA */
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	return adapter->netdev->name;
+}
+#endif
 module_exit(ixgbe_exit_module);

 /* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2b2ecba7b60..030ff0a9ea6 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2005,6 +2005,7 @@ struct ixgbe_fc_info {
 	u16 pause_time; /* Flow Control Pause timer */
 	bool send_xon; /* Flow control send XON */
 	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Turn off autoneg FC mode */
 	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
 	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
 };
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 9f6644a4403..303c23de6ca 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -505,7 +505,7 @@ out:

 static void mlx4_en_do_get_stats(struct work_struct *work)
 {
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct delayed_work *delay = to_delayed_work(work);
 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
 						 stats_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index a4130e76499..7e40741fb7d 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -298,7 +298,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,

 void mlx4_en_rx_refill(struct work_struct *work)
 {
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct delayed_work *delay = to_delayed_work(work);
 	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
 						 refill_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 6d5089ecb5a..f36ae691cab 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -103,7 +103,7 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,

 static void mlx4_sense_port(struct work_struct *work)
 {
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct delayed_work *delay = to_delayed_work(work);
 	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
 						sense_poll);
 	struct mlx4_dev *dev = sense->dev;
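The three mlx4 hunks above (and a matching phy.c hunk later in this commit) are the same mechanical conversion: the open-coded container_of(work, struct delayed_work, work) becomes the to_delayed_work() helper from <linux/workqueue.h>. A minimal sketch of the resulting idiom, assuming a made-up driver private struct (my_priv, poll_task and my_poll are illustrative names, not symbols from this commit):

	#include <linux/workqueue.h>

	struct my_priv {
		struct delayed_work poll_task;
	};

	static void my_poll(struct work_struct *work)
	{
		/* to_delayed_work() wraps container_of(), recovering the
		 * delayed_work that embeds this work_struct... */
		struct delayed_work *dwork = to_delayed_work(work);
		/* ...and a second container_of() recovers the driver data. */
		struct my_priv *priv = container_of(dwork, struct my_priv,
						    poll_task);

		/* do the periodic work, then re-arm one second out */
		schedule_delayed_work(&priv->poll_task, HZ);
	}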
diff --git a/drivers/net/pcmcia/ositech.h b/drivers/net/pcmcia/ositech.h
deleted file mode 100644
index 4126efc355b..00000000000
--- a/drivers/net/pcmcia/ositech.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
-    This file contains the firmware of Seven of Diamonds from OSITECH.
-    (Special thanks to Kevin MacPherson of OSITECH)
-
-    This software may be used and distributed according to the terms of
-    the GNU General Public License, incorporated herein by reference.
-*/
-
-    static const u_char __Xilinx7OD[] = {
-    0xFF, 0x04, 0xA0, 0x36, 0xF3, 0xEC, 0xFF, 0xFF, 0xFF, 0xDF, 0xFB, 0xFF,
-[... the remaining ~340 lines of raw Xilinx bitstream bytes elided ...]
-    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
-
-    };
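The hex array deleted above is not lost functionality: the same Seven of Diamonds bitstream now ships as a separate image ("ositech/Xilinx7OD.bin") that the driver fetches at runtime through the kernel firmware loader, as the smc91c92_cs.c patch below shows. A minimal sketch of that request_firmware() pattern, assuming illustrative names (my_load_fw, MY_FW_NAME and io_base are stand-ins; the real helper added below is osi_load_firmware()):

	#include <linux/firmware.h>
	#include <linux/delay.h>
	#include <linux/io.h>

	#define MY_FW_NAME "ositech/Xilinx7OD.bin"

	static int my_load_fw(struct device *dev, unsigned int io_base)
	{
		const struct firmware *fw;
		size_t i;
		int err;

		/* udev supplies the image by name from /lib/firmware */
		err = request_firmware(&fw, MY_FW_NAME, dev);
		if (err)
			return err;

		/* byte-bang the bitstream into the card, as the old loop
		 * over the static __Xilinx7OD[] array used to do */
		for (i = 0; i < fw->size; i++) {
			outb(fw->data[i], io_base + 2);
			udelay(50);
		}

		release_firmware(fw);
		return 0;
	}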
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 774232c13b3..48dbb35747d 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -42,6 +42,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/jiffies.h>
+#include <linux/firmware.h>

 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
@@ -55,17 +56,18 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>

-/* Ositech Seven of Diamonds firmware */
-#include "ositech.h"
-
 /*====================================================================*/

 static const char *if_names[] = { "auto", "10baseT", "10base2"};

+/* Firmware name */
+#define FIRMWARE_NAME		"ositech/Xilinx7OD.bin"
+
 /* Module parameters */

 MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_NAME);

 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)

@@ -771,6 +773,26 @@ static int osi_config(struct pcmcia_device *link)
     return i;
 }

+static int osi_load_firmware(struct pcmcia_device *link)
+{
+	const struct firmware *fw;
+	int i, err;
+
+	err = request_firmware(&fw, FIRMWARE_NAME, &link->dev);
+	if (err) {
+		pr_err("Failed to load firmware \"%s\"\n", FIRMWARE_NAME);
+		return err;
+	}
+
+	/* Download the Seven of Diamonds firmware */
+	for (i = 0; i < fw->size; i++) {
+		outb(fw->data[i], link->io.BasePort1 + 2);
+		udelay(50);
+	}
+	release_firmware(fw);
+	return err;
+}
+
 static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
 {
     struct net_device *dev = link->priv;
@@ -811,11 +833,9 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
 	    (cardid == PRODID_OSITECH_SEVEN)) ||
 	((manfid == MANFID_PSION) &&
 	    (cardid == PRODID_PSION_NET100))) {
-	/* Download the Seven of Diamonds firmware */
-	for (i = 0; i < sizeof(__Xilinx7OD); i++) {
-	    outb(__Xilinx7OD[i], link->io.BasePort1+2);
-	    udelay(50);
-	}
+	rc = osi_load_firmware(link);
+	if (rc)
+	    goto free_cfg_mem;
     } else if (manfid == MANFID_OSITECH) {
 	/* Make sure both functions are powered up */
 	set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
@@ -862,10 +882,10 @@ static int smc91c92_resume(struct pcmcia_device *link)
 	    (smc->cardid == PRODID_OSITECH_SEVEN)) ||
 	((smc->manfid == MANFID_PSION) &&
 	    (smc->cardid == PRODID_PSION_NET100))) {
-	/* Download the Seven of Diamonds firmware */
-	for (i = 0; i < sizeof(__Xilinx7OD); i++) {
-	    outb(__Xilinx7OD[i], link->io.BasePort1+2);
-	    udelay(50);
+	i = osi_load_firmware(link);
+	if (i) {
+	    pr_err("smc91c92_cs: Failed to load firmware\n");
+	    return i;
 	}
     }
     if (link->open) {
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 58b73b08dde..3ff1f425f1b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -757,8 +757,7 @@ EXPORT_SYMBOL(phy_start);
  */
 static void phy_state_machine(struct work_struct *work)
 {
-	struct delayed_work *dwork =
-		container_of(work, struct delayed_work, work);
+	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
 	int needs_aneg = 0;
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index a50078627fb..913b2a5fafc 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -33,7 +33,6 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
-#include <linux/version.h>

 #include "qlge.h"

diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 06c53522266..e1a638a05f8 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2075,8 +2075,7 @@
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!tp->pcie_cap && netif_msg_probe(tp)) dev_info(&pdev->dev, "no PCI Express capability\n"); - /* Unneeded ? Don't mess with Mrs. Murphy. */ - rtl8169_irq_mask_and_ack(ioaddr); + RTL_W16(IntrMask, 0x0000); /* Soft reset the chip. */ RTL_W8(ChipCmd, CmdReset); @@ -2088,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) msleep_interruptible(1); } + RTL_W16(IntrStatus, 0xffff); + /* Identify chip attached to board */ rtl8169_get_mac_version(tp, ioaddr); diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 00c23b1babc..dee23b159df 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -448,6 +448,9 @@ static void efx_init_channels(struct efx_nic *efx) WARN_ON(channel->rx_pkt != NULL); efx_rx_strategy(channel); + + netif_napi_add(channel->napi_dev, &channel->napi_str, + efx_poll, napi_weight); } } @@ -462,10 +465,6 @@ static void efx_start_channel(struct efx_channel *channel) EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); - if (!(channel->efx->net_dev->flags & IFF_UP)) - netif_napi_add(channel->napi_dev, &channel->napi_str, - efx_poll, napi_weight); - /* The interrupt handler for this channel may set work_pending * as soon as we enable it. Make sure it's cleared before * then. Similarly, make sure it sees the enabled flag set. */ diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index b52a1c088f3..d91e95b237b 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -1908,7 +1908,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget) do { tc_writel(status, &tr->Int_Src); /* write to clear */ - handled = tc35815_do_interrupt(dev, status, limit); + handled = tc35815_do_interrupt(dev, status, budget - received); if (handled >= 0) { received += handled; if (received >= budget) diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 4a65fc2dd92..534c0f38483 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -62,6 +62,7 @@ #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/bitops.h> +#include <linux/firmware.h> #include <net/checksum.h> @@ -73,8 +74,10 @@ static char version[] __devinitdata = "3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ; +#define FW_NAME "3com/3C359.bin" MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; +MODULE_FIRMWARE(FW_NAME); /* Module paramters */ @@ -114,8 +117,6 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ; * will be stuck with 1555 lines of hex #'s in the code. 
 */
-#include "3c359_microcode.h"
-
 static struct pci_device_id xl_pci_tbl[] =
 {
 	{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
@@ -364,10 +365,30 @@ static int __devinit xl_probe(struct pci_dev *pdev,
 	return 0;
 }

+static int xl_init_firmware(struct xl_private *xl_priv)
+{
+	int err;
+
+	err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
+	if (err) {
+		printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME);
+		return err;
+	}
+
+	if (xl_priv->fw->size < 16) {
+		printk(KERN_ERR "Bogus length %zu in \"%s\"\n",
+		       xl_priv->fw->size, FW_NAME);
+		release_firmware(xl_priv->fw);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
 static int __devinit xl_init(struct net_device *dev)
 {
 	struct xl_private *xl_priv = netdev_priv(dev);
+	int err;

 	printk(KERN_INFO "%s \n", version);
 	printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
@@ -375,8 +396,11 @@ static int __devinit xl_init(struct net_device *dev)

 	spin_lock_init(&xl_priv->xl_lock) ;

-	return xl_hw_reset(dev) ;
+	err = xl_init_firmware(xl_priv);
+	if (err == 0)
+		err = xl_hw_reset(dev);
+	return err;
 }

@@ -386,7 +410,7 @@ static int __devinit xl_init(struct net_device *dev)
  */

 static int xl_hw_reset(struct net_device *dev)
-{
+{
 	struct xl_private *xl_priv = netdev_priv(dev);
 	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
 	unsigned long t ;
@@ -396,6 +420,9 @@ static int xl_hw_reset(struct net_device *dev)
 	u16 start ;
 	int j ;

+	if (xl_priv->fw == NULL)
+		return -EINVAL;
+
 	/*
 	 * Reset the card. If the card has got the microcode on board, we have
 	 * missed the initialization interrupt, so we must always do this.
@@ -458,25 +485,30 @@ static int xl_hw_reset(struct net_device *dev)
 	/*
 	 * Now to write the microcode into the shared ram
-	 * The microcode must finish at position 0xFFFF, so we must subtract
-	 * to get the start position for the code
+	 * The microcode must finish at position 0xFFFF,
+	 * so we must subtract to get the start position for the code
+	 *
+	 * Looks strange but ensures compiler only uses
+	 * 16 bit unsigned int
 	 */
+	start = (0xFFFF - (xl_priv->fw->size) + 1) ;

-	start = (0xFFFF - (mc_size) + 1 ) ; /* Looks strange but ensures compiler only uses 16 bit unsigned int for this */
-
 	printk(KERN_INFO "3C359: Uploading Microcode: ");
-
-	for (i = start, j = 0; j < mc_size; i++, j++) {
-		writel(MEM_BYTE_WRITE | 0XD0000 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		writeb(microcode[j],xl_mmio + MMIO_MACDATA) ;
+
+	for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
+		writel(MEM_BYTE_WRITE | 0XD0000 | i,
+		       xl_mmio + MMIO_MAC_ACCESS_CMD);
+		writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
 		if (j % 1024 == 0)
 			printk(".");
 	}
 	printk("\n") ;

-	for (i=0;i < 16; i++) {
-		writel( (MEM_BYTE_WRITE | 0xDFFF0) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		writeb(microcode[mc_size - 16 + i], xl_mmio + MMIO_MACDATA) ;
+	for (i = 0; i < 16; i++) {
+		writel((MEM_BYTE_WRITE | 0xDFFF0) + i,
+		       xl_mmio + MMIO_MAC_ACCESS_CMD);
+		writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
+		       xl_mmio + MMIO_MACDATA);
 	}

 	/*
@@ -1782,6 +1814,7 @@ static void __devexit xl_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct xl_private *xl_priv=netdev_priv(dev);

+	release_firmware(xl_priv->fw);
 	unregister_netdev(dev);
 	iounmap(xl_priv->xl_mmio) ;
 	pci_release_regions(pdev) ;
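The upload loop in xl_hw_reset() above is end-anchored: the image has to finish exactly at offset 0xFFFF of the adapter's shared RAM window, so the start offset is derived by subtracting the image size, and the last 16 bytes are then rewritten at the very top of the window (the 0xDFFF0-based addresses). A sketch of just that addressing arithmetic, using a plain array in place of the card's MMIO access-command/data registers (end_anchored_copy, win, fw_data and fw_size are illustrative names, not from the patch):

	#include <stdint.h>
	#include <string.h>

	static void end_anchored_copy(uint8_t win[0x10000],
				      const uint8_t *fw_data, uint16_t fw_size)
	{
		/* For the 24880-byte image this gives start == 0x9ed0;
		 * the 16-bit arithmetic wraps exactly as the driver's
		 * "ensures compiler only uses 16 bit unsigned int"
		 * comment relies on. */
		uint16_t start = 0xffff - fw_size + 1;

		/* the copy ends at win[0xffff], the last byte of the
		 * shared RAM window */
		memcpy(&win[start], fw_data, fw_size);
	}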
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
index 66b1ff60323..bcb1a6b4a4c 100644
--- a/drivers/net/tokenring/3c359.h
+++ b/drivers/net/tokenring/3c359.h
@@ -284,5 +284,8 @@ struct xl_private {
 	u8 xl_laa[6] ;
 	u32 rx_ring_dma_addr ;
 	u32 tx_ring_dma_addr ;
+
+	/* firmware section */
+	const struct firmware *fw;
 };
diff --git a/drivers/net/tokenring/3c359_microcode.h b/drivers/net/tokenring/3c359_microcode.h
deleted file mode 100644
index 0400c029c07..00000000000
--- a/drivers/net/tokenring/3c359_microcode.h
+++ /dev/null
@@ -1,1581 +0,0 @@
-
-/*
- * The firmware this driver downloads into the tokenring card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * This firmware is licensed to you strictly for use in conjunction
- * with the use of 3Com 3C359 TokenRing adapters. There is no
- * waranty expressed or implied about its fitness for any purpose.
- */
-
-/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode.
- *
- * Notes:
- *  - Loaded from xl_init upon adapter initialization.
- *
- * Available from 3Com as part of their standard 3C359 driver.
- *
- * mc_size *must* match the microcode being used, each version is a
- * different length.
- */
-
-static int mc_size = 24880 ;
-
-static const u8 microcode[] = {
- 0xfe,0x3a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00

[The remaining ~1,550 deleted lines are the raw hex bytes of the 24880-byte
microcode blob and are omitted here; with this patch the image is no longer
embedded in the kernel source and is loaded at runtime instead.]
-,0x2a,0x00,0xe8,0x5b,0x06,0xe8,0x74,0x05,0xe8,0x04,0x05,0xc3,0x26,0xc7,0x06,0x04 -,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x1a,0x00,0x26,0xc7,0x06,0x06,0x00,0x06 -,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0xa3,0x04,0xe8,0x66,0x04,0x26,0xc7,0x06 -,0x26,0x00,0x00,0x0c,0x26,0xc6,0x06,0x28,0x00,0x60,0x26,0xc6,0x06,0x29,0x00,0x27 -,0xbf,0x2a,0x00,0xe8,0x21,0x05,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7 -,0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x0a,0x00,0x26,0xc7,0x06,0x0a -,0x00,0x04,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x4b,0x04,0xe8,0x24,0x04,0x26 -,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29 -,0x00,0x26,0xbf,0x2a,0x00,0xe8,0xf4,0x04,0xe8,0x84,0x04,0xc3,0x26,0xc7,0x06,0x04 -,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06 -,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x0d,0x04,0xe8,0xe6,0x03,0x26,0xc7,0x06 -,0x26,0x00,0x00,0x26,0x26,0xc6,0x06,0x28,0x00,0x40,0x26,0xc6,0x06,0x29,0x00,0x25 -,0xbf,0x2a,0x00,0xe8,0xb6,0x04,0xe8,0x46,0x04,0xe8,0xfa,0x04,0xc3,0x26,0xc7,0x06 -,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x38,0x00,0xa1,0xa2,0x37,0x50,0x0b -,0xc0,0x75,0x07,0x26,0xc7,0x06,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06 -,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x99,0x03,0xe8,0xa4,0xfd,0x26,0xc7,0x45 -,0x26,0x00,0x2a,0x58,0x0b,0xc0,0x75,0x06,0x26,0xc7,0x45,0x26,0x00,0x26,0xa1,0x1c -,0x37,0xc1,0xe0,0x04,0x26,0x88,0x45,0x28,0x26,0xc6,0x45,0x29,0x24,0x83,0xc7,0x2a -,0xe8,0x29,0x04,0xe8,0xa0,0x04,0xe8,0x22,0x05,0xe8,0xf8,0x03,0xe8,0x09,0x04,0xc3 -,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x32,0x00,0x26,0xc7 -,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x45,0x03,0xe8,0x50 -,0xfd,0x26,0xc7,0x45,0x26,0x00,0x24,0xa1,0x1c,0x37,0xc1,0xe0,0x04,0x26,0x88,0x45 -,0x28,0x26,0xc6,0x45,0x29,0x23,0x83,0xc7,0x2a,0xe8,0xe0,0x03,0xe8,0x6c,0x04,0xe8 -,0x8a,0x04,0xe8,0x9c,0x04,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06 -,0x0e,0x00,0x34,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00 -,0x00,0xe8,0xff,0x02,0xe8,0x0a,0xfd,0x26,0xc7,0x45,0x26,0x00,0x26,0xa1,0x1c,0x37 -,0xc1,0xe0,0x04,0x26,0x88,0x45,0x28,0x26,0xc6,0x45,0x29,0x22,0x83,0xc7,0x2a,0xe8 -,0x9a,0x03,0xe8,0xc7,0x03,0xe8,0x57,0x03,0xe8,0xf8,0x03,0xe8,0x78,0x04,0xe8,0x8a -,0x04,0xc3,0x26,0xc7,0x06,0x04,0x00,0x74,0x45,0x26,0xc7,0x06,0x0e,0x00,0x3e,0x00 -,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc7,0x06,0x0a,0x00,0x04,0x00,0x26,0xc6 -,0x06,0x19,0x00,0x00,0xe8,0xfc,0x02,0xe8,0xa9,0x02,0x83,0x3e,0x8d,0x37,0x03,0x75 -,0x01,0x90,0x26,0xc7,0x06,0x26,0x00,0x00,0x30,0x26,0xc6,0x06,0x28,0x00,0x50,0x26 -,0xc6,0x06,0x29,0x00,0x20,0xbf,0x2a,0x00,0xe8,0xd0,0x03,0xe8,0x01,0x03,0xe8,0xb5 -,0x03,0xe8,0x9f,0x03,0xc3,0x26,0xc7,0x06,0x04,0x00,0x61,0x43,0xb9,0xf0,0x00,0x83 -,0xe9,0x02,0x26,0x89,0x0e,0x0e,0x00,0x26,0xc7,0x06,0x06,0x00,0x02,0x00,0x26,0xc6 -,0x06,0x19,0x00,0x00,0x26,0xc7,0x06,0x1a,0x00,0x00,0x00,0x26,0xc7,0x06,0x1c,0x00 -,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x00,0xe8,0x47,0x02,0x83,0xe9,0x0e,0x86 -,0xcd,0x26,0x89,0x0e,0x26,0x00,0x86,0xcd,0x26,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6 -,0x06,0x29,0x00,0x08,0xbf,0x2a,0x00,0x83,0xe9,0x04,0x26,0x89,0x0d,0x26,0xc6,0x45 -,0x01,0x26,0x8d,0x7d,0x02,0x83,0xe9,0x02,0xbb,0x01,0x00,0xb8,0x30,0x30,0x4b,0x75 -,0x17,0xbb,0x0a,0x00,0x8a,0xc4,0x26,0x88,0x05,0xb0,0x31,0x80,0xc4,0x01,0x80,0xfc -,0x3a,0x75,0x0a,0xb4,0x61,0xe9,0x05,0x00,0x26,0x88,0x05,0x04,0x01,0x47,0x49,0x75 -,0xdd,0xc3,0x26,0xc7,0x06,0x04,0x00,0x04,0x45,0x26,0xc7,0x06,0x0e,0x00,0x12,0x00 
-,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x01,0xe8,0xe5,0x01 -,0xe8,0xd0,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x04,0x26,0xc6,0x06,0x28,0x00,0x00 -,0x26,0xc6,0x06,0x29,0x00,0x07,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7 -,0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19 -,0x00,0x06,0xe8,0x04,0x02,0xe8,0x9b,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26 -,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x06,0xbf,0x2a,0x00,0xe8,0x6b -,0x02,0xe8,0xfb,0x01,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e -,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x05 -,0xe8,0xc6,0x01,0xe8,0x5d,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06 -,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x05,0xbf,0x2a,0x00,0xe8,0x2d,0x02,0xe8 -,0xbd,0x01,0xc3,0xff,0x06,0x82,0x34,0x26,0xc7,0x06,0x04,0x00,0x3d,0x41,0x26,0xc7 -,0x06,0x0e,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x0e,0x00,0x26,0xc6,0x06,0x19 -,0x00,0x04,0xe8,0x84,0x01,0xe8,0x1b,0x01,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26 -,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x04,0xbf,0x2a,0x00,0xe8,0xeb -,0x01,0xe8,0x7b,0x01,0xc3,0x26,0xc7,0x06,0x04,0x00,0x67,0x42,0x26,0xc7,0x06,0x0e -,0x00,0x20,0x00,0x26,0xc7,0x06,0x06,0x00,0x08,0x00,0x26,0xc6,0x06,0x19,0x00,0x03 -,0xe8,0x46,0x01,0xe8,0xdd,0x00,0x26,0xc7,0x06,0x26,0x00,0x00,0x12,0x26,0xc6,0x06 -,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x03,0xbf,0x2a,0x00,0xe8,0xad,0x01,0xe8 -,0x3d,0x01,0xc3,0xff,0x06,0x84,0x34,0x26,0xc7,0x06,0x04,0x00,0x67,0x42,0x26,0xc7 -,0x06,0x0e,0x00,0x24,0x00,0x26,0xc7,0x06,0x06,0x00,0x08,0x00,0x26,0xc6,0x06,0x19 -,0x00,0x02,0xe8,0x04,0x01,0xe8,0x9b,0x00,0x26,0xc7,0x06,0x26,0x00,0x00,0x16,0x26 -,0xc6,0x06,0x28,0x00,0x00,0x26,0xc6,0x06,0x29,0x00,0x02,0xbf,0x2a,0x00,0x26,0xc6 -,0x05,0x04,0x26,0xc6,0x45,0x01,0x01,0xa1,0x0f,0x37,0x86,0xe0,0xf6,0x06,0x6f,0x37 -,0x01,0x75,0x0f,0x39,0x06,0xcc,0x34,0x74,0x09,0x8b,0xd8,0xb8,0x89,0x03,0xcd,0x39 -,0x8b,0xc3,0xa3,0xcc,0x34,0x26,0x89,0x45,0x02,0x8d,0x7d,0x04,0xe8,0x3d,0x01,0xe8 -,0xcd,0x00,0xc3,0x26,0xc7,0x06,0x04,0x00,0xc4,0x2a,0x26,0xc7,0x06,0x0e,0x00,0x1c -,0x00,0xa1,0xa2,0x37,0x50,0x0b,0xc0,0x75,0x07,0x26,0xc7,0x06,0x0e,0x00,0x18,0x00 -,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x23,0x00 -,0xe8,0x2e,0xfa,0x26,0xc7,0x45,0x26,0x00,0x0e,0x58,0x0b,0xc0,0x75,0x06,0x26,0xc7 -,0x45,0x26,0x00,0x0a,0x26,0xc6,0x45,0x29,0x00,0x83,0xc7,0x2a,0xe8,0xbd,0x00,0xe8 -,0xff,0x00,0xc3,0x56,0x57,0x51,0xb9,0x03,0x00,0xbe,0xd1,0x36,0xbf,0x20,0x00,0xf3 -,0xa5,0x59,0x5f,0x5e,0xc3,0x56,0x57,0x51,0xb9,0x03,0x00,0xbe,0xd1,0x36,0xbf,0x1a -,0x00,0xf3,0xa5,0x59,0x5f,0x5e,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00,0x26,0xc7 -,0x06,0x1c,0x00,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x10,0xc3,0x26,0xc7,0x06 -,0x1a,0x00,0xc0,0x00,0x26,0xc7,0x06,0x1c,0x00,0x00,0x00,0x26,0xc7,0x06,0x1e,0x00 -,0x00,0x08,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00,0x26,0xc7,0x06,0x1c,0x00,0x00 -,0x00,0x26,0xc7,0x06,0x1e,0x00,0x00,0x02,0xc3,0x26,0xc7,0x06,0x1a,0x00,0xc0,0x00 -,0x26,0xc7,0x06,0x1c,0x00,0xff,0xff,0x26,0xc7,0x06,0x1e,0x00,0xff,0xff,0xc3,0x26 -,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x02,0x8d,0x7d,0x02,0xbe,0x05,0x37,0xb9,0x03 -,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x06,0xa1,0x0d,0x37 -,0x26,0x89,0x45,0x02,0x8d,0x7d,0x04,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01 -,0x07,0xa1,0x0b,0x37,0x26,0x89,0x45,0x02,0x83,0xc7,0x04,0xc3,0xa1,0xa2,0x37,0x0b -,0xc0,0x74,0x13,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x09,0xa1,0x03,0x37,0x26 
-,0x89,0x45,0x02,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x02 -,0x8d,0x7d,0x02,0xbe,0x05,0x37,0xb9,0x03,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x06 -,0x26,0xc6,0x45,0x01,0x0b,0x8d,0x7d,0x02,0xbe,0xef,0x36,0xb9,0x02,0x00,0xf3,0xa5 -,0xc3,0x26,0xc6,0x05,0x06,0x26,0xc6,0x45,0x01,0x20,0xa1,0x68,0x37,0x26,0x89,0x45 -,0x02,0xa1,0x6a,0x37,0x26,0x88,0x65,0x05,0xc1,0xe0,0x04,0x26,0x88,0x45,0x04,0x83 -,0xc7,0x06,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x21,0x26,0xc7,0x45,0x02 -,0x00,0x00,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x14,0x26,0xc6,0x45,0x01,0x22,0x8d -,0x7d,0x02,0xbe,0x1f,0x37,0xb9,0x09,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x0c,0x26 -,0xc6,0x45,0x01,0x23,0x8d,0x7d,0x02,0x1e,0x0e,0x1f,0x8d,0x36,0x40,0x54,0xb9,0x03 -,0x00,0xf3,0xa5,0x33,0xc0,0xb9,0x02,0x00,0xf3,0xab,0x1f,0xc3,0x26,0xc6,0x05,0x08 -,0x26,0xc6,0x45,0x01,0x28,0x8d,0x7d,0x02,0xbe,0xd1,0x36,0xb9,0x03,0x00,0xf3,0xa5 -,0xc3,0x26,0xc6,0x05,0x08,0x26,0xc6,0x45,0x01,0x29,0xa1,0xc2,0x34,0x86,0xe0,0x26 -,0x89,0x45,0x02,0xa1,0x9b,0x36,0x26,0x89,0x45,0x04,0x26,0x88,0x45,0x06,0x26,0x88 -,0x45,0x07,0x8d,0x7d,0x08,0xc3,0x26,0xc6,0x05,0x06,0x26,0xc6,0x45,0x01,0x2b,0x8d -,0x7d,0x02,0xbe,0xbb,0x36,0xb9,0x02,0x00,0xf3,0xa5,0xc3,0x26,0xc6,0x05,0x06,0x26 -,0xc6,0x45,0x01,0x2c,0x8d,0x7d,0x02,0xbe,0xe5,0x36,0xb9,0x02,0x00,0xf3,0xa5,0xc3 -,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x30,0xa1,0x37,0x37,0x86,0xe0,0x26,0x89 -,0x45,0x02,0x8d,0x7d,0x04,0xc3,0x26,0xc7,0x06,0x0e,0x00,0x1e,0x00,0x26,0xc7,0x06 -,0x06,0x00,0x02,0x00,0x26,0xc6,0x06,0x19,0x00,0x00,0xe8,0x6c,0xfe,0xe8,0x03,0xfe -,0x26,0xc7,0x06,0x26,0x00,0x00,0x10,0x26,0xc6,0x06,0x28,0x00,0x30,0x26,0xc6,0x06 -,0x29,0x00,0x11,0xbf,0x2a,0x00,0xe8,0x35,0x00,0xe8,0x45,0x00,0xe8,0x55,0x00,0xc3 -,0x26,0xc7,0x06,0x0e,0x00,0x12,0x00,0x26,0xc7,0x06,0x06,0x00,0x02,0x00,0x26,0xc6 -,0x06,0x19,0x00,0x00,0xe8,0x32,0xfe,0xe8,0xc9,0xfd,0x26,0xc7,0x06,0x26,0x00,0x00 -,0x04,0x26,0xc6,0x06,0x28,0x00,0x30,0x26,0xc6,0x06,0x29,0x00,0x13,0xc3,0x26,0xc6 -,0x05,0x04,0x26,0xc6,0x45,0x01,0x0c,0x26,0xc7,0x45,0x02,0x00,0x01,0x83,0xc7,0x04 -,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x0e,0x26,0xc7,0x45,0x02,0x00,0x02 -,0x83,0xc7,0x04,0xc3,0x26,0xc6,0x05,0x04,0x26,0xc6,0x45,0x01,0x21,0x26,0xc7,0x45 -,0x02,0x00,0x00,0x83,0xc7,0x04,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0xb3,0x39,0xc9,0x39,0x83,0x3a,0xb3,0x39,0xb3,0x39,0xb3,0x39,0x1c,0x3a,0x1c,0x3a -,0xa3,0xb6,0x34,0xa1,0xe9,0x36,0xa3,0x11,0x37,0xa3,0xd2,0x34,0xa1,0xeb,0x36,0xa3 -,0x13,0x37,0xa3,0xd4,0x34,0xa1,0xed,0x36,0xa3,0x15,0x37,0xa3,0xd6,0x34,0xa1,0x01 -,0x37,0xa3,0xce,0x34,0xa1,0xf7,0x36,0xa3,0x17,0x37,0xa3,0xdc,0x34,0xa1,0xf9,0x36 -,0xa3,0x19,0x37,0xa3,0xde,0x34,0xf7,0x06,0x9b,0x36,0x02,0x00,0x75,0x0c,0x33,0xc0 -,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0x50,0x39,0xe9,0x0f,0x01,0xbe,0x07,0x00 -,0xe9,0x19,0xf1,0xf6,0x06,0x9d,0x36,0x80,0x74,0xf3,0xc6,0x06,0xa0,0x36,0x02,0xc6 -,0x06,0x6e,0x37,0x08,0xc6,0x06,0x70,0x37,0x02,0xb8,0x88,0x03,0xcd,0x39,0xf6,0x06 -,0x6f,0x37,0x01,0x75,0x4a,0xa1,0xd1,0x36,0x3a,0x06,0xe9,0x36,0x75,0x41,0x3a,0x26 -,0xea,0x36,0x75,0x3b,0xa1,0xd3,0x36,0x3a,0x06,0xeb,0x36,0x75,0x32,0x3a,0x26,0xec -,0x36,0x75,0x2c,0xa1,0xd5,0x36,0x3a,0x06,0xed,0x36,0x75,0x23,0x3a,0x26,0xee,0x36 -,0x75,0x1d,0xc6,0x06,0x70,0x37,0x02,0xfe,0x0e,0x6e,0x37,0x75,0x0f,0xb8,0x88,0x03 -,0xcd,0x3a,0x83,0x0e,0x9b,0x36,0x12,0xc6,0x06,0xa0,0x36,0x0c,0xe9,0xa8,0xf0,0xa1 -,0x05,0x37,0x26,0x3b,0x06,0x20,0x00,0x75,0x40,0xa1,0x07,0x37,0x26,0x3b,0x06,0x22 -,0x00,0x75,0x36,0xa1,0x09,0x37,0x26,0x3b,0x06,0x24,0x00,0x75,0x2c,0xa0,0x9e,0x36 
-,0x3c,0x02,0x75,0x08,0x26,0xf6,0x06,0x18,0x00,0x08,0x75,0x47,0xc6,0x06,0x6e,0x37 -,0x08,0xfe,0x0e,0x70,0x37,0x75,0x1c,0xc6,0x06,0x70,0x37,0x02,0xe5,0x02,0x0d,0x01 -,0x04,0x25,0xef,0xff,0xe7,0x02,0xe9,0x5e,0xf0,0xc6,0x06,0x70,0x37,0x02,0xc6,0x06 -,0x6e,0x37,0x08,0xe5,0x02,0x25,0xff,0xfb,0x0d,0x01,0x00,0x25,0xef,0xff,0xe7,0x02 -,0xe9,0x44,0xf0,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x25,0x26,0xf6,0x06,0x18,0x00 -,0x08,0x75,0xed,0x81,0x26,0x9b,0x36,0x7f,0xff,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x84 -,0x03,0xcd,0x3a,0xc6,0x06,0xa0,0x36,0x06,0x83,0x26,0xc2,0x34,0xaf,0xe9,0x17,0xf0 -,0xa1,0x01,0x37,0x3a,0x26,0x0f,0x37,0x7f,0xc7,0xe9,0xf7,0xfe,0x83,0x26,0x9b,0x36 -,0xec,0xe8,0x2a,0x0d,0x81,0x0e,0x9b,0x36,0x80,0x00,0xbb,0xff,0x7f,0xcd,0x53,0xc6 -,0x06,0xa0,0x36,0x02,0xe9,0xf0,0xef,0x83,0x0e,0x9b,0x36,0x11,0xc6,0x06,0xa0,0x36 -,0x0c,0xe9,0xf9,0xef,0x44,0x3b,0x2c,0x3b,0xc7,0x2a,0x6b,0x3b,0x44,0x3b,0xc7,0x2a -,0xc7,0x2a,0xc7,0x2a,0xa3,0xb6,0x34,0x81,0x0e,0xc2,0x34,0x00,0x20,0xf7,0x06,0x41 -,0x37,0x01,0x00,0x74,0x1b,0x8c,0xc3,0xc7,0x06,0x41,0x37,0x00,0x00,0xb8,0x7f,0x03 -,0xcd,0x3a,0x33,0xc0,0x8e,0xc0,0xbf,0x54,0x37,0xb9,0x06,0x00,0xf3,0xab,0x8e,0xc3 -,0x33,0xc0,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0xe4,0x3a,0xf7,0x06,0x9b,0x36 -,0x00,0x01,0x75,0x21,0x83,0x26,0xc2,0x34,0xbf,0xa1,0xa9,0x36,0xe7,0x00,0xa1,0x9b -,0x36,0xe9,0x09,0x00,0xa1,0x9b,0x36,0x81,0x26,0x9b,0x36,0xff,0xdf,0xa9,0x00,0x20 -,0x75,0x06,0xe9,0x6e,0x00,0xe9,0x6f,0xef,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37 -,0x37,0x01,0x00,0xc6,0x06,0xca,0x34,0x01,0xe9,0x58,0x00,0x83,0x0e,0x9b,0x36,0x40 -,0xe8,0x58,0x00,0xa1,0x05,0x37,0x3b,0x06,0xe9,0x36,0x75,0x37,0xa1,0x07,0x37,0x3b -,0x06,0xeb,0x36,0x75,0x2e,0xa1,0x09,0x37,0x3b,0x06,0xed,0x36,0x75,0x25,0xfe,0x0e -,0x71,0x37,0x75,0x1c,0xb8,0x87,0x03,0xcd,0x3a,0x83,0x0e,0x99,0x36,0x10,0xa1,0x50 -,0x37,0xc7,0x06,0x50,0x37,0x00,0x00,0x09,0x06,0x99,0x36,0xc6,0x06,0xa0,0x36,0x08 -,0xe9,0x14,0xef,0x83,0x0e,0x99,0x36,0x04,0xc7,0x06,0x37,0x37,0x03,0x00,0xc6,0x06 -,0xca,0x34,0x03,0xc6,0x06,0xa0,0x36,0x0a,0xe9,0xfc,0xee,0xa1,0xd1,0x36,0x26,0x3b -,0x06,0x20,0x00,0x75,0x15,0xa1,0xd3,0x36,0x26,0x3b,0x06,0x22,0x00,0x75,0x12,0xa1 -,0xd5,0x36,0x26,0x3b,0x06,0x24,0x00,0x75,0x0f,0xc3,0x8d,0x36,0x20,0x00,0xe9,0x0b -,0x00,0x8d,0x36,0x22,0x00,0xe9,0x04,0x00,0x8d,0x36,0x24,0x00,0x83,0xc4,0x02,0xf7 -,0x06,0xe6,0x34,0x01,0x00,0x74,0x15,0x26,0x3a,0x04,0x77,0x08,0x72,0x0e,0x26,0x3a -,0x64,0x01,0x72,0x08,0xc6,0x06,0xa0,0x36,0x06,0xe9,0xab,0xee,0xe8,0x7c,0x0a,0x8c -,0xc0,0x3d,0xff,0xff,0x74,0x1b,0x26,0xc6,0x06,0x18,0x00,0x10,0x26,0xc7,0x06,0x04 -,0x00,0x49,0x3c,0x26,0xc7,0x06,0x06,0x00,0x0c,0x00,0xcd,0x50,0xb9,0x4e,0x00,0xe2 -,0xfe,0xc6,0x06,0xa0,0x36,0x0a,0xe9,0x94,0xee,0xe9,0x7b,0xee,0x8f,0x3c,0x06,0x3d -,0x06,0x3d,0x06,0x3d,0xd2,0x3c,0xea,0x3c,0x06,0x3d,0x06,0x3d,0xa3,0xb6,0x34,0x81 -,0x26,0xc2,0x34,0xaf,0xdf,0xc7,0x06,0x4c,0x37,0x00,0x00,0xb8,0x8a,0x03,0xcd,0x3a -,0x80,0x3e,0x9d,0x36,0x04,0x75,0x0c,0x80,0x3e,0x9e,0x36,0x06,0x74,0x05,0xc6,0x06 -,0x9f,0x36,0x06,0x33,0xc0,0xa0,0x9e,0x36,0x8b,0xf0,0x2e,0xff,0xa4,0x4c,0x3c,0xf7 -,0x06,0x9b,0x36,0x00,0x20,0x75,0x0e,0x81,0x26,0x9b,0x36,0xff,0xbf,0xb8,0x8b,0x03 -,0xcd,0x3a,0xe9,0x54,0x00,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x03,0xe9,0x17,0xee -,0xc7,0x06,0x37,0x37,0x02,0x00,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04 -,0x83,0x0e,0x50,0x37,0x04,0xf6,0x06,0x9d,0x36,0x80,0x75,0x2a,0xe8,0x1f,0x0b,0xe9 -,0x27,0x00,0xf7,0x06,0x9b,0x36,0x00,0x01,0x75,0xd3,0xc7,0x06,0x37,0x37,0x02,0x00 -,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04,0xc6,0x06,0xa0,0x36,0x00,0xf6 
-,0x06,0x9d,0x36,0x80,0x74,0x03,0xe8,0xde,0x0a,0x81,0x26,0x9b,0x36,0x7c,0xff,0xbb -,0xff,0xff,0xcd,0x53,0xcd,0x54,0xe9,0xbe,0xed,0xa3,0xb6,0x34,0xe8,0xad,0x01,0xb8 -,0x86,0x03,0xcd,0x39,0xc7,0x06,0x4c,0x37,0x00,0x00,0x81,0x26,0xc2,0x34,0xaf,0xdf -,0xf6,0x06,0x9d,0x36,0x80,0x74,0x34,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x56,0xf7 -,0x06,0x9b,0x36,0x00,0x01,0x74,0x27,0xe8,0x35,0x01,0x72,0x1c,0xbe,0x00,0x40,0x85 -,0x36,0xc2,0x34,0x75,0x08,0x09,0x36,0xc2,0x34,0xff,0x06,0x92,0x34,0xe8,0x8b,0x01 -,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00,0xe9,0x6c,0xed,0xe9,0xb5,0x00,0xc7,0x06 -,0x37,0x37,0x02,0x00,0xc6,0x06,0xca,0x34,0x02,0x83,0x0e,0x99,0x36,0x04,0x83,0x0e -,0x50,0x37,0x04,0x80,0x3e,0x9e,0x36,0x08,0x74,0x03,0xe8,0x5a,0x0a,0xe8,0xef,0x00 -,0x72,0xd6,0xe9,0xc8,0xff,0x80,0x3e,0x9e,0x36,0x0a,0x75,0x12,0xc6,0x06,0xa0,0x36 -,0x00,0xf7,0x06,0x9b,0x36,0x08,0x00,0x74,0x02,0xcd,0x54,0xe8,0x39,0x0a,0x81,0x26 -,0x9b,0x36,0xff,0xbf,0xe8,0xc8,0x00,0x72,0xaf,0xb8,0x8b,0x03,0xcd,0x39,0xe9,0x9c -,0xff,0xf6,0x06,0x9e,0x36,0xff,0x75,0x58,0xa3,0xb6,0x34,0xe8,0xfe,0x00,0x81,0x26 -,0xc2,0x34,0xff,0xbf,0xf6,0x06,0x9d,0x36,0x80,0x74,0x48,0xf7,0x06,0x9b,0x36,0x00 -,0x20,0x74,0x22,0xf7,0x06,0x9b,0x36,0x00,0x40,0x75,0x08,0xe8,0x91,0x00,0x72,0x30 -,0xe9,0x22,0x00,0x26,0xa1,0x0c,0x00,0xa9,0x60,0x00,0x75,0x24,0x81,0x0e,0x66,0x37 -,0x00,0x08,0xe9,0xd2,0xec,0xc7,0x06,0x4c,0x37,0x00,0x00,0xe8,0x71,0x00,0x72,0x10 -,0xb8,0x8b,0x03,0xcd,0x39,0xe8,0xd3,0x00,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00 -,0xe9,0xb4,0xec,0x80,0x3e,0x9d,0x36,0x04,0x75,0x0c,0x80,0x3e,0x9e,0x36,0x06,0x74 -,0x46,0xc6,0x06,0x9f,0x36,0x06,0xf7,0x06,0x9b,0x36,0x00,0x01,0x74,0x0c,0x80,0x3e -,0x9d,0x36,0x08,0x75,0x05,0xc6,0x06,0x9f,0x36,0x0a,0xe8,0x32,0x00,0x72,0xd1,0xe8 -,0x99,0x00,0x80,0x3e,0x9d,0x36,0x08,0x75,0x13,0x81,0x0e,0x99,0x36,0x80,0x00,0xf7 -,0x06,0x9b,0x36,0x00,0x20,0x75,0x08,0xb8,0x8b,0x03,0xcd,0x39,0xe9,0x68,0xec,0xc6 -,0x06,0x9f,0x36,0x0a,0xe9,0x60,0xec,0xb8,0x86,0x03,0xcd,0x3a,0xe9,0x58,0xec,0x26 -,0xa1,0x0c,0x00,0xa9,0x60,0x00,0x74,0x08,0x81,0x26,0xc2,0x34,0xff,0xbf,0xf9,0xc3 -,0xf7,0x06,0x9b,0x36,0x00,0x40,0x74,0x13,0x81,0x0e,0x66,0x37,0x00,0x08,0xe8,0x4a -,0x00,0x73,0x06,0x81,0x0e,0x99,0x36,0x80,0x00,0xf9,0xc3,0x81,0x0e,0x9b,0x36,0x00 -,0x40,0x80,0x26,0x6f,0x37,0xfe,0x81,0x26,0x9b,0x36,0x7f,0xff,0xc6,0x06,0xa0,0x36 -,0x00,0xf8,0xc3,0x81,0x0e,0x99,0x36,0x00,0x01,0xe9,0x21,0xec,0x26,0xa1,0x20,0x00 -,0xa3,0xfb,0x36,0xa3,0xaa,0x34,0x26,0xa1,0x22,0x00,0xa3,0xfd,0x36,0xa3,0xac,0x34 -,0x26,0xa1,0x24,0x00,0xa3,0xff,0x36,0xa3,0xae,0x34,0xc3,0xa1,0x05,0x37,0x26,0x3b -,0x06,0x20,0x00,0x75,0x19,0xa1,0x07,0x37,0x26,0x3b,0x06,0x22,0x00,0x75,0x0f,0xa1 -,0x09,0x37,0x26,0x3b,0x06,0x24,0x00,0x75,0x05,0xe8,0x02,0x00,0xf8,0xc3,0x51,0x1e -,0x06,0x8b,0xc7,0x8d,0x36,0x20,0x00,0xbf,0x05,0x37,0xb9,0x03,0x00,0x1e,0x06,0x1f -,0x07,0xf3,0xa5,0x8b,0xf8,0x8d,0x36,0x20,0x00,0xbf,0xa0,0x34,0xb9,0x03,0x00,0xf3 -,0xa5,0x07,0x1f,0x59,0x8b,0xf8,0xa1,0x07,0x37,0xa3,0xa6,0x34,0xa1,0x09,0x37,0xa3 -,0xa8,0x34,0xf9,0xc3,0xc6,0x06,0xb6,0x34,0x01,0xe9,0x8b,0xeb,0xe8,0x87,0x08,0x8b -,0xf0,0x05,0x12,0x00,0x26,0x29,0x06,0x0e,0x00,0x26,0x8b,0x44,0x2a,0x26,0x3a,0x06 -,0x0e,0x00,0x75,0x5b,0x26,0x83,0x2e,0x0e,0x00,0x02,0x80,0xfc,0x27,0x75,0x50,0x26 -,0x8b,0x44,0x2c,0xa9,0xff,0xff,0x75,0x47,0x8b,0xfe,0x33,0xc0,0x26,0xf6,0x45,0x3c -,0x80,0x74,0x06,0x26,0x8a,0x45,0x3a,0x24,0x1f,0x03,0xf8,0x26,0x80,0x7d,0x45,0x09 -,0x75,0x2d,0x8c,0xc2,0x8e,0x06,0x38,0x34,0x8e,0xda,0x8b,0x0e,0x0e,0x00,0x26,0x89 -,0x0e,0x0e,0x00,0x8d,0x74,0x2c,0xbf,0x18,0x00,0xf3,0xa4,0x33,0xc0,0x8e,0xd8,0x26 
-,0xc7,0x06,0x04,0x00,0xb5,0x3f,0x26,0xc7,0x06,0x06,0x00,0x06,0x00,0xcd,0x50,0xb8 -,0x06,0x80,0xe9,0xef,0xe9,0x26,0xa1,0x0c,0x00,0xa3,0x93,0x37,0x83,0x0e,0x99,0x36 -,0x01,0xe9,0x00,0xeb,0x26,0x80,0x3e,0x1c,0x00,0xff,0x75,0x2f,0x26,0x80,0x3e,0x1e -,0x00,0xff,0x75,0x27,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00,0x75,0x1b,0xa1,0xd1,0x36 -,0x26,0xa3,0x1a,0x00,0xa1,0xd3,0x36,0x26,0xa3,0x1c,0x00,0xa1,0xd5,0x36,0x26,0xa3 -,0x1e,0x00,0xb8,0x0a,0x80,0xe8,0x36,0x07,0xe9,0xe2,0xea,0xff,0x06,0x90,0x34,0xbe -,0x0a,0x00,0xc6,0x06,0xb6,0x34,0x01,0xf6,0x06,0x9d,0x36,0x80,0x75,0x05,0x83,0x0e -,0xc2,0x34,0x01,0xe9,0xb6,0xea,0x80,0x3e,0x9d,0x36,0x0a,0x75,0x0f,0x26,0xa1,0x0c -,0x00,0x25,0x07,0x00,0x3d,0x04,0x00,0x75,0x03,0xe8,0x79,0x00,0xa1,0xf3,0x36,0x86 -,0xe0,0xe7,0x1e,0xa3,0xe3,0x36,0x81,0x26,0x0b,0x37,0x00,0x03,0x81,0x26,0x0d,0x37 -,0x7b,0x7f,0x83,0x0e,0x0d,0x37,0x48,0xe8,0x1e,0x00,0x26,0xa1,0x0c,0x00,0x25,0x07 -,0x00,0x3d,0x04,0x00,0x74,0x09,0x26,0xf7,0x06,0x0c,0x00,0x20,0x00,0x75,0x06,0xb8 -,0x01,0x00,0xe9,0x3f,0xe9,0xe9,0x5f,0xea,0xc7,0x06,0x41,0x37,0x00,0x00,0xb8,0x7f -,0x03,0xcd,0x3a,0xa1,0x1d,0x37,0xa3,0xc4,0x34,0x86,0xe0,0x68,0x7f,0x03,0x1f,0xa3 -,0x06,0x00,0x33,0xc0,0x8e,0xd8,0xa1,0x0b,0x37,0xa3,0xb2,0x34,0xa1,0x0d,0x37,0xa3 -,0xb4,0x34,0xa1,0xf3,0x36,0xa3,0xc8,0x34,0xa1,0xef,0x36,0xa3,0x9c,0x34,0xa1,0xf1 -,0x36,0xa3,0x9e,0x34,0xc3,0x80,0x0e,0x9d,0x36,0x80,0xbe,0x00,0x00,0xe8,0xb4,0x07 -,0xb8,0x7b,0x03,0xcd,0x3a,0xb8,0x7c,0x03,0xcd,0x39,0xc7,0x06,0x33,0x37,0x02,0x00 -,0xa1,0xe5,0x36,0xe7,0x2e,0xa1,0xe7,0x36,0xe7,0x3e,0xb8,0x82,0x03,0xcd,0x3a,0xf7 -,0x06,0x9b,0x36,0x00,0x20,0x75,0x03,0xe8,0xfd,0x06,0xa1,0xd3,0x36,0xa3,0xef,0x36 -,0xa3,0x9c,0x34,0xa1,0xd5,0x36,0xa3,0xf1,0x36,0xa3,0x9e,0x34,0xc3,0xf6,0x06,0x9d -,0x36,0x80,0x74,0x31,0xbe,0x22,0x00,0xe9,0x17,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74 -,0x24,0xbe,0x23,0x00,0xe9,0x0a,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74,0x17,0xbe,0x24 -,0x00,0x56,0xe8,0xa8,0x05,0x8c,0xc0,0x3d,0xff,0xff,0x5e,0x74,0x05,0xe8,0xd7,0xef -,0xcd,0x50,0xe9,0x1f,0xe8,0xe9,0x9f,0xe9,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0xb8,0x84,0x03,0xcd,0x3a,0xb8,0x8a,0x03,0xcd,0x39,0xe9,0xf7,0x00,0x80,0x3e,0xa0 -,0x36,0x08,0x75,0x2e,0xa9,0xd0,0x07,0x75,0x2c,0xa1,0xb1,0x36,0x0d,0x00,0x04,0xe7 -,0x08,0xe5,0x00,0x25,0xff,0x73,0xe7,0x00,0xb8,0x8a,0x03,0xcd,0x3a,0xe8,0xc3,0x06 -,0x33,0xc0,0xe7,0x0e,0xe5,0x0a,0x25,0xc3,0x17,0xe7,0x0a,0xcd,0x54,0xc6,0x06,0xa0 -,0x36,0x00,0xe9,0x68,0xe9,0xbe,0x04,0x00,0xe9,0x3f,0xe9,0x83,0x26,0x9b,0x36,0xbf -,0xc6,0x06,0x71,0x37,0x03,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x88,0x03,0xcd,0x3a,0xb8 -,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x39,0x81,0x0e,0xc2,0x34,0x00,0x20,0xe9 -,0x92,0x00,0xe8,0x49,0x06,0xb8,0x87,0x03,0xcd,0x39,0xbb,0xff,0x7f,0xcd,0x53,0xb8 -,0x84,0x03,0xcd,0x3a,0xb8,0x88,0x03,0xcd,0x3a,0xb8,0x8b,0x03,0xcd,0x3a,0xb8,0x83 -,0x03,0xcd,0x3a,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd,0x3a,0xc3,0xe5,0x00 -,0x25,0xff,0x53,0xe7,0x00,0x83,0x0e,0xc2,0x34,0x40,0x83,0x26,0xc2,0x34,0xef,0xe8 -,0x0c,0x06,0xbb,0xff,0x7f,0xcd,0x53,0xb8,0x8a,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd -,0x3a,0xb8,0x86,0x03,0xcd,0x3a,0xb8,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x3a -,0xb8,0x8b,0x03,0xcd,0x3a,0xb8,0x84,0x03,0xcd,0x3a,0xb8,0x89,0x03,0xcd,0x3a,0xc3 -,0x83,0x0e,0xc2,0x34,0x50,0xe8,0x18,0x04,0xe8,0xd3,0x05,0xf6,0x06,0x6f,0x37,0x01 -,0x75,0x12,0xb8,0x89,0x03,0xcd,0x39,0x83,0x3e,0x0f,0x37,0x00,0x75,0x06,0xc7,0x06 -,0x0f,0x37,0x04,0x00,0xa1,0x9d,0x36,0x80,0xfc,0x08,0x74,0x05,0xb8,0x84,0x03,0xcd -,0x39,0xe5,0x02,0x0d,0x01,0x08,0x25,0xef,0xff,0xe7,0x02,0xa1,0x9d,0x36,0x86,0xe0 
-,0x32,0xe4,0x8b,0xf0,0xd1,0xee,0x33,0xc0,0x0d,0x20,0x00,0x09,0x06,0xad,0x36,0xa1 -,0xad,0x36,0xe7,0x04,0xe9,0x53,0xe8,0xe9,0x5a,0xe8,0x33,0xc0,0xa0,0x1b,0x37,0xd1 -,0xe0,0x3a,0x06,0xa0,0x36,0x75,0x03,0xe9,0xba,0xff,0xe9,0x60,0xe8,0xc7,0x06,0x41 -,0x37,0x00,0x00,0xe8,0xc1,0xe1,0xe8,0x6a,0x06,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56 -,0xa1,0xb1,0x36,0x0d,0x00,0x10,0xe7,0x08,0xe5,0x02,0x25,0xf9,0xff,0x0d,0x03,0x00 -,0xe7,0x02,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xa1,0xad,0x36,0xe7 -,0x04,0xe8,0x7c,0x03,0xe8,0x9f,0x03,0xc7,0x06,0x1d,0x37,0x00,0xc8,0xc7,0x06,0x0b -,0x37,0x00,0x03,0xc7,0x06,0x0d,0x37,0x7b,0x7f,0x33,0xc0,0xa3,0x99,0x36,0xa3,0x9b -,0x36,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xa3,0x4c,0x37,0xa3,0xf3,0x36,0xa3,0xef,0x36 -,0xa3,0xf1,0x36,0xe8,0x82,0xfd,0xc6,0x06,0x9f,0x36,0x02,0xe9,0xef,0xe7,0xe5,0x02 -,0x0d,0x01,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02,0xe8,0xf2 -,0x05,0xe5,0x0a,0x0d,0x40,0x00,0xe7,0x0a,0x33,0xc0,0xa3,0x81,0x37,0xa3,0x85,0x37 -,0xa3,0x83,0x37,0xa3,0x87,0x37,0xa3,0x89,0x37,0xe5,0x00,0x0d,0x00,0x84,0xe7,0x00 -,0xb8,0x8c,0x03,0xcd,0x39,0xb8,0x80,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff -,0xe5,0x00,0x25,0xff,0x7b,0xe7,0x00,0x81,0x0e,0x9a,0x37,0x80,0x00,0xb8,0x7e,0x03 -,0xcd,0x39,0x33,0xc0,0xe7,0x0e,0xbe,0x08,0x00,0x8e,0x06,0x38,0x34,0xe8,0xa7,0xed -,0x83,0x26,0xef,0x34,0xdf,0xff,0x06,0x81,0x37,0xcd,0x50,0x83,0x0e,0xef,0x34,0x20 -,0xc3,0xf7,0x06,0x9a,0x37,0x80,0x00,0x74,0x3d,0xa9,0xd0,0x07,0x74,0x10,0xa9,0x00 -,0x04,0x74,0x12,0x33,0xc0,0xe7,0x0e,0xff,0x06,0x87,0x37,0xe9,0xd2,0xff,0xff,0x06 -,0x85,0x37,0xe9,0xcb,0xff,0xff,0x06,0x83,0x37,0xe9,0xc4,0xff,0x83,0x26,0x9a,0x37 -,0x7f,0xa1,0x89,0x37,0x03,0x06,0x87,0x37,0x3d,0x05,0x00,0x7f,0x01,0xc3,0xbb,0xff -,0x7f,0xcd,0x53,0xe9,0x00,0x00,0xe5,0x02,0x25,0xff,0xfb,0x25,0xef,0xff,0x0d,0x01 -,0x00,0xe7,0x02,0xa1,0x83,0x37,0x3b,0x06,0x46,0x37,0x7f,0x2a,0xa1,0x85,0x37,0x3b -,0x06,0x48,0x37,0x7c,0x21,0xa1,0x89,0x37,0x03,0x06,0x87,0x37,0x3d,0x05,0x00,0x7f -,0x15,0xc6,0x06,0x9f,0x36,0x04,0xe5,0x02,0x25,0xff,0xf7,0x0d,0x01,0x00,0x25,0xef -,0xff,0xe7,0x02,0xe9,0xf7,0xe6,0xbe,0x01,0x00,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74 -,0x0a,0x83,0x26,0x9b,0x36,0xfc,0x83,0x0e,0xc2,0x34,0x04,0xe9,0xd0,0xe6,0xb8,0x7b -,0x03,0xcd,0x39,0xe5,0x02,0x0d,0x01,0x60,0x25,0xef,0xff,0xe7,0x02,0xc7,0x06,0xf1 -,0x34,0x20,0x03,0xb8,0x8e,0x03,0xcd,0x39,0xc3,0x81,0x26,0xc2,0x34,0x7f,0xff,0x80 -,0x0e,0x6f,0x37,0x01,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74,0xd2,0xb8,0x7b,0x03,0xcd -,0x3a,0xb8,0x7d,0x03,0xcd,0x39,0x83,0x26,0x9b,0x36,0xef,0x33,0xc0,0xb0,0x8a,0xa2 -,0x9f,0x36,0xa2,0x9d,0x36,0xc7,0x06,0x4c,0x37,0x01,0x00,0xc7,0x06,0x0f,0x37,0x04 -,0x00,0xf7,0x06,0x9b,0x36,0x40,0x00,0x75,0x06,0xc7,0x06,0x0f,0x37,0x03,0x00,0xb8 -,0x8d,0x03,0xcd,0x39,0xe8,0x00,0xd5,0xe5,0x02,0x0d,0x01,0x40,0x25,0xef,0xff,0x8b -,0xd8,0xb8,0x7c,0x03,0xcd,0x39,0xc7,0x06,0x33,0x37,0x02,0x00,0x8b,0xc3,0x0d,0x00 -,0x20,0x25,0xf9,0xff,0x0b,0x06,0xe8,0x3a,0xe7,0x02,0xc3,0xff,0x0e,0xf1,0x34,0x75 -,0x01,0xc3,0xe5,0x4e,0xa9,0x01,0x00,0x75,0x12,0xe5,0x00,0xa9,0x00,0x04,0x75,0x05 -,0x0d,0x00,0x04,0xe7,0x00,0xb8,0x8e,0x03,0xcd,0x39,0xc3,0xe5,0x00,0xa9,0x00,0x04 -,0x74,0xf3,0x25,0xff,0xfb,0xe7,0x00,0xe9,0xeb,0xff,0xc6,0x06,0xa0,0x36,0x04,0x83 -,0x26,0x9b,0x36,0xfc,0x81,0x0e,0x9b,0x36,0x80,0x00,0xe9,0x10,0xe6,0xb8,0x8e,0x03 -,0xcd,0x3a,0xcd,0x54,0x81,0x0e,0xaf,0x36,0x00,0x18,0xa1,0xaf,0x36,0xe7,0x06,0xb8 -,0x7b,0x03,0xcd,0x39,0xa1,0xd3,0x36,0xa3,0x8f,0x37,0xa1,0xd5,0x36,0xa3,0x91,0x37 -,0xc7,0x06,0x8b,0x37,0x02,0x00,0xc7,0x06,0x8d,0x37,0x02,0x00,0x83,0x0e,0x99,0x36 
-,0x40,0xe9,0xd9,0xe5,0x80,0x3e,0x9f,0x36,0x06,0x75,0x15,0xa9,0xd0,0x07,0x75,0xec -,0x25,0x00,0x18,0x75,0x0e,0xff,0x0e,0x8b,0x37,0x75,0xe1,0xc6,0x06,0x9f,0x36,0x08 -,0xe9,0xba,0xe5,0xff,0x0e,0x8d,0x37,0x75,0xd3,0xbe,0x08,0x00,0xe9,0x9f,0xe5,0xb8 -,0x7b,0x03,0xcd,0x39,0xf7,0x06,0x9b,0x36,0x00,0x20,0x74,0x08,0xc6,0x06,0x9f,0x36 -,0x0a,0xe9,0x0d,0x00,0xf7,0x06,0x9b,0x36,0x00,0x40,0x74,0x0b,0xb8,0x8b,0x03,0xcd -,0x39,0x81,0x0e,0x99,0x36,0x80,0x00,0xe9,0x83,0xe5,0xb8,0x7b,0x03,0xcd,0x39,0xc7 -,0x06,0x8b,0x37,0x04,0x00,0xc7,0x06,0x8d,0x37,0x04,0x00,0x81,0x0e,0x99,0x36,0x00 -,0x02,0xe9,0x69,0xe5,0xf6,0x06,0x9d,0x36,0x80,0x75,0x1b,0xa9,0xd0,0x07,0x75,0xeb -,0xa9,0x00,0x18,0x75,0x0c,0xff,0x0e,0x8d,0x37,0x75,0xe0,0xe8,0x17,0xfb,0xe9,0x4c -,0xe5,0xb8,0x82,0x03,0xcd,0x39,0xc3,0xff,0x0e,0x8b,0x37,0x75,0xce,0xbe,0x09,0x00 -,0xe9,0x2b,0xe5,0xc7,0x06,0x3d,0x37,0x00,0x00,0xc7,0x06,0x9b,0x36,0x00,0x00,0xe8 -,0x3c,0x02,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0x81,0x26,0x9b -,0x36,0xff,0x7f,0xe5,0x02,0x0d,0x01,0x00,0x25,0xef,0xff,0x25,0xff,0xdf,0xe7,0x02 -,0xbb,0xff,0x7f,0xcd,0x53,0x33,0xc0,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xe8,0x50,0x00 -,0xe8,0x73,0x00,0xb8,0x81,0x03,0xcd,0x39,0xc3,0xf7,0x06,0x9b,0x36,0x03,0x00,0x74 -,0x0d,0xc6,0x06,0x9f,0x36,0x02,0xc6,0x06,0xa0,0x36,0x00,0xe9,0xdf,0xe4,0x83,0x0e -,0x9b,0x36,0x10,0xc7,0x06,0x99,0x36,0x00,0x00,0xe8,0xe7,0x02,0xe5,0x56,0x0d,0x02 -,0x00,0xe7,0x56,0xc7,0x06,0xa8,0x02,0x00,0x00,0x8b,0x36,0x3d,0x37,0xe8,0x44,0x02 -,0xc6,0x06,0xa0,0x36,0x0e,0xe9,0xb5,0xe4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x06,0xb8,0x8a,0x03,0xcd,0x3a,0xb8,0x85,0x03,0xcd,0x3a,0xb8,0x86,0x03,0xcd,0x3a -,0xb8,0x83,0x03,0xcd,0x3a,0xb8,0x87,0x03,0xcd,0x3a,0xb8,0x8b,0x03,0xcd,0x3a,0xb8 -,0x88,0x03,0xcd,0x3a,0x07,0xc3,0x06,0xb8,0x88,0x03,0xcd,0x3a,0xb8,0x7b,0x03,0xcd -,0x3a,0xb8,0x82,0x03,0xcd,0x3a,0xb8,0x7f,0x03,0xcd,0x3a,0xb8,0x7c,0x03,0xcd,0x3a -,0xb8,0x7e,0x03,0xcd,0x3a,0xb8,0x80,0x03,0xcd,0x3a,0xb8,0x81,0x03,0xcd,0x3a,0xb8 -,0x84,0x03,0xcd,0x3a,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x7d,0x03,0xcd,0x3a,0xb8,0x8d -,0x03,0xcd,0x3a,0xc7,0x06,0x41,0x37,0x00,0x00,0x07,0xc3,0x06,0x8e,0x06,0x38,0x34 -,0x1f,0x8b,0x0e,0x0e,0x00,0x26,0x89,0x0e,0x0e,0x00,0xbe,0x18,0x00,0xbf,0x18,0x00 -,0xf3,0xa4,0x06,0x1e,0x07,0xcd,0x34,0x07,0x33,0xc0,0x8e,0xd8,0xc3,0x26,0xf6,0x06 -,0x20,0x00,0x80,0x74,0x44,0x33,0xc0,0x26,0xa0,0x26,0x00,0x24,0x1f,0x8b,0xf0,0x26 -,0x8b,0x5c,0x28,0x89,0x1e,0x6a,0x37,0x06,0x8e,0x06,0x38,0x34,0x1f,0xc0,0xe3,0x04 -,0x26,0x88,0x5c,0x28,0x8b,0xc6,0xb9,0x06,0x00,0xbe,0x20,0x00,0xbf,0x1a,0x00,0xf3 -,0xa4,0x8b,0xc8,0x83,0xc7,0x06,0xf3,0xa4,0x26,0x81,0x26,0x26,0x00,0x1f,0x80,0x26 -,0x81,0x36,0x26,0x00,0x00,0x80,0xe9,0xa9,0xff,0x26,0x8b,0x1e,0x28,0x00,0x89,0x1e -,0x6a,0x37,0x06,0x8e,0x06,0x38,0x34,0x1f,0xc0,0xe3,0x04,0x26,0x88,0x1e,0x28,0x00 -,0xb9,0x06,0x00,0xbe,0x20,0x00,0xbf,0x1a,0x00,0xf3,0xa4,0xe9,0x84,0xff,0x86,0xc4 -,0xa3,0x68,0x37,0xe8,0x87,0xff,0xf7,0x06,0x6a,0x37,0x0f,0x00,0x74,0x10,0x80,0x3e -,0x9e,0x36,0x00,0x75,0x09,0xbe,0x00,0x00,0xe8,0xac,0xe9,0xcd,0x50,0xc3,0xc3,0x50 -,0x56,0x06,0x33,0xc0,0x26,0xf6,0x06,0x20,0x00,0x80,0x74,0x06,0x26,0xa0,0x26,0x00 -,0x24,0x1f,0x8b,0xf0,0x26,0x8b,0x5c,0x26,0x86,0xfb,0x83,0xeb,0x04,0x74,0x4f,0x83 -,0xc6,0x2a,0x8c,0xc0,0x8e,0xd8,0xb9,0x07,0x00,0x33,0xc0,0x8e,0xc0,0xbf,0x72,0x37 -,0xf3,0xab,0x33,0xc9,0x8a,0x0c,0x80,0xf9,0x00,0x75,0x03,0xe9,0x30,0x00,0x3b,0xd9 -,0x73,0x03,0xe9,0x29,0x00,0x2b,0xd9,0x8a,0x44,0x01,0x25,0x3f,0x00,0x74,0x19,0x3d -,0x0b,0x00,0x7d,0x14,0xd1,0xe0,0x8b,0xf8,0x2e,0x8b,0xbd,0x5c,0x49,0x8d,0x74,0x02 
-,0x83,0xe9,0x02,0xf3,0xa4,0xe9,0x02,0x00,0x03,0xf1,0x23,0xdb,0x75,0xc4,0x33,0xc0 -,0x8e,0xd8,0x07,0x5e,0x58,0xc3,0x33,0xc0,0x26,0xf6,0x06,0x20,0x00,0x80,0x74,0x06 -,0x26,0xa0,0x26,0x00,0x24,0x1f,0xc3,0xe5,0x0a,0x25,0xc3,0xbf,0xe7,0x0a,0xb8,0x86 -,0x03,0xcd,0x39,0xb8,0x83,0x03,0xcd,0x39,0x81,0x26,0x9b,0x36,0x7c,0xdf,0xb8,0x85 -,0x03,0xcd,0x3a,0xe5,0x02,0x25,0xff,0xf3,0x0d,0x01,0x00,0x25,0xef,0xff,0xe7,0x02 -,0xe5,0x00,0x25,0xff,0x53,0xe7,0x00,0xa1,0xe7,0x36,0x25,0xff,0xfe,0xa3,0xe7,0x36 -,0xe7,0x3e,0x83,0x26,0x99,0x36,0xcf,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1,0xaf,0x36 -,0xe7,0x06,0xc3,0xe5,0x02,0x0d,0x01,0x0c,0x25,0xef,0xff,0xe7,0x02,0xa1,0xe7,0x36 -,0x0d,0x00,0x01,0xe7,0x3e,0xa3,0xe7,0x36,0x81,0x0e,0x9b,0x36,0x00,0x20,0x83,0x0e -,0x99,0x36,0x20,0x81,0x26,0x9b,0x36,0x7c,0xbf,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1 -,0xaf,0x36,0xe7,0x06,0xb8,0x86,0x03,0xcd,0x39,0xb8,0x85,0x03,0xcd,0x39,0xb8,0x83 -,0x03,0xcd,0x3a,0xc3,0x0b,0xf6,0x75,0x49,0x06,0x8e,0x06,0x32,0x34,0x80,0x3e,0xe0 -,0x34,0x01,0x75,0x1b,0x26,0x89,0x36,0x06,0x00,0x8e,0x06,0x32,0x34,0x26,0xf7,0x06 -,0x0a,0x00,0x00,0x20,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x20,0x07,0xc3,0x80 -,0x3e,0xe3,0x34,0x01,0x75,0x19,0x26,0x89,0x36,0x06,0x00,0x8e,0x06,0x32,0x34,0x26 -,0xf7,0x06,0x0a,0x00,0x00,0x10,0x74,0x07,0x26,0x81,0x0e,0x08,0x00,0x00,0x10,0x07 -,0xc3,0xe9,0xb4,0xff,0x50,0x51,0x57,0x33,0xc0,0xb9,0x06,0x00,0x8e,0xc0,0xbf,0xd1 -,0x36,0xf3,0xae,0x5f,0x74,0x0c,0x26,0xf6,0x06,0x00,0x00,0xc0,0x75,0x04,0xf8,0x59 -,0x58,0xc3,0xf9,0xe9,0xf9,0xff,0x8b,0x05,0x0b,0x45,0x02,0x0b,0x45,0x04,0xc3,0x52 -,0x50,0xe5,0x06,0x25,0x1e,0x00,0x3d,0x1e,0x00,0x75,0xf6,0xb8,0x01,0x80,0xe7,0x5a -,0x58,0x5a,0xc3,0xe8,0xe9,0xff,0x50,0xe5,0x02,0x25,0xff,0x7f,0x0d,0x01,0x00,0x25 -,0xef,0xff,0xe7,0x02,0x0d,0x00,0x80,0xe7,0x02,0xa1,0xad,0x36,0xe7,0x04,0xa1,0xaf -,0x36,0xe7,0x06,0x58,0xc3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x2e,0x2b,0xce,0x41,0x10,0x42,0x7b,0x41,0x30,0x41,0xa2,0x41,0xaf,0x45,0x44,0x29 -,0xc7,0x2a,0xc7,0x2a,0x60,0x39,0xf4,0x3a,0x5c,0x3c,0x09,0x3d,0xb1,0x3d,0x34,0x3f -,0xc7,0x2a,0x3c,0x3f,0xc7,0x2a,0xc4,0x3f,0x16,0x40,0x16,0x40,0xed,0x40,0xfa,0x40 -,0x07,0x41,0xc7,0x2a,0xc7,0x2a,0xc7,0x2a,0xc7,0x2a,0xd6,0x52,0x00,0x00,0x01,0x37 -,0xe9,0x36,0xf3,0x36,0xef,0x36,0x1d,0x37,0x0d,0x37,0x0b,0x37,0x9c,0x37,0x03,0x37 -,0xfb,0x36,0x62,0x2d,0x40,0x06,0xd1,0x2d,0xf4,0x01,0xba,0x44,0x40,0x06,0x8c,0x43 -,0x64,0x00,0xe8,0x2c,0xc8,0x00,0xd8,0x2b,0x05,0x00,0xe9,0x45,0x50,0x00,0x97,0x45 -,0xfa,0x00,0xae,0x2d,0x04,0x01,0x6a,0x42,0x02,0x00,0xf6,0x2c,0xbc,0x02,0x93,0x2d -,0xdc,0x05,0x1d,0x2d,0x64,0x00,0xa1,0x2d,0x14,0x00,0xd7,0x3a,0x08,0x07,0x81,0x2d -,0x64,0x00,0xb3,0x3e,0x02,0x00,0x30,0x43,0x64,0x00,0xc5,0x2c,0xf4,0x01,0x8b,0x44 -,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x80,0x3e,0xfd,0x34,0x02,0x74,0x0c,0xe8,0x20,0x05,0xc7,0x06,0xa1,0x36,0x00,0x00 -,0xe9,0x9a,0xf8,0xff,0x06,0xc0,0x33,0xe8,0x10,0x05,0x8b,0x36,0x3d,0x37,0xe8,0x73 -,0xfe,0xc3,0xcd,0x34,0xe9,0xe8,0x05,0xc7,0x06,0xa3,0x36,0x00,0x00,0xc7,0x06,0x41 -,0x37,0x00,0x00,0xe8,0xed,0xfe,0x33,0xc0,0x0d,0x41,0x00,0xe7,0x56,0xa1,0xb1,0x36 -,0x0d,0x00,0x10,0xe7,0x08,0xa1,0xb3,0x36,0xe7,0x0a,0xa1,0xaf,0x36,0xe7,0x06,0xa1 -,0xad,0x36,0xe7,0x04,0xe8,0x2b,0x09,0xc7,0x06,0x1d,0x37,0x00,0xc8,0xc7,0x06,0x0b -,0x37,0x00,0x03,0xc7,0x06,0x0d,0x37,0x7b,0x7f,0x33,0xc0,0xa3,0x9b,0x36,0xa3,0x9d -,0x36,0xc7,0x06,0x4c,0x37,0x01,0x00,0xc6,0x06,0x9e,0x36,0xff,0xc7,0x06,0x05,0x37 -,0x00,0x00,0xc7,0x06,0x07,0x37,0x00,0x00,0xc7,0x06,0x09,0x37,0x00,0x00,0xa3,0xf3 
-,0x36,0xa3,0xef,0x36,0xa3,0xf1,0x36,0xe8,0xfe,0xf5,0xe5,0x02,0x25,0xf9,0xff,0x0d -,0x03,0x00,0x0d,0x00,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02 -,0xb8,0x8f,0x03,0xcd,0x39,0xb8,0x80,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff -,0xa1,0xa9,0x36,0xa3,0xa7,0x36,0x0d,0x00,0xa4,0x0d,0x00,0x08,0xe7,0x00,0xa3,0xa9 -,0x36,0xc7,0x06,0xa3,0x36,0x01,0x00,0xc7,0x06,0xa5,0x36,0x0c,0x00,0x83,0x3e,0xa5 -,0x36,0x00,0x75,0x09,0xc7,0x06,0x3d,0x37,0x05,0x00,0xe9,0x13,0xff,0xff,0x0e,0xa5 -,0x36,0xbe,0x11,0x00,0xe8,0x22,0x05,0xb8,0x90,0x03,0xcd,0x39,0xc3,0x83,0x3e,0xa3 -,0x36,0x01,0x74,0xd9,0xc3,0xb8,0x90,0x03,0xcd,0x3a,0x26,0xa0,0x2b,0x00,0x26,0x8b -,0x1e,0x2c,0x00,0xcd,0x34,0x83,0x3e,0xa3,0x36,0x01,0x74,0x03,0xe9,0xf0,0x04,0x3c -,0x0f,0x75,0x1e,0x81,0xfb,0x00,0x02,0x75,0x18,0x26,0xa1,0x20,0x00,0xa3,0x05,0x37 -,0x26,0xa1,0x22,0x00,0xa3,0x07,0x37,0x26,0xa1,0x24,0x00,0xa3,0x09,0x37,0xe9,0x09 -,0x00,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xb6,0xfe,0xc7,0x06,0xa3,0x36,0x02,0x00 -,0xc6,0x06,0x9e,0x36,0xff,0xe8,0xcb,0xfd,0xe8,0x1c,0xd9,0x33,0xc0,0xa3,0x85,0x37 -,0xa3,0x83,0x37,0xa3,0x87,0x37,0xa3,0x89,0x37,0xb8,0x91,0x03,0xcd,0x39,0xb8,0x80 -,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff,0xe5,0x00,0x25,0xff,0x53,0xe7,0x00 -,0x81,0x0e,0x9a,0x37,0x80,0x00,0xb8,0x92,0x03,0xcd,0x39,0x33,0xc0,0xe7,0x0e,0xbe -,0x08,0x00,0x8e,0x06,0x38,0x34,0xe8,0x8e,0xe5,0x26,0xc7,0x06,0x04,0x00,0x7d,0x4b -,0x83,0x26,0xef,0x34,0xdf,0xcd,0x50,0x83,0x0e,0xef,0x34,0x20,0xc3,0xf7,0x06,0x9a -,0x37,0x80,0x00,0x74,0x32,0xa9,0xd0,0x07,0x74,0x0c,0xa9,0x00,0x04,0x74,0x0e,0x33 -,0xc0,0xe7,0x0e,0xe9,0xda,0xff,0xff,0x06,0x85,0x37,0xe9,0xd3,0xff,0xff,0x06,0x83 -,0x37,0xe9,0xcc,0xff,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0x36,0xfe,0x83,0x26,0x9a -,0x37,0x7f,0xbb,0xff,0x7f,0xcd,0x53,0xe5,0x00,0x0d,0x00,0xac,0xe7,0x00,0xe5,0x02 -,0x25,0xff,0xfb,0x25,0xef,0xff,0x25,0xff,0xf7,0x0d,0x01,0x00,0xe7,0x02,0xa1,0x83 -,0x37,0x3b,0x06,0x46,0x37,0x7f,0xcd,0xa1,0x85,0x37,0x3b,0x06,0x48,0x37,0x7c,0xc4 -,0xc7,0x06,0xa3,0x36,0x03,0x00,0xbe,0x13,0x00,0xe8,0xfd,0x03,0xb8,0x93,0x03,0xcd -,0x39,0xb8,0x94,0x03,0xcd,0x39,0xb8,0x96,0x03,0xcd,0x39,0xb8,0x95,0x03,0xcd,0x39 -,0xbe,0x06,0x00,0xe8,0xe3,0x03,0xe9,0xd6,0x03,0x83,0x3e,0xa3,0x36,0x03,0x74,0x01 -,0xc3,0xbe,0x13,0x00,0xe8,0xd2,0x03,0xb8,0x94,0x03,0xcd,0x39,0xc3,0xb8,0x94,0x03 -,0xcd,0x3a,0x26,0xa0,0x2b,0x00,0x26,0x8b,0x1e,0x2c,0x00,0xcd,0x34,0x83,0x3e,0xa3 -,0x36,0x03,0x74,0x03,0xe9,0xa8,0x03,0x3c,0x0d,0x75,0x3e,0x83,0xfb,0x00,0x75,0x39 -,0xe5,0x02,0x0d,0x00,0x20,0xe7,0x02,0xb8,0x93,0x03,0xcd,0x3a,0xc7,0x06,0xa3,0x36 -,0x04,0x00,0xbe,0x00,0x00,0xe8,0x0c,0xfc,0xc6,0x06,0x9d,0x36,0x80,0xc6,0x06,0x9e -,0x36,0x00,0xc7,0x06,0x33,0x37,0x02,0x00,0xb8,0x9a,0x03,0xcd,0x39,0xe8,0xfc,0x00 -,0xc7,0x06,0x4c,0x37,0x00,0x00,0xe9,0x66,0x03,0xc7,0x06,0x3d,0x37,0x08,0x00,0xe9 -,0x61,0xfd,0x83,0x3e,0xa3,0x36,0x03,0x75,0x09,0xc7,0x06,0x3d,0x37,0x05,0x00,0xe9 -,0x51,0xfd,0xe9,0x4a,0x03,0x83,0x3e,0xa3,0x36,0x04,0x74,0x12,0x83,0x3e,0xa3,0x36 -,0x05,0x74,0x0b,0xcd,0x34,0xc7,0x06,0x3d,0x37,0x07,0x00,0xe9,0x35,0xfd,0xc7,0x06 -,0xa3,0x36,0x06,0x00,0xc6,0x06,0x9e,0x36,0xff,0xb8,0x9a,0x03,0xcd,0x3a,0xb8,0x99 -,0x03,0xcd,0x3a,0xb8,0x96,0x03,0xcd,0x3a,0xb8,0x97,0x03,0xcd,0x39,0xb8,0x98,0x03 -,0xcd,0x39,0xb8,0x9b,0x03,0xcd,0x39,0xe9,0x18,0xfd,0xcd,0x34,0x83,0x3e,0xa3,0x36 -,0x04,0x77,0x18,0x83,0x3e,0xa3,0x36,0x03,0x75,0x08,0xf7,0x06,0x9b,0x36,0x00,0x01 -,0x75,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xe8,0xfc,0xe9,0xe1,0x02,0xcd,0x34 -,0x83,0x3e,0xa3,0x36,0x02,0x77,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00,0xe9,0xd3,0xfc 
-,0x83,0x3e,0xa3,0x36,0x04,0x77,0x05,0xb8,0x96,0x03,0xcd,0x39,0xe9,0xc0,0x02,0x83 -,0x3e,0xa3,0x36,0x03,0x75,0x10,0x26,0xa1,0x0c,0x00,0x25,0x07,0x00,0x50,0x3d,0x04 -,0x00,0x75,0x03,0xe8,0x36,0x00,0xa1,0xf3,0x36,0x86,0xe0,0xe7,0x1e,0xa3,0xe3,0x36 -,0x81,0x26,0x0b,0x37,0x00,0x03,0x81,0x26,0x0d,0x37,0x7b,0x7f,0x83,0x0e,0x0d,0x37 -,0x48,0xe8,0x14,0xf3,0x58,0x3d,0x04,0x00,0x74,0x09,0x26,0xf7,0x06,0x0c,0x00,0x20 -,0x00,0x75,0x06,0xb8,0x01,0x00,0xe9,0x7a,0x02,0xe9,0x86,0xfc,0xa1,0xe5,0x36,0xe7 -,0x2e,0xa1,0xe7,0x36,0xe7,0x3e,0xa1,0xd3,0x36,0xa3,0x9c,0x34,0xa1,0xd5,0x36,0xa3 -,0x9e,0x34,0xc3,0x26,0x80,0x3e,0x1c,0x00,0xff,0x75,0x2f,0x26,0x80,0x3e,0x1e,0x00 -,0xff,0x75,0x27,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00,0x75,0x1b,0xa1,0xd1,0x36,0x26 -,0xa3,0x1a,0x00,0xa1,0xd3,0x36,0x26,0xa3,0x1c,0x00,0xa1,0xd5,0x36,0x26,0xa3,0x1e -,0x00,0xb8,0x0a,0x80,0xe9,0x2c,0x02,0xe9,0x38,0xfc,0xff,0x06,0x90,0x34,0xbe,0x0a -,0x00,0xc6,0x06,0xb6,0x34,0x01,0xf6,0x06,0x9d,0x36,0x80,0x75,0x05,0x83,0x0e,0xc2 -,0x34,0x01,0xcd,0x34,0xe9,0x0c,0xfc,0x83,0x3e,0xa3,0x36,0x03,0x75,0x09,0xc7,0x06 -,0x3d,0x37,0x05,0x00,0xe9,0xfc,0xfb,0xe5,0x02,0x0d,0x03,0x00,0x0d,0x00,0x88,0x0d -,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02,0xc7,0x06,0xa3,0x36,0x05,0x00,0xc6,0x06,0x9e -,0x36,0xff,0xbe,0x02,0x00,0xe8,0xe1,0x01,0xb8,0x89,0x03,0xcd,0x3a,0xb8,0x9a,0x03 -,0xcd,0x3a,0xb8,0x99,0x03,0xcd,0x39,0xb8,0x97,0x03,0xcd,0x39,0xb8,0x98,0x03,0xcd -,0x39,0xe9,0xbb,0x01,0x83,0x3e,0xa3,0x36,0x03,0x74,0x0a,0x83,0x3e,0xa3,0x36,0x04 -,0x74,0x03,0xe9,0xaa,0x01,0xbe,0x06,0x00,0xe8,0xae,0x01,0xb8,0x95,0x03,0xcd,0x39 -,0xe9,0x9c,0x01,0x83,0x3e,0xa3,0x36,0x05,0x74,0x03,0xe9,0x92,0x01,0xbe,0x02,0x00 -,0xe8,0x96,0x01,0xb8,0x99,0x03,0xcd,0x39,0xe9,0x84,0x01,0xc7,0x06,0x0f,0x37,0x05 -,0x00,0xe9,0x7b,0x01,0xe5,0x02,0x25,0xff,0xdf,0xe7,0x02,0xc7,0x06,0xa3,0x36,0x07 -,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xe9,0x65,0x01,0xe8,0xd5,0x04,0xc6,0x06,0x9d -,0x36,0x00,0xc7,0x06,0x9b,0x36,0x00,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xc7,0x06 -,0xa8,0x02,0x00,0x00,0xc7,0x06,0x4c,0x37,0x01,0x00,0xe5,0x02,0x25,0xf9,0xff,0x0d -,0x03,0x00,0x0d,0x00,0x88,0x25,0xef,0xff,0x0d,0x00,0x40,0x0d,0x00,0x04,0xe7,0x02 -,0xe9,0x67,0xfc,0xb8,0x9a,0x03,0xcd,0x39,0xf7,0x06,0xf4,0x33,0x00,0x10,0x75,0x09 -,0xc7,0x06,0x33,0x37,0x02,0x00,0xe9,0x16,0x01,0xff,0x0e,0x33,0x37,0x74,0x03,0xe9 -,0x0d,0x01,0xff,0x06,0x8e,0x34,0x83,0x0e,0xc2,0x34,0x08,0xc7,0x06,0x3d,0x37,0x03 -,0x00,0xe9,0xff,0xfa,0xc3,0x52,0x50,0xba,0xe0,0x00,0xb8,0x00,0x10,0xef,0x58,0x5a -,0xc3,0xc7,0x06,0x3d,0x37,0x00,0x00,0xe9,0xe9,0xfa,0xfa,0xe8,0x54,0x04,0xb8,0x80 -,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04,0x00,0xd8,0x2b,0xb8,0x7f,0x03,0x8e,0xc0,0x26 -,0xc7,0x06,0x04,0x00,0xe8,0x2c,0x33,0xc0,0x8e,0xc0,0xa1,0xa7,0x36,0xa3,0xa9,0x36 -,0xa1,0xa9,0x36,0xe7,0x00,0xa1,0xab,0x36,0xe7,0x02,0xc7,0x06,0x05,0x37,0x00,0x00 -,0xc7,0x06,0x07,0x37,0x00,0x00,0xc7,0x06,0x09,0x37,0x00,0x00,0xc6,0x06,0x9d,0x36 -,0x00,0xc6,0x06,0x9e,0x36,0xff,0xc7,0x06,0x9b,0x36,0x00,0x00,0xc7,0x06,0xa3,0x36 -,0x00,0x00,0xc7,0x06,0x0f,0x37,0x00,0x00,0xc7,0x06,0xa8,0x02,0x00,0x00,0xc7,0x06 -,0x4c,0x37,0x01,0x00,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0xbb -,0xff,0x7f,0xcd,0x53,0xe8,0x7c,0xf9,0xe5,0x56,0x0d,0x02,0x00,0xe7,0x56,0xfb,0xc3 -,0x8d,0x3e,0xc0,0x53,0x8d,0x36,0xf0,0x38,0xb9,0x0e,0x00,0x8b,0x1e,0x30,0x34,0x89 -,0x5c,0x02,0x2e,0x8b,0x45,0x02,0x89,0x44,0x06,0x2e,0x8b,0x05,0x89,0x44,0x04,0x83 -,0xc7,0x04,0x83,0xc6,0x10,0xe2,0xe8,0xb8,0x80,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04 -,0x00,0xe2,0x51,0xb8,0x7f,0x03,0x8e,0xc0,0x26,0xc7,0x06,0x04,0x00,0xb2,0x52,0x33 
-,0xc0,0x8e,0xc0,0xc7,0x06,0xa1,0x36,0x01,0x00,0xc7,0x06,0x0f,0x37,0x05,0x00,0xc3 -,0x33,0xff,0x8e,0x06,0xa6,0x02,0x8b,0x36,0xa4,0x02,0x2e,0xff,0xa4,0xa0,0x53,0xe8 -,0x8c,0xdb,0xc3,0xe8,0x48,0xf7,0xe9,0xf6,0xff,0x8e,0x06,0x38,0x34,0xe8,0x07,0xe1 -,0x26,0xc7,0x06,0x04,0x00,0xdf,0x4f,0xcd,0x50,0xc3,0x26,0xc7,0x06,0x0a,0x00,0x00 -,0x00,0x26,0xff,0x26,0x04,0x00,0xcd,0x34,0xe9,0xd4,0xff,0xa1,0xd1,0x36,0x26,0x39 -,0x06,0x1a,0x00,0x75,0x22,0xa1,0xd3,0x36,0x26,0x39,0x06,0x1c,0x00,0x75,0x18,0xa1 -,0xd5,0x36,0x26,0x39,0x06,0x1e,0x00,0x75,0x0e,0x26,0xf7,0x06,0x0c,0x00,0x40,0x00 -,0x74,0x05,0x83,0x0e,0x66,0x37,0x40,0x81,0x0e,0xaf,0x36,0x00,0x10,0xa1,0xaf,0x36 -,0xe7,0x06,0x83,0x3e,0xa3,0x36,0x02,0x75,0x05,0xcd,0x34,0xe9,0x56,0xfb,0x83,0x3e -,0xa3,0x36,0x00,0x74,0xb1,0x83,0x3e,0xa3,0x36,0x05,0x77,0xaa,0x26,0xf6,0x06,0x0a -,0x00,0xff,0x75,0xa2,0xe8,0xfd,0xdd,0x50,0xf6,0x06,0x93,0x36,0x20,0x75,0x03,0xe9 -,0x8c,0x00,0x26,0xa1,0x0c,0x00,0x25,0x07,0x00,0x3d,0x07,0x00,0x75,0x03,0xe9,0x76 -,0x00,0x3d,0x05,0x00,0x75,0x03,0xe9,0x6e,0x00,0xf7,0x06,0xe6,0x34,0x18,0x80,0x75 -,0x03,0xe9,0x6a,0x00,0xf7,0x06,0xe6,0x34,0x00,0x80,0x74,0x35,0x26,0x80,0x3e,0x29 -,0x00,0x02,0x75,0x2d,0x51,0x56,0x57,0x8d,0x36,0x3e,0x34,0x8d,0x3e,0x20,0x00,0xb9 -,0x06,0x00,0xf3,0xa6,0x5f,0x5e,0x59,0x75,0x45,0x26,0xa1,0x20,0x00,0xa3,0x3e,0x34 -,0x26,0xa1,0x22,0x00,0xa3,0x40,0x34,0x26,0xa1,0x24,0x00,0xa3,0x42,0x34,0xe9,0x26 -,0x00,0xf7,0x06,0xe6,0x34,0x08,0x00,0x74,0x0b,0x26,0x80,0x3e,0x19,0x00,0x00,0x74 -,0x03,0xe9,0x13,0x00,0xf7,0x06,0xe6,0x34,0x10,0x00,0x74,0x12,0x26,0xa0,0x28,0x00 -,0xc0,0xe8,0x04,0x22,0xc0,0x74,0x07,0x26,0xc7,0x06,0x04,0x00,0xff,0xff,0x58,0x23 -,0xc0,0x74,0x03,0xe9,0xdd,0xfe,0x81,0x26,0x9b,0x36,0xff,0xfe,0x26,0xa1,0x20,0x00 -,0x3b,0x06,0xd1,0x36,0x75,0x1a,0x26,0xa1,0x22,0x00,0x3b,0x06,0xd3,0x36,0x75,0x10 -,0x26,0xa1,0x24,0x00,0x3b,0x06,0xd5,0x36,0x75,0x06,0x81,0x0e,0x9b,0x36,0x00,0x01 -,0x26,0xa1,0x20,0x00,0x25,0x7f,0xff,0xa3,0xb8,0x34,0x26,0xa1,0x22,0x00,0xa3,0xba -,0x34,0x26,0xa1,0x24,0x00,0xa3,0xbc,0x34,0x8b,0xc6,0x86,0xc4,0xa3,0xc0,0x34,0xd1 -,0xe6,0x80,0xfc,0x09,0x74,0x03,0xe8,0xf6,0xf5,0xa1,0x05,0x37,0x0b,0x06,0x07,0x37 -,0x0b,0x06,0x09,0x37,0x74,0x3e,0x26,0xa1,0x20,0x00,0x3b,0x06,0x05,0x37,0x75,0x17 -,0x26,0xa1,0x22,0x00,0x3b,0x06,0x07,0x37,0x75,0x0d,0x26,0xa1,0x24,0x00,0x3b,0x06 -,0x09,0x37,0x75,0x03,0xe9,0x1d,0x00,0x26,0xa0,0x28,0x00,0x24,0x0f,0x3c,0x03,0x74 -,0x1b,0x3c,0x00,0x75,0x0f,0x83,0x3e,0xa3,0x36,0x04,0x74,0x10,0xf7,0x06,0x9b,0x36 -,0x00,0x01,0x74,0x08,0x2e,0xff,0x94,0xf8,0x53,0xe9,0x33,0xfe,0xcd,0x34,0xc7,0x06 -,0x3d,0x37,0x01,0x00,0xe9,0x2c,0xf8,0x83,0x3e,0xa3,0x36,0x05,0x74,0x10,0x83,0x3e -,0xa3,0x36,0x01,0x7e,0x09,0x83,0xee,0x16,0x2e,0xff,0x94,0x24,0x54,0xc3,0xcd,0x34 -,0xc3,0x26,0xa1,0x0c,0x00,0x3d,0xff,0x7f,0x74,0x05,0x26,0xff,0x26,0x04,0x00,0xe9 -,0xfd,0xfd,0xa1,0xf4,0x33,0xa9,0x00,0x88,0x74,0x0b,0xa9,0x00,0x10,0x75,0x09,0x8b -,0x1e,0x43,0x37,0xff,0xe3,0xe9,0x97,0x00,0xc7,0x06,0x35,0x37,0x05,0x00,0xc7,0x06 -,0x43,0x37,0x28,0x52,0xf7,0x06,0xf4,0x33,0x00,0x08,0x74,0x06,0xc7,0x06,0x43,0x37 -,0x1a,0x52,0xb8,0x80,0x03,0xcd,0x39,0xe9,0xc5,0xfd,0xa9,0x00,0x08,0x74,0xd9,0xff -,0x0e,0x35,0x37,0x75,0xed,0xe9,0x30,0x00,0xa9,0x00,0x08,0x75,0xcb,0xff,0x0e,0x35 -,0x37,0x75,0xdf,0x81,0x0e,0xc2,0x34,0xc0,0x00,0xf6,0x06,0x9d,0x36,0x80,0x74,0x0f -,0x81,0x0e,0x9b,0x36,0x00,0x80,0xc7,0x06,0x0f,0x37,0x02,0x00,0xe9,0x90,0xfd,0xc7 -,0x06,0x3d,0x37,0x02,0x00,0xe9,0x8b,0xf7,0x80,0x26,0x9e,0x36,0xff,0x75,0x30,0xf6 -,0x06,0x9d,0x36,0x80,0x74,0x20,0xff,0x06,0x94,0x34,0x83,0x0e,0x66,0x37,0x20,0x8e 
-,0x06,0x30,0x34,0x26,0xf7,0x06,0x0a,0x00,0x00,0x01,0x74,0x07,0x26,0x81,0x0e,0x08 -,0x00,0x00,0x01,0xe9,0x09,0x00,0xc7,0x06,0x3d,0x37,0x04,0x00,0xe9,0x54,0xf7,0x81 -,0x0e,0xaf,0x36,0x00,0x08,0xa1,0xaf,0x36,0xe7,0x06,0xe5,0x0a,0xa9,0x00,0x80,0x74 -,0x0e,0x81,0x26,0xaf,0x36,0xff,0xf7,0xa1,0xaf,0x36,0xe7,0x06,0xe9,0x49,0xff,0xe9 -,0x2d,0xfd,0xc7,0x06,0x41,0x37,0x00,0x00,0xbe,0x29,0x00,0xe8,0x2b,0xfd,0xe9,0x1e -,0xfd,0xcd,0x34,0x83,0x3e,0xa3,0x36,0x04,0x77,0x09,0xc7,0x06,0x3d,0x37,0x01,0x00 -,0xe9,0x10,0xf7,0xe9,0x09,0xfd,0xcd,0x34,0xc3,0xc7,0x06,0x9b,0x36,0x00,0x00,0xe8 -,0x0c,0xf5,0x81,0x26,0xaf,0x36,0xff,0xe7,0xa1,0xaf,0x36,0xe7,0x06,0x81,0x26,0x9b -,0x36,0xff,0x7f,0xe5,0x02,0x0d,0x01,0x00,0x25,0xef,0xff,0x25,0xff,0xdf,0xe7,0x02 -,0xbb,0xff,0x7f,0xcd,0x53,0x33,0xc0,0xa3,0x9d,0x36,0xa3,0x9f,0x36,0xe8,0x20,0xf3 -,0xe8,0x43,0xf3,0x83,0x0e,0x9b,0x36,0x10,0xc7,0x06,0x99,0x36,0x00,0x00,0xe8,0xd2 -,0xf5,0xe5,0x56,0x0d,0x02,0x00,0xe7,0x56,0xc7,0x06,0xa8,0x02,0x00,0x00,0xbe,0x00 -,0x00,0xe8,0x30,0xf5,0xc6,0x06,0xa0,0x36,0x0e,0xb8,0x9c,0x03,0xcd,0x39,0xb8,0x80 -,0x00,0xcd,0x35,0xc7,0x06,0xaa,0x02,0xff,0xff,0xc7,0x06,0xa1,0x36,0x01,0x00,0xe9 -,0xa5,0xf6,0x06,0xb8,0x8f,0x03,0xcd,0x3a,0xb8,0x90,0x03,0xcd,0x3a,0xb8,0x91,0x03 -,0xcd,0x3a,0xb8,0x92,0x03,0xcd,0x3a,0xb8,0x93,0x03,0xcd,0x3a,0xb8,0x94,0x03,0xcd -,0x3a,0xb8,0x95,0x03,0xcd,0x3a,0xb8,0x96,0x03,0xcd,0x3a,0xb8,0x97,0x03,0xcd,0x3a -,0xb8,0x98,0x03,0xcd,0x3a,0xb8,0x99,0x03,0xcd,0x3a,0xb8,0x9a,0x03,0xcd,0x3a,0xb8 -,0x9b,0x03,0xcd,0x3a,0xb8,0x7f,0x03,0xcd,0x3a,0xb8,0x80,0x03,0xcd,0x3a,0x07,0xc3 -,0xf7,0x49,0xf1,0x4e,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xf8,0x51,0xdf,0x4f -,0xfa,0x4f,0x0b,0x50,0xd1,0x51,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f,0xdf,0x4f -,0xe4,0x4e,0x06,0x00,0xcd,0x4a,0x04,0x00,0xe4,0x4e,0x19,0x00,0xad,0x4b,0xfa,0x00 -,0x82,0x4c,0x08,0x07,0x09,0x4c,0x14,0x00,0x24,0x4e,0x64,0x00,0xd7,0x4d,0xf4,0x01 -,0x64,0x4e,0xbc,0x02,0x7a,0x4e,0xe8,0x03,0x43,0x4e,0x02,0x00,0xb3,0x4e,0xf4,0x01 -,0x5b,0x4e,0xf4,0x01,0xe5,0x4e,0x14,0x00,0x06,0x50,0x06,0x50,0x95,0x4c,0xc1,0x52 -,0xc1,0x52,0xfe,0x4c,0xda,0x4c,0x06,0x50,0x06,0x50,0x06,0x50,0x06,0x50,0xb7,0x51 -,0xb7,0x51,0xb7,0x51,0xb7,0x51,0xb7,0x51,0xb7,0x51,0x06,0x50,0xd5,0x4a,0x06,0x50 -,0x1d,0x4c,0x06,0x50,0x83,0x4d,0x1f,0x4d,0x1f,0x4d,0xed,0x40,0xfa,0x40,0x07,0x41 -,0x37,0x37,0x2e,0x37,0x37,0x20,0x20,0x79,0x79,0x2f,0x79,0x79,0x2f,0x79,0x79,0x20 -,0x30,0x31,0x2e,0x39,0x30,0x20,0x20,0x30,0x32,0x2f,0x31,0x37,0x2f,0x39,0x39,0x20 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 
- [roughly 190 further all-zero lines (sixteen ",0x00" bytes each) of the deleted firmware data array elided; only the final non-zero line and the closing brace follow]
-,0x90,0xea,0xc0,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0x06 -} ; diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 933fcfbf35e..d3f39e86eb9 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -24,7 +24,6 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/dma-mapping.h> -#include <linux/fsl_devices.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/workqueue.h> @@ -223,10 +222,10 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 1))); - skb->dev = ugeth->dev; + skb->dev = ugeth->ndev; out_be32(&((struct qe_bd __iomem *)bd)->buf, - dma_map_single(&ugeth->dev->dev, + dma_map_single(ugeth->dev, skb->data, ugeth->ug_info->uf_info.max_rx_buf_length + UCC_GETH_RX_DATA_BUF_ALIGNMENT, @@ -1872,7 +1871,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) continue; for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { if (ugeth->tx_skbuff[i][j]) { - dma_unmap_single(&ugeth->dev->dev, + dma_unmap_single(ugeth->dev, in_be32(&((struct qe_bd __iomem *)bd)->buf), (in_be32((u32 __iomem *)bd) & BD_LENGTH_MASK), @@ -1900,7 +1899,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) bd = ugeth->p_rx_bd_ring[i]; for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { if (ugeth->rx_skbuff[i][j]) { - dma_unmap_single(&ugeth->dev->dev, + dma_unmap_single(ugeth->dev, in_be32(&((struct qe_bd __iomem *)bd)->buf), ugeth->ug_info-> uf_info.max_rx_buf_length + @@ -3071,7 +3070,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) /* set up the buffer descriptor */ out_be32(&((struct qe_bd __iomem *)bd)->buf, - dma_map_single(&ugeth->dev->dev, skb->data, + dma_map_single(ugeth->dev, skb->data, skb->len, DMA_TO_DEVICE)); /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ @@ -3127,7 +3126,7
@@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit ugeth_vdbg("%s: IN", __func__); - dev = ugeth->dev; + dev = ugeth->ndev; /* collect received buffers */ bd = ugeth->rxBd[rxQ]; @@ -3161,7 +3160,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit skb_put(skb, length); /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, ugeth->dev); + skb->protocol = eth_type_trans(skb, ugeth->ndev); dev->stats.rx_bytes += length; /* Send the packet up the stack */ @@ -3432,7 +3431,7 @@ static int ucc_geth_close(struct net_device *dev) ucc_geth_stop(ugeth); - free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); + free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); netif_stop_queue(dev); @@ -3446,7 +3445,7 @@ static void ucc_geth_timeout_work(struct work_struct *work) struct net_device *dev; ugeth = container_of(work, struct ucc_geth_private, timeout_work); - dev = ugeth->dev; + dev = ugeth->ndev; ugeth_vdbg("%s: IN", __func__); @@ -3756,7 +3755,8 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma memcpy(dev->dev_addr, mac_addr, 6); ugeth->ug_info = ug_info; - ugeth->dev = dev; + ugeth->dev = device; + ugeth->ndev = dev; ugeth->node = np; return 0; diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index e3a25e64a65..2f8ee7c87ef 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -20,7 +20,6 @@ #include <linux/kernel.h> #include <linux/list.h> -#include <linux/fsl_devices.h> #include <asm/immap_qe.h> #include <asm/qe.h> @@ -1129,7 +1128,8 @@ struct ucc_geth_info { struct ucc_geth_private { struct ucc_geth_info *ug_info; struct ucc_fast_private *uccf; - struct net_device *dev; + struct device *dev; + struct net_device *ndev; struct napi_struct napi; struct work_struct timeout_work; struct ucc_geth __iomem *ug_regs; diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index a755bea559b..6fcb500257b 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -28,7 +28,6 @@ #include <linux/mm.h> #include <linux/delay.h> #include <linux/dma-mapping.h> -#include <linux/fsl_devices.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index cde423c6d04..f84b78d94c4 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -5,6 +5,7 @@ * Copyright (C) 2008 Option International * Filip Aben <f.aben@option.com> * Denis Joseph Barrow <d.barow@option.com> + * Jan Dumon <j.dumon@option.com> * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd) * <ajb@spheresystems.co.uk> * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> @@ -462,9 +463,16 @@ static const struct usb_device_id hso_ids[] = { {USB_DEVICE(0x0af0, 0x7701)}, {USB_DEVICE(0x0af0, 0x7801)}, {USB_DEVICE(0x0af0, 0x7901)}, - {USB_DEVICE(0x0af0, 0x7361)}, - {USB_DEVICE(0x0af0, 0xd057)}, + {USB_DEVICE(0x0af0, 0x8200)}, + {USB_DEVICE(0x0af0, 0x8201)}, + {USB_DEVICE(0x0af0, 0xd035)}, {USB_DEVICE(0x0af0, 0xd055)}, + {USB_DEVICE(0x0af0, 0xd155)}, + {USB_DEVICE(0x0af0, 0xd255)}, + {USB_DEVICE(0x0af0, 0xd057)}, + {USB_DEVICE(0x0af0, 0xd157)}, + {USB_DEVICE(0x0af0, 0xd257)}, + {USB_DEVICE(0x0af0, 0xd357)}, {} }; MODULE_DEVICE_TABLE(usb, hso_ids); @@ -2410,20 +2418,22 @@ static void hso_free_net_device(struct hso_device *hso_dev) if (!hso_net) return; + remove_net_device(hso_net->parent); + + if (hso_net->net) { + unregister_netdev(hso_net->net); + free_netdev(hso_net->net); + } + 
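+ /* The net_device is unregistered above, before the URB and buffer + * pools below are released: until unregister_netdev() returns, the + * rx path may still be using them. (Descriptive comment added for + * clarity; it explains the reordering this hunk performs.) */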
/* start freeing */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); kfree(hso_net->mux_bulk_rx_buf_pool[i]); + hso_net->mux_bulk_rx_buf_pool[i] = NULL; } usb_free_urb(hso_net->mux_bulk_tx_urb); kfree(hso_net->mux_bulk_tx_buf); - - remove_net_device(hso_net->parent); - - if (hso_net->net) { - unregister_netdev(hso_net->net); - free_netdev(hso_net->net); - } + hso_net->mux_bulk_tx_buf = NULL; kfree(hso_dev); } @@ -2526,14 +2536,15 @@ static void hso_create_rfkill(struct hso_device *hso_dev, } /* Creates our network device */ -static struct hso_device *hso_create_net_device(struct usb_interface *interface) +static struct hso_device *hso_create_net_device(struct usb_interface *interface, + int port_spec) { int result, i; struct net_device *net; struct hso_net *hso_net; struct hso_device *hso_dev; - hso_dev = hso_create_device(interface, HSO_INTF_MUX | HSO_PORT_NETWORK); + hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; @@ -2613,12 +2624,12 @@ static void hso_free_tiomget(struct hso_serial *serial) { struct hso_tiocmget *tiocmget = serial->tiocmget; if (tiocmget) { - kfree(tiocmget); if (tiocmget->urb) { usb_free_urb(tiocmget->urb); tiocmget->urb = NULL; } serial->tiocmget = NULL; + kfree(tiocmget); } } @@ -2933,7 +2944,8 @@ static int hso_probe(struct usb_interface *interface, if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { /* Create the network device */ if (!disable_net) { - hso_dev = hso_create_net_device(interface); + hso_dev = hso_create_net_device(interface, + port_spec); if (!hso_dev) goto exit; tmp_dev = hso_dev; @@ -2965,7 +2977,7 @@ static int hso_probe(struct usb_interface *interface, /* It's a regular bulk interface */ if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) && !disable_net) - hso_dev = hso_create_net_device(interface); + hso_dev = hso_create_net_device(interface, port_spec); else hso_dev = hso_create_bulk_serial_device(interface, port_spec); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 7cb10a0a531..3d0d0b0b37c 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -36,7 +36,6 @@ * Run test procedures * Fix bugs from previous two steps * Snoop other OSs for any tricks we're not doing - * SMP locking * Reduce arbitrary timeouts * Smart multicast support * Temporary MAC change support @@ -796,7 +795,7 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) int res; - spin_lock(&kaweth->device_lock); + spin_lock_irq(&kaweth->device_lock); kaweth_async_set_rx_mode(kaweth); netif_stop_queue(net); @@ -814,7 +813,7 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) if (!copied_skb) { kaweth->stats.tx_errors++; netif_start_queue(net); - spin_unlock(&kaweth->device_lock); + spin_unlock_irq(&kaweth->device_lock); return 0; } } @@ -848,7 +847,7 @@ skip: net->trans_start = jiffies; } - spin_unlock(&kaweth->device_lock); + spin_unlock_irq(&kaweth->device_lock); return 0; } diff --git a/drivers/net/vxge/Makefile b/drivers/net/vxge/Makefile new file mode 100644 index 00000000000..8992ca26b27 --- /dev/null +++ b/drivers/net/vxge/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for Neterion Inc's X3100 Series 10 GbE PCIe # I/O +# Virtualized Server Adapter linux driver + +obj-$(CONFIG_VXGE) += vxge.o + +vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c new file mode 100644 index 00000000000..6b41c884a33 --- /dev/null +++ 
b/drivers/net/vxge/vxge-config.c @@ -0,0 +1,5264 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2009 Neterion Inc. + ******************************************************************************/ +#include <linux/vmalloc.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include <linux/pci_hotplug.h> + +#include "vxge-traffic.h" +#include "vxge-config.h" + +/* + * __vxge_hw_channel_allocate - Allocate memory for channel + * This function allocates required memory for the channel and various arrays + * in the channel + */ +struct __vxge_hw_channel* +__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, + enum __vxge_hw_channel_type type, + u32 length, u32 per_dtr_space, void *userdata) +{ + struct __vxge_hw_channel *channel; + struct __vxge_hw_device *hldev; + int size = 0; + u32 vp_id; + + hldev = vph->vpath->hldev; + vp_id = vph->vpath->vp_id; + + switch (type) { + case VXGE_HW_CHANNEL_TYPE_FIFO: + size = sizeof(struct __vxge_hw_fifo); + break; + case VXGE_HW_CHANNEL_TYPE_RING: + size = sizeof(struct __vxge_hw_ring); + break; + default: + break; + } + + channel = kzalloc(size, GFP_KERNEL); + if (channel == NULL) + goto exit0; + INIT_LIST_HEAD(&channel->item); + + channel->common_reg = hldev->common_reg; + channel->first_vp_id = hldev->first_vp_id; + channel->type = type; + channel->devh = hldev; + channel->vph = vph; + channel->userdata = userdata; + channel->per_dtr_space = per_dtr_space; + channel->length = length; + channel->vp_id = vp_id; + + channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->work_arr == NULL) + goto exit1; + + channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->free_arr == NULL) + goto exit1; + channel->free_ptr = length; + + channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->reserve_arr == NULL) + goto exit1; + channel->reserve_ptr = length; + channel->reserve_top = 0; + + channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->orig_arr == NULL) + goto exit1; + + return channel; +exit1: + __vxge_hw_channel_free(channel); + +exit0: + return NULL; +} + +/* + * __vxge_hw_channel_free - Free memory allocated for channel + * This function deallocates memory from the channel and various arrays + * in the channel + */ +void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) +{ + kfree(channel->work_arr); + kfree(channel->free_arr); + kfree(channel->reserve_arr); + kfree(channel->orig_arr); + kfree(channel); +} + +/* + * __vxge_hw_channel_initialize - Initialize a channel + * This function initializes a channel by properly setting the + * various references + */ +enum vxge_hw_status +__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) +{ + u32 i; + struct __vxge_hw_virtualpath *vpath; + + vpath = channel->vph->vpath; + + if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) { + for (i = 
0; i < channel->length; i++) + channel->orig_arr[i] = channel->reserve_arr[i]; + } + + switch (channel->type) { + case VXGE_HW_CHANNEL_TYPE_FIFO: + vpath->fifoh = (struct __vxge_hw_fifo *)channel; + channel->stats = &((struct __vxge_hw_fifo *) + channel)->stats->common_stats; + break; + case VXGE_HW_CHANNEL_TYPE_RING: + vpath->ringh = (struct __vxge_hw_ring *)channel; + channel->stats = &((struct __vxge_hw_ring *) + channel)->stats->common_stats; + break; + default: + break; + } + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_channel_reset - Resets a channel + * This function resets a channel by properly setting the various references + */ +enum vxge_hw_status +__vxge_hw_channel_reset(struct __vxge_hw_channel *channel) +{ + u32 i; + + for (i = 0; i < channel->length; i++) { + if (channel->reserve_arr != NULL) + channel->reserve_arr[i] = channel->orig_arr[i]; + if (channel->free_arr != NULL) + channel->free_arr[i] = NULL; + if (channel->work_arr != NULL) + channel->work_arr[i] = NULL; + } + channel->free_ptr = channel->length; + channel->reserve_ptr = channel->length; + channel->reserve_top = 0; + channel->post_index = 0; + channel->compl_index = 0; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_pci_e_init + * Initialize certain PCI/PCI-X configuration registers + * with recommended values. Save config space for future hw resets. + */ +void +__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev) +{ + u16 cmd = 0; + + /* Set the PErr Response bit and SERR in PCI command register. */ + pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd); + cmd |= 0x140; + pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd); + + pci_save_state(hldev->pdev); + + return; +} + +/* + * __vxge_hw_device_register_poll + * Will poll certain register for specified amount of time. + * Will poll until masked bit is cleared.
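+ * + * Example (illustrative sketch; it mirrors how + * __vxge_hw_device_vpath_reset_in_prog_check() below uses this helper): + * + * status = __vxge_hw_device_register_poll(vpath_rst_in_prog, + * VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff), + * VXGE_HW_DEF_DEVICE_POLL_MILLIS); + * + * The helper busy-waits in 100us steps for roughly 1ms, then retries in + * 1ms mdelay() steps up to max_millis before giving up with VXGE_HW_FAIL.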
+ */ +enum vxge_hw_status +__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) +{ + u64 val64; + u32 i = 0; + enum vxge_hw_status ret = VXGE_HW_FAIL; + + udelay(10); + + do { + val64 = readq(reg); + if (!(val64 & mask)) + return VXGE_HW_OK; + udelay(100); + } while (++i <= 9); + + i = 0; + do { + val64 = readq(reg); + if (!(val64 & mask)) + return VXGE_HW_OK; + mdelay(1); + } while (++i <= max_millis); + + return ret; +} + + /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset + * in progress + * This routine checks that the vpath reset-in-progress register has turned zero + */ +enum vxge_hw_status +__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) +{ + enum vxge_hw_status status; + status = __vxge_hw_device_register_poll(vpath_rst_in_prog, + VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff), + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + return status; +} + +/* + * __vxge_hw_device_toc_get + * This routine sets the swapper and reads the toc pointer and returns the + * memory mapped address of the toc + */ +struct vxge_hw_toc_reg __iomem * +__vxge_hw_device_toc_get(void __iomem *bar0) +{ + u64 val64; + struct vxge_hw_toc_reg __iomem *toc = NULL; + enum vxge_hw_status status; + + struct vxge_hw_legacy_reg __iomem *legacy_reg = + (struct vxge_hw_legacy_reg __iomem *)bar0; + + status = __vxge_hw_legacy_swapper_set(legacy_reg); + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&legacy_reg->toc_first_pointer); + toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64); +exit: + return toc; +} + +/* + * __vxge_hw_device_reg_addr_get + * This routine sets the swapper and reads the toc pointer and initializes the + * register location pointers in the device object. It waits until the ric is + * completed initializing registers.
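+ * + * The KDFC doorbell base is derived from the toc_kdfc register: the + * initial BIR field selects which BAR it lives in (0 -> bar0, 2 -> bar1, + * 4 -> bar2) and the initial-offset field gives the byte offset within + * that BAR, as the switch statement below decodes.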
+ */ +enum vxge_hw_status +__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) +{ + u64 val64; + u32 i; + enum vxge_hw_status status = VXGE_HW_OK; + + hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0; + + hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0); + if (hldev->toc_reg == NULL) { + status = VXGE_HW_FAIL; + goto exit; + } + + val64 = readq(&hldev->toc_reg->toc_common_pointer); + hldev->common_reg = + (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64); + + val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer); + hldev->mrpcim_reg = + (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64); + + for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) { + val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]); + hldev->srpcim_reg[i] = + (struct vxge_hw_srpcim_reg __iomem *) + (hldev->bar0 + val64); + } + + for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) { + val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]); + hldev->vpmgmt_reg[i] = + (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64); + } + + for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) { + val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]); + hldev->vpath_reg[i] = + (struct vxge_hw_vpath_reg __iomem *) + (hldev->bar0 + val64); + } + + val64 = readq(&hldev->toc_reg->toc_kdfc); + + switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) { + case 0: + hldev->kdfc = (u8 __iomem *)(hldev->bar0 + + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64)); + break; + case 2: + hldev->kdfc = (u8 __iomem *)(hldev->bar1 + + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64)); + break; + case 4: + hldev->kdfc = (u8 __iomem *)(hldev->bar2 + + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64)); + break; + default: + break; + } + + status = __vxge_hw_device_vpath_reset_in_prog_check( + (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog); +exit: + return status; +} + +/* + * __vxge_hw_device_id_get + * This routine sets the device id and revision numbers in the device + * structure + */ +void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev) +{ + u64 val64; + + val64 = readq(&hldev->common_reg->titan_asic_id); + hldev->device_id = + (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64); + + hldev->major_revision = + (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64); + + hldev->minor_revision = + (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64); + + return; +} + +/* + * __vxge_hw_device_access_rights_get: Get Access Rights of the driver + * This routine returns the Access Rights of the driver + */ +static u32 +__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id) +{ + u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH; + + switch (host_type) { + case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: + if (func_id == 0) { + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + } + break; + case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + break; + case VXGE_HW_NO_MR_SR_VH0_FUNCTION0: + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + break; + case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION: + case VXGE_HW_SR_VH_VIRTUAL_FUNCTION: + case VXGE_HW_MR_SR_VH0_INVALID_CONFIG: + break; + case VXGE_HW_SR_VH_FUNCTION0: + case VXGE_HW_VH_NORMAL_FUNCTION: + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + break; + } + + return access_rights; +} +/* + * __vxge_hw_device_host_info_get + * This routine returns the host type
assignments + */ +void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) +{ + u64 val64; + u32 i; + + val64 = readq(&hldev->common_reg->host_type_assignments); + + hldev->host_type = + (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); + + hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpath_assignments & vxge_mBIT(i))) + continue; + + hldev->func_id = + __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); + + hldev->access_rights = __vxge_hw_device_access_rights_get( + hldev->host_type, hldev->func_id); + + hldev->first_vp_id = i; + break; + } + + return; +} + +/* + * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as + * link width and signalling rate. + */ +static enum vxge_hw_status +__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) +{ + int exp_cap; + u16 lnk; + + /* Get the negotiated link width and speed from PCI config space */ + exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP); + pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); + + if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) + return VXGE_HW_ERR_INVALID_PCI_INFO; + + switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) { + case PCIE_LNK_WIDTH_RESRV: + case PCIE_LNK_X1: + case PCIE_LNK_X2: + case PCIE_LNK_X4: + case PCIE_LNK_X8: + break; + default: + return VXGE_HW_ERR_INVALID_PCI_INFO; + } + + return VXGE_HW_OK; +} + +static enum vxge_hw_status +__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev) +{ + if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION || + hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION || + hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) && + (hldev->func_id == 0)) + return VXGE_HW_OK; + else + return VXGE_HW_ERR_PRIVILAGED_OPEARATION; +} + +/* + * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calendars. + * Rebalance the RX_WRR and KDFC_WRR calendars.
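+ * + * Slot assignment is bandwidth-weighted: for each vpath the code below + * computes how_often = VXGE_HW_VPATH_BANDWIDTH_MAX / min_bandwidth and + * writes the vpath into every how_often-th free slot of the calendar, so + * (for example) a vpath granted a quarter of the maximum bandwidth ends + * up in roughly every fourth service state.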
+ */ +static enum +vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev) +{ + u64 val64; + u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES]; + u32 i, j, how_often = 1; + enum vxge_hw_status status = VXGE_HW_OK; + + status = __vxge_hw_device_is_privilaged(hldev); + if (status != VXGE_HW_OK) + goto exit; + + /* Reset the priorities assigned to the WRR arbitration + phases for the receive traffic */ + for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++) + writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i)); + + /* Reset the transmit FIFO servicing calendar for FIFOs */ + for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) { + writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i)); + writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i)); + } + + /* Assign WRR priority 0 for all FIFOs */ + for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0), + ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i)); + + writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0), + ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i)); + } + + /* Reset to service non-offload doorbells */ + writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0); + writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1); + + /* Set priority 0 to all receive queues */ + writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0); + writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1); + writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2); + + /* Initialize all the slots as unused */ + for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++) + wrr_states[i] = -1; + + /* Prepare the Fifo service states */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!hldev->config.vp_config[i].min_bandwidth) + continue; + + how_often = VXGE_HW_VPATH_BANDWIDTH_MAX / + hldev->config.vp_config[i].min_bandwidth; + if (how_often) { + + for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) { + if (wrr_states[j] == -1) { + wrr_states[j] = i; + /* Make sure each fifo is serviced + * at least once */ + if (i == j) + j += VXGE_HW_MAX_VIRTUAL_PATHS; + else + j += how_often; + } else + j++; + } + } + } + + /* Fill the unused slots with 0 */ + for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) { + if (wrr_states[j] == -1) + wrr_states[j] = 0; + } + + /* Assign WRR priority number for FIFOs */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i), + ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i)); + + writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i), + ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i)); + } + + /* Modify the servicing algorithm applied to the 3 types of doorbells.
+ i.e., non-offload, message and offload */ + writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) | + VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0), + &hldev->mrpcim_reg->kdfc_entry_type_sel_0); + + writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1), + &hldev->mrpcim_reg->kdfc_entry_type_sel_1); + + for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) { + + val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]); + val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]); + + writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i)); + writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i)); + } + + /* Set up the priorities assigned to receive queues */ + writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) | + VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7), + &hldev->mrpcim_reg->rx_queue_priority_0); + + writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) | + VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15), + &hldev->mrpcim_reg->rx_queue_priority_1); + + writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16), + &hldev->mrpcim_reg->rx_queue_priority_2); + + /* Initialize all the slots as unused */ + for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++) + wrr_states[i] = -1; + + /* Prepare the Ring service states */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!hldev->config.vp_config[i].min_bandwidth) + continue; + + how_often = VXGE_HW_VPATH_BANDWIDTH_MAX / + hldev->config.vp_config[i].min_bandwidth; + + if (how_often) { + for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) { + if (wrr_states[j] == -1) { + wrr_states[j] = i; + /* Make sure each ring is + * serviced at least once */ + if (i == j) + j += VXGE_HW_MAX_VIRTUAL_PATHS; + else + j += how_often; + } else + j++; + } + } + } + + /* Fill the unused slots with 0 */ + for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) { + if (wrr_states[j] == -1) + wrr_states[j] = 0; + } + + for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) { + val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0( + wrr_states[j++]); + val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1( + wrr_states[j++]); + val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2( + wrr_states[j++]); + val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3( + wrr_states[j++]); + val64 |=
VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4( + wrr_states[j++]); + val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5( + wrr_states[j++]); + val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6( + wrr_states[j++]); + val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7( + wrr_states[j++]); + + writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i)); + } +exit: + return status; +} + +/* + * __vxge_hw_device_initialize + * Initialize Titan-V hardware. + */ +enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + /* Validate the pci-e link width and speed */ + status = __vxge_hw_verify_pci_e_info(hldev); + if (status != VXGE_HW_OK) + goto exit; + + vxge_hw_wrr_rebalance(hldev); +exit: + return status; +} + +/** + * vxge_hw_device_hw_info_get - Get the hw information + * Returns the vpath mask that has the bits set for each vpath allocated + * for the driver, FW version information and the first MAC address for + * each vpath + */ +enum vxge_hw_status __devinit +vxge_hw_device_hw_info_get(void __iomem *bar0, + struct vxge_hw_device_hw_info *hw_info) +{ + u32 i; + u64 val64; + struct vxge_hw_toc_reg __iomem *toc; + struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; + struct vxge_hw_common_reg __iomem *common_reg; + struct vxge_hw_vpath_reg __iomem *vpath_reg; + struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; + enum vxge_hw_status status; + + memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); + + toc = __vxge_hw_device_toc_get(bar0); + if (toc == NULL) { + status = VXGE_HW_ERR_CRITICAL; + goto exit; + } + + val64 = readq(&toc->toc_common_pointer); + common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64); + + status = __vxge_hw_device_vpath_reset_in_prog_check( + (u64 __iomem *)&common_reg->vpath_rst_in_prog); + if (status != VXGE_HW_OK) + goto exit; + + hw_info->vpath_mask = readq(&common_reg->vpath_assignments); + + val64 = readq(&common_reg->host_type_assignments); + + hw_info->host_type = + (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!((hw_info->vpath_mask) & vxge_mBIT(i))) + continue; + + val64 = readq(&toc->toc_vpmgmt_pointer[i]); + + vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) + (bar0 + val64); + + hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); + if (__vxge_hw_device_access_rights_get(hw_info->host_type, + hw_info->func_id) & + VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { + + val64 = readq(&toc->toc_mrpcim_pointer); + + mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *) + (bar0 + val64); + + writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask); + wmb(); + } + + val64 = readq(&toc->toc_vpath_pointer[i]); + + vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); + + hw_info->function_mode = + __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); + + status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); + if (status != VXGE_HW_OK) + goto exit; + + break; + } + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!((hw_info->vpath_mask) & vxge_mBIT(i))) + continue; + + val64 = readq(&toc->toc_vpath_pointer[i]); + vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); + + status = __vxge_hw_vpath_addr_get(i, vpath_reg, + hw_info->mac_addrs[i], + hw_info->mac_addr_masks[i]); + if (status != VXGE_HW_OK) + goto exit; + } +exit: + return status; +} + +/* + * 
vxge_hw_device_initialize - Initialize Titan device. + * Initialize Titan device. Note that all the arguments of this public API + * are 'IN', including @hldev. Driver cooperates with + * OS to find new Titan device, locate its PCI and memory spaces. + * + * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW + * to enable the latter to perform Titan hardware initialization. + */ +enum vxge_hw_status __devinit +vxge_hw_device_initialize( + struct __vxge_hw_device **devh, + struct vxge_hw_device_attr *attr, + struct vxge_hw_device_config *device_config) +{ + u32 i; + u32 nblocks = 0; + struct __vxge_hw_device *hldev = NULL; + enum vxge_hw_status status = VXGE_HW_OK; + + status = __vxge_hw_device_config_check(device_config); + if (status != VXGE_HW_OK) + goto exit; + + hldev = (struct __vxge_hw_device *) + vmalloc(sizeof(struct __vxge_hw_device)); + if (hldev == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + memset(hldev, 0, sizeof(struct __vxge_hw_device)); + hldev->magic = VXGE_HW_DEVICE_MAGIC; + + vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); + + /* apply config */ + memcpy(&hldev->config, device_config, + sizeof(struct vxge_hw_device_config)); + + hldev->bar0 = attr->bar0; + hldev->bar1 = attr->bar1; + hldev->bar2 = attr->bar2; + hldev->pdev = attr->pdev; + + hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up; + hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down; + hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err; + + __vxge_hw_device_pci_e_init(hldev); + + status = __vxge_hw_device_reg_addr_get(hldev); + if (status != VXGE_HW_OK) + goto exit; + __vxge_hw_device_id_get(hldev); + + __vxge_hw_device_host_info_get(hldev); + + /* Incrementing for stats blocks */ + nblocks++; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpath_assignments & vxge_mBIT(i))) + continue; + + if (device_config->vp_config[i].ring.enable == + VXGE_HW_RING_ENABLE) + nblocks += device_config->vp_config[i].ring.ring_blocks; + + if (device_config->vp_config[i].fifo.enable == + VXGE_HW_FIFO_ENABLE) + nblocks += device_config->vp_config[i].fifo.fifo_blocks; + nblocks++; + } + + if (__vxge_hw_blockpool_create(hldev, + &hldev->block_pool, + device_config->dma_blockpool_initial + nblocks, + device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) { + + vxge_hw_device_terminate(hldev); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + status = __vxge_hw_device_initialize(hldev); + + if (status != VXGE_HW_OK) { + vxge_hw_device_terminate(hldev); + goto exit; + } + + *devh = hldev; +exit: + return status; +} + +/* + * vxge_hw_device_terminate - Terminate Titan device. + * Terminate HW device. + */ +void +vxge_hw_device_terminate(struct __vxge_hw_device *hldev) +{ + vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC); + + hldev->magic = VXGE_HW_DEVICE_DEAD; + __vxge_hw_blockpool_destroy(&hldev->block_pool); + vfree(hldev); +} + +/* + * vxge_hw_device_stats_get - Get the device hw statistics. + * Returns the vpath h/w stats for the device. 
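+ * + * For every deployed and open vpath, the previous counters are first + * copied into hw_stats_sav before the fresh hardware values are read, + * so the two snapshots can be compared, e.g. to derive per-interval + * deltas (see the loop below).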
+ */ +enum vxge_hw_status +vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, + struct vxge_hw_device_stats_hw_info *hw_stats) +{ + u32 i; + enum vxge_hw_status status = VXGE_HW_OK; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || + (hldev->virtual_paths[i].vp_open == + VXGE_HW_VP_NOT_OPEN)) + continue; + + memcpy(hldev->virtual_paths[i].hw_stats_sav, + hldev->virtual_paths[i].hw_stats, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + status = __vxge_hw_vpath_stats_get( + &hldev->virtual_paths[i], + hldev->virtual_paths[i].hw_stats); + } + + memcpy(hw_stats, &hldev->stats.hw_dev_info_stats, + sizeof(struct vxge_hw_device_stats_hw_info)); + + return status; +} + +/* + * vxge_hw_driver_stats_get - Get the device sw statistics. + * Returns the vpath s/w stats for the device. + */ +enum vxge_hw_status vxge_hw_driver_stats_get( + struct __vxge_hw_device *hldev, + struct vxge_hw_device_stats_sw_info *sw_stats) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, + sizeof(struct vxge_hw_device_stats_sw_info)); + + return status; +} + +/* + * vxge_hw_mrpcim_stats_access - Access the statistics from the given location + * and offset and perform an operation + * Get the statistics from the given location and offset. + */ +enum vxge_hw_status +vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev, + u32 operation, u32 location, u32 offset, u64 *stat) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + status = __vxge_hw_device_is_privilaged(hldev); + if (status != VXGE_HW_OK) + goto exit; + + val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) | + VXGE_HW_XMAC_STATS_SYS_CMD_STROBE | + VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) | + VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset); + + status = __vxge_hw_pio_mem_write64(val64, + &hldev->mrpcim_reg->xmac_stats_sys_cmd, + VXGE_HW_XMAC_STATS_SYS_CMD_STROBE, + hldev->config.device_poll_millis); + + if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) + *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data); + else + *stat = 0; +exit: + return status; +} + +/* + * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port + * Get the Statistics on aggregate port + */ +enum vxge_hw_status +vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, + struct vxge_hw_xmac_aggr_stats *aggr_stats) +{ + u64 *val64; + int i; + u32 offset = VXGE_HW_STATS_AGGRn_OFFSET; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = (u64 *)aggr_stats; + + status = __vxge_hw_device_is_privilaged(hldev); + if (status != VXGE_HW_OK) + goto exit; + + for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) { + status = vxge_hw_mrpcim_stats_access(hldev, + VXGE_HW_STATS_OP_READ, + VXGE_HW_STATS_LOC_AGGR, + ((offset + (104 * port)) >> 3), val64); + if (status != VXGE_HW_OK) + goto exit; + + offset += 8; + val64++; + } +exit: + return status; +} + +/* + * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port + * Get the Statistics on port + */ +enum vxge_hw_status +vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, + struct vxge_hw_xmac_port_stats *port_stats) +{ + u64 *val64; + enum vxge_hw_status status = VXGE_HW_OK; + int i; + u32 offset = 0x0; + val64 = (u64 *) port_stats; + + status = __vxge_hw_device_is_privilaged(hldev); + if (status != VXGE_HW_OK) + goto exit; + + for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) { + status = 
vxge_hw_mrpcim_stats_access(hldev, + VXGE_HW_STATS_OP_READ, + VXGE_HW_STATS_LOC_AGGR, + ((offset + (608 * port)) >> 3), val64); + if (status != VXGE_HW_OK) + goto exit; + + offset += 8; + val64++; + } + +exit: + return status; +} + +/* + * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics + * Get the XMAC Statistics + */ +enum vxge_hw_status +vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, + struct vxge_hw_xmac_stats *xmac_stats) +{ + enum vxge_hw_status status = VXGE_HW_OK; + u32 i; + + status = vxge_hw_device_xmac_aggr_stats_get(hldev, + 0, &xmac_stats->aggr_stats[0]); + + if (status != VXGE_HW_OK) + goto exit; + + status = vxge_hw_device_xmac_aggr_stats_get(hldev, + 1, &xmac_stats->aggr_stats[1]); + if (status != VXGE_HW_OK) + goto exit; + + for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { + + status = vxge_hw_device_xmac_port_stats_get(hldev, + i, &xmac_stats->port_stats[i]); + if (status != VXGE_HW_OK) + goto exit; + } + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; + + status = __vxge_hw_vpath_xmac_tx_stats_get( + &hldev->virtual_paths[i], + &xmac_stats->vpath_tx_stats[i]); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_xmac_rx_stats_get( + &hldev->virtual_paths[i], + &xmac_stats->vpath_rx_stats[i]); + if (status != VXGE_HW_OK) + goto exit; + } +exit: + return status; +} + +/* + * vxge_hw_device_debug_set - Set the debug module, level and timestamp + * This routine is used to dynamically change the debug output + */ +void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev, + enum vxge_debug_level level, u32 mask) +{ + if (hldev == NULL) + return; + +#if defined(VXGE_DEBUG_TRACE_MASK) || \ + defined(VXGE_DEBUG_ERR_MASK) + hldev->debug_module_mask = mask; + hldev->debug_level = level; +#endif + +#if defined(VXGE_DEBUG_ERR_MASK) + hldev->level_err = level & VXGE_ERR; +#endif + +#if defined(VXGE_DEBUG_TRACE_MASK) + hldev->level_trace = level & VXGE_TRACE; +#endif +} + +/* + * vxge_hw_device_error_level_get - Get the error level + * This routine returns the current error level set + */ +u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev) +{ +#if defined(VXGE_DEBUG_ERR_MASK) + if (hldev == NULL) + return VXGE_ERR; + else + return hldev->level_err; +#else + return 0; +#endif +} + +/* + * vxge_hw_device_trace_level_get - Get the trace level + * This routine returns the current trace level set + */ +u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev) +{ +#if defined(VXGE_DEBUG_TRACE_MASK) + if (hldev == NULL) + return VXGE_TRACE; + else + return hldev->level_trace; +#else + return 0; +#endif +} +/* + * vxge_hw_device_debug_mask_get - Get the debug mask + * This routine returns the current debug mask set + */ +u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev) +{ +#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK) + if (hldev == NULL) + return 0; + return hldev->debug_module_mask; +#else + return 0; +#endif +} + +/* + * vxge_hw_device_getpause_data - Pause frame generation and reception. + * Returns the Pause frame generation and reception capability of the NIC.
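+ * + * Example (illustrative, assuming an already-initialized handle hldev): + * + * u32 tx = 0, rx = 0; + * status = vxge_hw_device_getpause_data(hldev, 0, &tx, &rx); + * + * Note the outputs are only ever set to 1, never cleared, so callers + * should zero-initialize tx and rx as above.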
+ */ +enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev, + u32 port, u32 *tx, u32 *rx) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { + status = VXGE_HW_ERR_INVALID_PORT; + goto exit; + } + + if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { + status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + goto exit; + } + + val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); + if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN) + *tx = 1; + if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN) + *rx = 1; +exit: + return status; +} + +/* + * vxge_hw_device_setpause_data - set/reset pause frame generation. + * It can be used to set or reset Pause frame generation or reception + * support of the NIC. + */ + +enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, + u32 port, u32 tx, u32 rx) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { + status = VXGE_HW_ERR_INVALID_PORT; + goto exit; + } + + status = __vxge_hw_device_is_privilaged(hldev); + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); + if (tx) + val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; + else + val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; + if (rx) + val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; + else + val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; + + writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); +exit: + return status; +} + +u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev) +{ + int link_width, exp_cap; + u16 lnk; + + exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP); + pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); + link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; + return link_width; +} + +/* + * __vxge_hw_ring_block_memblock_idx - Return the memblock index + * This function returns the index of memory block + */ +static inline u32 +__vxge_hw_ring_block_memblock_idx(u8 *block) +{ + return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)); +} + +/* + * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index + * This function sets index to a memory block + */ +static inline void +__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx) +{ + *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx; +} + +/* + * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer + * in RxD block + * Sets the next block pointer in RxD block + */ +static inline void +__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next) +{ + *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next; +} + +/* + * __vxge_hw_ring_first_block_address_get - Returns the dma address of the + * first block + * Returns the dma address of the first RxD block + */ +u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) +{ + struct vxge_hw_mempool_dma *dma_object; + + dma_object = ring->mempool->memblocks_dma_arr; + vxge_assert(dma_object != NULL); + + return dma_object->addr; +} + +/* + * __vxge_hw_ring_item_dma_addr - Return the dma address of an item + * This function returns the dma address of a given item + */ +static 
dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh, + void *item) +{ + u32 memblock_idx; + void *memblock; + struct vxge_hw_mempool_dma *memblock_dma_object; + ptrdiff_t dma_item_offset; + + /* get owner memblock index */ + memblock_idx = __vxge_hw_ring_block_memblock_idx(item); + + /* get owner memblock by memblock index */ + memblock = mempoolh->memblocks_arr[memblock_idx]; + + /* get memblock DMA object by memblock index */ + memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx; + + /* calculate offset in the memblock of this item */ + dma_item_offset = (u8 *)item - (u8 *)memblock; + + return memblock_dma_object->addr + dma_item_offset; +} + +/* + * __vxge_hw_ring_rxdblock_link - Link the RxD blocks + * This function links the 'from' RxD block to the 'to' RxD block + */ +static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh, + struct __vxge_hw_ring *ring, u32 from, + u32 to) +{ + u8 *to_item , *from_item; + dma_addr_t to_dma; + + /* get "from" RxD block */ + from_item = mempoolh->items_arr[from]; + vxge_assert(from_item); + + /* get "to" RxD block */ + to_item = mempoolh->items_arr[to]; + vxge_assert(to_item); + + /* return address of the beginning of previous RxD block */ + to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item); + + /* set next pointer for this RxD block to point on + * previous item's DMA start address */ + __vxge_hw_ring_block_next_pointer_set(from_item, to_dma); +} + +/* + * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD + * block callback + * This function is a callback passed to __vxge_hw_mempool_create to create + * the memory pool for RxD blocks + */ +static void +__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh, + u32 memblock_index, + struct vxge_hw_mempool_dma *dma_object, + u32 index, u32 is_last) +{ + u32 i; + void *item = mempoolh->items_arr[index]; + struct __vxge_hw_ring *ring = + (struct __vxge_hw_ring *)mempoolh->userdata; + + /* format rxds array */ + for (i = 0; i < ring->rxds_per_block; i++) { + void *rxdblock_priv; + void *uld_priv; + struct vxge_hw_ring_rxd_1 *rxdp; + + u32 reserve_index = ring->channel.reserve_ptr - + (index * ring->rxds_per_block + i + 1); + u32 memblock_item_idx; + + ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + + i * ring->rxd_size; + + /* Note: memblock_item_idx is index of the item within + * the memblock. For instance, in case of three RxD-blocks + * per memblock this value can be 0, 1 or 2.
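+ * The per-descriptor private area (uld_priv below) is carved out of + * the memblock's separately allocated private part; its address is + * pre-formatted into rxdp->host_control so that the completion path + * can recover the private data from the descriptor alone.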
*/ + rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh, + memblock_index, item, + &memblock_item_idx); + + rxdp = (struct vxge_hw_ring_rxd_1 *) + ring->channel.reserve_arr[reserve_index]; + + uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i); + + /* pre-format Host_Control */ + rxdp->host_control = (u64)(size_t)uld_priv; + } + + __vxge_hw_ring_block_memblock_idx_set(item, memblock_index); + + if (is_last) { + /* link last one with first one */ + __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0); + } + + if (index > 0) { + /* link this RxD block with previous one */ + __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index); + } + + return; +} + +/* + * vxge_hw_ring_replenish - Initial replenish of RxDs + * This function replenishes the RxDs from the reserve array to the work array + */ +enum vxge_hw_status +vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag) +{ + void *rxd; + int i = 0; + struct __vxge_hw_channel *channel; + enum vxge_hw_status status = VXGE_HW_OK; + + channel = &ring->channel; + + while (vxge_hw_channel_dtr_count(channel) > 0) { + + status = vxge_hw_ring_rxd_reserve(ring, &rxd); + + vxge_assert(status == VXGE_HW_OK); + + if (ring->rxd_init) { + status = ring->rxd_init(rxd, channel->userdata); + if (status != VXGE_HW_OK) { + vxge_hw_ring_rxd_free(ring, rxd); + goto exit; + } + } + + vxge_hw_ring_rxd_post(ring, rxd); + if (min_flag) { + i++; + if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION) + break; + } + } + status = VXGE_HW_OK; +exit: + return status; +} + +/* + * __vxge_hw_ring_create - Create a Ring + * This function creates a Ring and initializes it. + * + */ +enum vxge_hw_status +__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, + struct vxge_hw_ring_attr *attr) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_ring *ring; + u32 ring_length; + struct vxge_hw_ring_config *config; + struct __vxge_hw_device *hldev; + u32 vp_id; + struct vxge_hw_mempool_cbs ring_mp_callback; + + if ((vp == NULL) || (attr == NULL)) { + status = VXGE_HW_FAIL; + goto exit; + } + + hldev = vp->vpath->hldev; + vp_id = vp->vpath->vp_id; + + config = &hldev->config.vp_config[vp_id].ring; + + ring_length = config->ring_blocks * + vxge_hw_ring_rxds_per_block_get(config->buffer_mode); + + ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, + VXGE_HW_CHANNEL_TYPE_RING, + ring_length, + attr->per_rxd_space, + attr->userdata); + + if (ring == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + vp->vpath->ringh = ring; + ring->vp_id = vp_id; + ring->vp_reg = vp->vpath->vp_reg; + ring->common_reg = hldev->common_reg; + ring->stats = &vp->vpath->sw_stats->ring_stats; + ring->config = config; + ring->callback = attr->callback; + ring->rxd_init = attr->rxd_init; + ring->rxd_term = attr->rxd_term; + ring->buffer_mode = config->buffer_mode; + ring->rxds_limit = config->rxds_limit; + + ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); + ring->rxd_priv_size = + sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; + ring->per_rxd_space = attr->per_rxd_space; + + ring->rxd_priv_size = + ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / + VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; + + /* how many RxDs can fit into one block. Depends on configured + * buffer_mode.
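+ * The capacity is queried via vxge_hw_ring_rxds_per_block_get() + * rather than hard-coded, since a VXGE_HW_BLOCK_SIZE block holds a + * different number of descriptors depending on the buffer mode.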
*/
+ ring->rxds_per_block =
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+ /* calculate actual RxD block private size */
+ ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+ ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
+ ring->mempool = __vxge_hw_mempool_create(hldev,
+ VXGE_HW_BLOCK_SIZE,
+ VXGE_HW_BLOCK_SIZE,
+ ring->rxdblock_priv_size,
+ ring->config->ring_blocks,
+ ring->config->ring_blocks,
+ &ring_mp_callback,
+ ring);
+
+ if (ring->mempool == NULL) {
+ __vxge_hw_ring_delete(vp);
+ return VXGE_HW_ERR_OUT_OF_MEMORY;
+ }
+
+ status = __vxge_hw_channel_initialize(&ring->channel);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+
+ /* Note:
+ * Specifying rxd_init callback means two things:
+ * 1) rxds need to be initialized by driver at channel-open time;
+ * 2) rxds need to be posted at channel-open time
+ * (that's what the initial_replenish() below does)
+ * Currently we don't have a case when the 1) is done without the 2).
+ */
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring, 1);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+ }
+
+ /* initial replenish will increment the counter in its post() routine,
+ * we have to reset it */
+ ring->stats->common_stats.usage_cnt = 0;
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_ring_abort - Returns the RxDs
+ * This function terminates the RxDs of the ring
+ */
+enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+{
+ void *rxdh;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(channel, &rxdh);
+
+ if (rxdh == NULL)
+ break;
+
+ vxge_hw_channel_dtr_complete(channel);
+
+ if (ring->rxd_term)
+ ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
+ channel->userdata);
+
+ vxge_hw_channel_dtr_free(channel, rxdh);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_ring_reset - Resets the ring
+ * This function resets the ring during vpath reset operation
+ */
+enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ __vxge_hw_ring_abort(ring);
+
+ status = __vxge_hw_channel_reset(channel);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring, 1);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_ring_delete - Removes the ring
+ * This function frees up the memory pool and removes the ring
+ */
+enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_ring *ring = vp->vpath->ringh;
+
+ __vxge_hw_ring_abort(ring);
+
+ if (ring->mempool)
+ __vxge_hw_mempool_destroy(ring->mempool);
+
+ vp->vpath->ringh = NULL;
+ __vxge_hw_channel_free(&ring->channel);
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_mempool_grow
+ * Will grow the mempool by up to %num_allocate memory blocks.
+ */
+enum vxge_hw_status
+__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
+ u32 *num_allocated)
+{
+ u32 i, first_time = mempool->memblocks_allocated == 0 ?
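	/* A sketch of what the first_time flag guards, inferred from
	 * the loop that follows: only the very first grow stops filling
	 * items_arr once items_initial entries exist,
	 *
	 *	first_time = (memblocks_allocated == 0);
	 *	if (first_time && index >= items_initial)
	 *		break;
	 *
	 * while later grows populate every slot of each new memblock. */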
1 : 0;
+ u32 n_items = mempool->items_per_memblock;
+ u32 start_block_idx = mempool->memblocks_allocated;
+ u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ *num_allocated = 0;
+
+ if (end_block_idx > mempool->memblocks_max) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ for (i = start_block_idx; i < end_block_idx; i++) {
+ u32 j;
+ u32 is_last = ((end_block_idx - 1) == i);
+ struct vxge_hw_mempool_dma *dma_object =
+ mempool->memblocks_dma_arr + i;
+ void *the_memblock;
+
+ /* allocate memblock's private part. Each DMA memblock
+ * has a space allocated for the items' private usage upon
+ * mempool's user request. Each time the mempool grows, it
+ * allocates a new memblock and its private part at once.
+ * This helps to minimize memory usage a lot. */
+ mempool->memblocks_priv_arr[i] =
+ vmalloc(mempool->items_priv_size * n_items);
+ if (mempool->memblocks_priv_arr[i] == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ memset(mempool->memblocks_priv_arr[i], 0,
+ mempool->items_priv_size * n_items);
+
+ /* allocate DMA-capable memblock */
+ mempool->memblocks_arr[i] =
+ __vxge_hw_blockpool_malloc(mempool->devh,
+ mempool->memblock_size, dma_object);
+ if (mempool->memblocks_arr[i] == NULL) {
+ vfree(mempool->memblocks_priv_arr[i]);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ (*num_allocated)++;
+ mempool->memblocks_allocated++;
+
+ memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
+
+ the_memblock = mempool->memblocks_arr[i];
+
+ /* fill the items hash array */
+ for (j = 0; j < n_items; j++) {
+ u32 index = i * n_items + j;
+
+ if (first_time && index >= mempool->items_initial)
+ break;
+
+ mempool->items_arr[index] =
+ ((char *)the_memblock + j*mempool->item_size);
+
+ /* let the caller do more work on each item */
+ if (mempool->item_func_alloc != NULL)
+ mempool->item_func_alloc(mempool, i,
+ dma_object, index, is_last);
+
+ mempool->items_current = index + 1;
+ }
+
+ if (first_time && mempool->items_current ==
+ mempool->items_initial)
+ break;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_mempool_create
+ * This function creates a memory pool object. The pool may grow but will
+ * never shrink. The pool consists of a number of dynamically allocated
+ * blocks with size enough to hold %items_initial number of items. Memory
+ * is DMA-able but the client must map/unmap before interoperating with
+ * the device.
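 *
 * (A small sketch of the sizing rule applied below, using the fields
 * computed in this function; the numbers are illustrative only:
 *
 *	memblocks_max = (items_max + items_per_memblock - 1) /
 *				items_per_memblock;
 *
 * e.g. 100 items at 32 items per memblock needs ceil(100/32) = 4
 * memblocks.)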
+ */ +struct vxge_hw_mempool* +__vxge_hw_mempool_create( + struct __vxge_hw_device *devh, + u32 memblock_size, + u32 item_size, + u32 items_priv_size, + u32 items_initial, + u32 items_max, + struct vxge_hw_mempool_cbs *mp_callback, + void *userdata) +{ + enum vxge_hw_status status = VXGE_HW_OK; + u32 memblocks_to_allocate; + struct vxge_hw_mempool *mempool = NULL; + u32 allocated; + + if (memblock_size < item_size) { + status = VXGE_HW_FAIL; + goto exit; + } + + mempool = (struct vxge_hw_mempool *) + vmalloc(sizeof(struct vxge_hw_mempool)); + if (mempool == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + memset(mempool, 0, sizeof(struct vxge_hw_mempool)); + + mempool->devh = devh; + mempool->memblock_size = memblock_size; + mempool->items_max = items_max; + mempool->items_initial = items_initial; + mempool->item_size = item_size; + mempool->items_priv_size = items_priv_size; + mempool->item_func_alloc = mp_callback->item_func_alloc; + mempool->userdata = userdata; + + mempool->memblocks_allocated = 0; + + mempool->items_per_memblock = memblock_size / item_size; + + mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / + mempool->items_per_memblock; + + /* allocate array of memblocks */ + mempool->memblocks_arr = + (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); + if (mempool->memblocks_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + memset(mempool->memblocks_arr, 0, + sizeof(void *) * mempool->memblocks_max); + + /* allocate array of private parts of items per memblocks */ + mempool->memblocks_priv_arr = + (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); + if (mempool->memblocks_priv_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + memset(mempool->memblocks_priv_arr, 0, + sizeof(void *) * mempool->memblocks_max); + + /* allocate array of memblocks DMA objects */ + mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) + vmalloc(sizeof(struct vxge_hw_mempool_dma) * + mempool->memblocks_max); + + if (mempool->memblocks_dma_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + memset(mempool->memblocks_dma_arr, 0, + sizeof(struct vxge_hw_mempool_dma) * + mempool->memblocks_max); + + /* allocate hash array of items */ + mempool->items_arr = + (void **) vmalloc(sizeof(void *) * mempool->items_max); + if (mempool->items_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max); + + /* calculate initial number of memblocks */ + memblocks_to_allocate = (mempool->items_initial + + mempool->items_per_memblock - 1) / + mempool->items_per_memblock; + + /* pre-allocate the mempool */ + status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate, + &allocated); + if (status != VXGE_HW_OK) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + +exit: + return mempool; +} + +/* + * vxge_hw_mempool_destroy + */ +void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) +{ + u32 i, j; + struct __vxge_hw_device *devh = mempool->devh; + + for (i = 0; i < mempool->memblocks_allocated; i++) { + struct vxge_hw_mempool_dma *dma_object; + + vxge_assert(mempool->memblocks_arr[i]); + vxge_assert(mempool->memblocks_dma_arr 
+ i); + + dma_object = mempool->memblocks_dma_arr + i; + + for (j = 0; j < mempool->items_per_memblock; j++) { + u32 index = i * mempool->items_per_memblock + j; + + /* to skip last partially filled(if any) memblock */ + if (index >= mempool->items_current) + break; + } + + vfree(mempool->memblocks_priv_arr[i]); + + __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], + mempool->memblock_size, dma_object); + } + + if (mempool->items_arr) + vfree(mempool->items_arr); + + if (mempool->memblocks_dma_arr) + vfree(mempool->memblocks_dma_arr); + + if (mempool->memblocks_priv_arr) + vfree(mempool->memblocks_priv_arr); + + if (mempool->memblocks_arr) + vfree(mempool->memblocks_arr); + + vfree(mempool); +} + +/* + * __vxge_hw_device_fifo_config_check - Check fifo configuration. + * Check the fifo configuration + */ +enum vxge_hw_status +__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) +{ + if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || + (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) + return VXGE_HW_BADCFG_FIFO_BLOCKS; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_vpath_config_check - Check vpath configuration. + * Check the vpath configuration + */ +enum vxge_hw_status +__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) +{ + enum vxge_hw_status status; + + if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || + (vp_config->min_bandwidth > + VXGE_HW_VPATH_BANDWIDTH_MAX)) + return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; + + status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); + if (status != VXGE_HW_OK) + return status; + + if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && + ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || + (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) + return VXGE_HW_BADCFG_VPATH_MTU; + + if ((vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && + (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && + (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) + return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_config_check - Check device configuration. + * Check the device configuration + */ +enum vxge_hw_status +__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) +{ + u32 i; + enum vxge_hw_status status; + + if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) + return VXGE_HW_BADCFG_INTR_MODE; + + if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && + (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) + return VXGE_HW_BADCFG_RTS_MAC_EN; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + status = __vxge_hw_device_vpath_config_check( + &new_config->vp_config[i]); + if (status != VXGE_HW_OK) + return status; + } + + return VXGE_HW_OK; +} + +/* + * vxge_hw_device_config_default_get - Initialize device config with defaults. + * Initialize Titan device config with default values. 
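 *
 * (Callers would typically start from these defaults and then run the
 * result through __vxge_hw_device_config_check() above; a minimal,
 * illustrative usage sketch with a hypothetical local:
 *
 *	struct vxge_hw_device_config cfg;
 *
 *	vxge_hw_device_config_default_get(&cfg);
 *	cfg.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *	status = __vxge_hw_device_config_check(&cfg);
 *
 * and only proceed when status is VXGE_HW_OK.)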
+ */ +enum vxge_hw_status __devinit +vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) +{ + u32 i; + + device_config->dma_blockpool_initial = + VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; + device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; + device_config->intr_mode = VXGE_HW_INTR_MODE_DEF; + device_config->rth_en = VXGE_HW_RTH_DEFAULT; + device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT; + device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS; + device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + device_config->vp_config[i].vp_id = i; + + device_config->vp_config[i].min_bandwidth = + VXGE_HW_VPATH_BANDWIDTH_DEFAULT; + + device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT; + + device_config->vp_config[i].ring.ring_blocks = + VXGE_HW_DEF_RING_BLOCKS; + + device_config->vp_config[i].ring.buffer_mode = + VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT; + + device_config->vp_config[i].ring.scatter_mode = + VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT; + + device_config->vp_config[i].ring.rxds_limit = + VXGE_HW_DEF_RING_RXDS_LIMIT; + + device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE; + + device_config->vp_config[i].fifo.fifo_blocks = + VXGE_HW_MIN_FIFO_BLOCKS; + + device_config->vp_config[i].fifo.max_frags = + VXGE_HW_MAX_FIFO_FRAGS; + + device_config->vp_config[i].fifo.memblock_size = + VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE; + + device_config->vp_config[i].fifo.alignment_size = + VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE; + + device_config->vp_config[i].fifo.intr = + VXGE_HW_FIFO_QUEUE_INTR_DEFAULT; + + device_config->vp_config[i].fifo.no_snoop_bits = + VXGE_HW_FIFO_NO_SNOOP_DEFAULT; + device_config->vp_config[i].tti.intr_enable = + VXGE_HW_TIM_INTR_DEFAULT; + + device_config->vp_config[i].tti.btimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.timer_ac_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.timer_ci_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.timer_ri_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.rtimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.util_sel = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.ltimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.urange_a = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_a = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.urange_b = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_b = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.urange_c = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_c = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_d = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.intr_enable = + VXGE_HW_TIM_INTR_DEFAULT; + + device_config->vp_config[i].rti.btimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.timer_ac_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.timer_ci_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.timer_ri_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.rtimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.util_sel = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.ltimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.urange_a = + 
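		/* VXGE_HW_USE_FLASH_DEFAULT is a sentinel rather than a
		 * real timer value: the TIM configuration code later in
		 * this file only programs a field when it differs from
		 * the sentinel, e.g. (condensed from
		 * __vxge_hw_vpath_tim_configure below):
		 *
		 *	if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT)
		 *		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
		 *				config->rti.urange_a);
		 *
		 * so each default below means "keep the flash value". */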
VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_a =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.urange_b =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_b =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.urange_c =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_c =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_d =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].mtu =
+ VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
+
+ device_config->vp_config[i].rpa_strip_vlan_tag =
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Set the swapper bits appropriately for the legacy section.
+ */
+enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ wmb();
+
+ switch (val64) {
+
+ case VXGE_HW_SWAPPER_INITIAL_VALUE:
+ return status;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+ }
+
+ wmb();
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
+ status = VXGE_HW_ERR_SWAPPER_CTRL;
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
+ * Set the swapper bits appropriately for the vpath.
+ */
+enum vxge_hw_status
+__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
+{
+#ifndef __BIG_ENDIAN
+ u64 val64;
+
+ val64 = readq(&vpath_reg->vpath_general_cfg1);
+ wmb();
+ val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
+ writeq(val64, &vpath_reg->vpath_general_cfg1);
+ wmb();
+#endif
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
+ * Set the swapper bits appropriately for the kdfc.
+ */
+enum vxge_hw_status
+__vxge_hw_kdfc_swapper_set(
+ struct vxge_hw_legacy_reg __iomem *legacy_reg,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg)
+{
+ u64 val64;
+
+ val64 = readq(&legacy_reg->pifm_wr_swap_en);
+
+ if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
+ val64 = readq(&vpath_reg->kdfcctl_cfg0);
+ wmb();
+
+ val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
+ VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
+ VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
+
+ writeq(val64, &vpath_reg->kdfcctl_cfg0);
+ wmb();
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * vxge_hw_mgmt_device_config - Retrieve device configuration.
+ * Get device configuration. Permits retrieving, at run time, the
+ * configuration values that were used to initialize and configure
+ * the device.
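 *
 * (A minimal caller sketch; the local is illustrative. Note the size
 * argument must be the exact structure size, or the implementation
 * below returns VXGE_HW_ERR_VERSION_CONFLICT:
 *
 *	struct vxge_hw_device_config cfg;
 *
 *	status = vxge_hw_mgmt_device_config(hldev, &cfg,
 *			sizeof(struct vxge_hw_device_config));
 * )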
+ */
+enum vxge_hw_status
+vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
+ struct vxge_hw_device_config *dev_config, int size)
+{
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
+ return VXGE_HW_ERR_INVALID_DEVICE;
+
+ if (size != sizeof(struct vxge_hw_device_config))
+ return VXGE_HW_ERR_VERSION_CONFLICT;
+
+ memcpy(dev_config, &hldev->config,
+ sizeof(struct vxge_hw_device_config));
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * vxge_hw_mgmt_reg_read - Read Titan register.
+ */
+enum vxge_hw_status
+vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index, u32 offset, u64 *value)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ switch (type) {
+ case vxge_hw_mgmt_reg_type_legacy:
+ if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->legacy_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_toc:
+ if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->toc_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_common:
+ if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->common_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_mrpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_srpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->srpcim_reg[index] +
+ offset);
+ break;
+ case vxge_hw_mgmt_reg_type_vpmgmt:
+ if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
+ offset);
+ break;
+ case vxge_hw_mgmt_reg_type_vpath:
+ if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->vpath_reg[index] +
+ offset);
+ break;
+ default:
+ status = VXGE_HW_ERR_INVALID_TYPE;
+ break;
+ }
+
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_mgmt_reg_write - Write Titan register.
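 *
 * (Mirrors vxge_hw_mgmt_reg_read() above: every register space is
 * bounds-checked so that the 8-byte writeq stays inside the mapped
 * block, i.e. the recurring test has the form
 *
 *	if (offset > sizeof(struct vxge_hw_legacy_reg) - 8)
 *		status = VXGE_HW_ERR_INVALID_OFFSET;
 * )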
+ */
+enum vxge_hw_status
+vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index, u32 offset, u64 value)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ switch (type) {
+ case vxge_hw_mgmt_reg_type_legacy:
+ if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->legacy_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_toc:
+ if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->toc_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_common:
+ if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->common_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_mrpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_srpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
+ offset);
+
+ break;
+ case vxge_hw_mgmt_reg_type_vpmgmt:
+ if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
+ offset);
+ break;
+ case vxge_hw_mgmt_reg_type_vpath:
+ if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->vpath_reg[index] +
+ offset);
+ break;
+ default:
+ status = VXGE_HW_ERR_INVALID_TYPE;
+ break;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
+ * list callback
+ * This function is the callback passed to __vxge_hw_mempool_create to
+ * create the memory pool for TxD lists
+ */
+static void
+__vxge_hw_fifo_mempool_item_alloc(
+ struct vxge_hw_mempool *mempoolh,
+ u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
+ u32 index, u32 is_last)
+{
+ u32 memblock_item_idx;
+ struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+ struct vxge_hw_fifo_txd *txdp =
+ (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
+ struct __vxge_hw_fifo *fifo =
+ (struct __vxge_hw_fifo *)mempoolh->userdata;
+ void *memblock = mempoolh->memblocks_arr[memblock_index];
+
+ vxge_assert(txdp);
+
+ txdp->host_control = (u64) (size_t)
+ __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
+ &memblock_item_idx);
+
+ txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+
+ vxge_assert(txdl_priv);
+
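	/* At this point host_control carries the kernel-virtual address
	 * of this TxD's private area, so a completion path can get from
	 * a hardware descriptor back to driver state with a cast; a
	 * sketch of the idea only, no new helpers implied:
	 *
	 *	priv = (void *)(size_t)txdp->host_control;
	 */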
fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp; + + /* pre-format HW's TxDL's private */ + txdl_priv->dma_offset = (char *)txdp - (char *)memblock; + txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; + txdl_priv->dma_handle = dma_object->handle; + txdl_priv->memblock = memblock; + txdl_priv->first_txdp = txdp; + txdl_priv->next_txdl_priv = NULL; + txdl_priv->alloc_frags = 0; + + return; +} + +/* + * __vxge_hw_fifo_create - Create a FIFO + * This function creates FIFO and initializes it. + */ +enum vxge_hw_status +__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, + struct vxge_hw_fifo_attr *attr) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_fifo *fifo; + struct vxge_hw_fifo_config *config; + u32 txdl_size, txdl_per_memblock; + struct vxge_hw_mempool_cbs fifo_mp_callback; + struct __vxge_hw_virtualpath *vpath; + + if ((vp == NULL) || (attr == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + vpath = vp->vpath; + config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo; + + txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd); + + txdl_per_memblock = config->memblock_size / txdl_size; + + fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp, + VXGE_HW_CHANNEL_TYPE_FIFO, + config->fifo_blocks * txdl_per_memblock, + attr->per_txdl_space, attr->userdata); + + if (fifo == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + vpath->fifoh = fifo; + fifo->nofl_db = vpath->nofl_db; + + fifo->vp_id = vpath->vp_id; + fifo->vp_reg = vpath->vp_reg; + fifo->stats = &vpath->sw_stats->fifo_stats; + + fifo->config = config; + + /* apply "interrupts per txdl" attribute */ + fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; + + if (fifo->config->intr) + fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; + + fifo->no_snoop_bits = config->no_snoop_bits; + + /* + * FIFO memory management strategy: + * + * TxDL split into three independent parts: + * - set of TxD's + * - TxD HW private part + * - driver private part + * + * Adaptative memory allocation used. i.e. Memory allocated on + * demand with the size which will fit into one memory block. + * One memory block may contain more than one TxDL. + * + * During "reserve" operations more memory can be allocated on demand + * for example due to FIFO full condition. + * + * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close + * routine which will essentially stop the channel and free resources. 
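 *
 * A sketch of the block geometry computed earlier in this function,
 * with illustrative numbers:
 *
 *	txdl_size         = config->max_frags *
 *				sizeof(struct vxge_hw_fifo_txd);
 *	txdl_per_memblock = config->memblock_size / txdl_size;
 *
 * e.g. a 64 KiB memblock holding 2 KiB TxDLs yields 32 TxDLs per
 * memblock, and fifo_blocks * txdl_per_memblock lists overall.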
+ */
+
+ /* TxDL common private size == TxDL private + driver private */
+ fifo->priv_size =
+ sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
+ fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+ VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+
+ fifo->per_txdl_space = attr->per_txdl_space;
+
+ /* txdl size and TxDLs per memblock, as computed above */
+ fifo->txdl_size = txdl_size;
+ fifo->txdl_per_memblock = txdl_per_memblock;
+
+ fifo->txdl_term = attr->txdl_term;
+ fifo->callback = attr->callback;
+
+ if (fifo->txdl_per_memblock == 0) {
+ __vxge_hw_fifo_delete(vp);
+ status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
+ goto exit;
+ }
+
+ fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
+
+ fifo->mempool =
+ __vxge_hw_mempool_create(vpath->hldev,
+ fifo->config->memblock_size,
+ fifo->txdl_size,
+ fifo->priv_size,
+ (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+ (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+ &fifo_mp_callback,
+ fifo);
+
+ if (fifo->mempool == NULL) {
+ __vxge_hw_fifo_delete(vp);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ status = __vxge_hw_channel_initialize(&fifo->channel);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_fifo_delete(vp);
+ goto exit;
+ }
+
+ vxge_assert(fifo->channel.reserve_ptr);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_fifo_abort - Returns the TxDs
+ * This function terminates the TxDs of the fifo
+ */
+enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+ void *txdlh;
+
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
+
+ if (txdlh == NULL)
+ break;
+
+ vxge_hw_channel_dtr_complete(&fifo->channel);
+
+ if (fifo->txdl_term) {
+ fifo->txdl_term(txdlh,
+ VXGE_HW_TXDL_STATE_POSTED,
+ fifo->channel.userdata);
+ }
+
+ vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during vpath reset operation
+ */
+enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ __vxge_hw_fifo_abort(fifo);
+ status = __vxge_hw_channel_reset(&fifo->channel);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
+
+ __vxge_hw_fifo_abort(fifo);
+
+ if (fifo->mempool)
+ __vxge_hw_mempool_destroy(fifo->mempool);
+
+ vp->vpath->fifoh = NULL;
+
+ __vxge_hw_channel_free(&fifo->channel);
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_pci_read - Read the content of given address
+ * in pci config space.
+ * Read from the vpath pci config space.
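 *
 * (In outline this is a "kick and poll" indirect access, condensed
 * from the body below rather than additional code:
 *
 *	writeq(address cfg, &vp_reg->pci_config_access_cfg1);
 *	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
 *		&vp_reg->pci_config_access_cfg2);
 *	poll cfg2 until the request completes;
 *	val64 = readq(&vp_reg->pci_config_access_status);
 *
 * with wmb() ensuring the address is posted before the strobe.)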
+ */
+enum vxge_hw_status
+__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
+ u32 phy_func_0, u32 offset, u32 *val)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
+
+ val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
+
+ if (phy_func_0)
+ val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
+
+ writeq(val64, &vp_reg->pci_config_access_cfg1);
+ wmb();
+ writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
+ &vp_reg->pci_config_access_cfg2);
+ wmb();
+
+ status = __vxge_hw_device_register_poll(
+ &vp_reg->pci_config_access_cfg2,
+ VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = readq(&vp_reg->pci_config_access_status);
+
+ if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
+ status = VXGE_HW_FAIL;
+ *val = 0;
+ } else
+ *val = (u32)vxge_bVALn(val64, 32, 32);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+u32
+__vxge_hw_vpath_func_id_get(u32 vp_id,
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+ return
+ (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
+/*
+ * __vxge_hw_read_rts_ds - Program RTS steering criteria
+ */
+static inline void
+__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ u64 dta_struct_sel)
+{
+ writeq(0, &vpath_reg->rts_access_steer_ctrl);
+ wmb();
+ writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
+ writeq(0, &vpath_reg->rts_access_steer_data1);
+ wmb();
+ return;
+}
+
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
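 *
 * (Each item below is fetched with the same firmware "memo" handshake,
 * outlined here rather than added as code:
 *
 *	__vxge_hw_read_rts_ds(vpath_reg, item);
 *	write ACTION_READ_MEMO_ENTRY | STROBE to rts_access_steer_ctrl;
 *	poll until the strobe self-clears;
 *	on RMACJ_STATUS, read data0/data1 and be64_to_cpu() them.
 * )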
+ */ +enum vxge_hw_status +__vxge_hw_vpath_card_info_get( + u32 vp_id, + struct vxge_hw_vpath_reg __iomem *vpath_reg, + struct vxge_hw_device_hw_info *hw_info) +{ + u32 i, j; + u64 val64; + u64 data1 = 0ULL; + u64 data2 = 0ULL; + enum vxge_hw_status status = VXGE_HW_OK; + u8 *serial_number = hw_info->serial_number; + u8 *part_number = hw_info->part_number; + u8 *product_desc = hw_info->product_desc; + + __vxge_hw_read_rts_ds(vpath_reg, + VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER); + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = __vxge_hw_pio_mem_write64(val64, + &vpath_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + return status; + + val64 = readq(&vpath_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + data1 = readq(&vpath_reg->rts_access_steer_data0); + ((u64 *)serial_number)[0] = be64_to_cpu(data1); + + data2 = readq(&vpath_reg->rts_access_steer_data1); + ((u64 *)serial_number)[1] = be64_to_cpu(data2); + status = VXGE_HW_OK; + } else + *serial_number = 0; + + __vxge_hw_read_rts_ds(vpath_reg, + VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER); + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = __vxge_hw_pio_mem_write64(val64, + &vpath_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + return status; + + val64 = readq(&vpath_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + + data1 = readq(&vpath_reg->rts_access_steer_data0); + ((u64 *)part_number)[0] = be64_to_cpu(data1); + + data2 = readq(&vpath_reg->rts_access_steer_data1); + ((u64 *)part_number)[1] = be64_to_cpu(data2); + + status = VXGE_HW_OK; + + } else + *part_number = 0; + + j = 0; + + for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; + i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { + + __vxge_hw_read_rts_ds(vpath_reg, i); + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = __vxge_hw_pio_mem_write64(val64, + &vpath_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + return status; + + val64 = readq(&vpath_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + + data1 = readq(&vpath_reg->rts_access_steer_data0); + ((u64 *)product_desc)[j++] = be64_to_cpu(data1); + + data2 = readq(&vpath_reg->rts_access_steer_data1); + ((u64 *)product_desc)[j++] = be64_to_cpu(data2); + + status = VXGE_HW_OK; + } else + *product_desc = 0; + } + + return status; +} + +/* + * __vxge_hw_vpath_fw_ver_get - Get the fw version + * Returns FW Version + */ +enum 
vxge_hw_status +__vxge_hw_vpath_fw_ver_get( + u32 vp_id, + struct vxge_hw_vpath_reg __iomem *vpath_reg, + struct vxge_hw_device_hw_info *hw_info) +{ + u64 val64; + u64 data1 = 0ULL; + u64 data2 = 0ULL; + struct vxge_hw_device_version *fw_version = &hw_info->fw_version; + struct vxge_hw_device_date *fw_date = &hw_info->fw_date; + struct vxge_hw_device_version *flash_version = &hw_info->flash_version; + struct vxge_hw_device_date *flash_date = &hw_info->flash_date; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = __vxge_hw_pio_mem_write64(val64, + &vpath_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vpath_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + + data1 = readq(&vpath_reg->rts_access_steer_data0); + data2 = readq(&vpath_reg->rts_access_steer_data1); + + fw_date->day = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY( + data1); + fw_date->month = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH( + data1); + fw_date->year = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR( + data1); + + snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", + fw_date->month, fw_date->day, fw_date->year); + + fw_version->major = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1); + fw_version->minor = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1); + fw_version->build = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1); + + snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", + fw_version->major, fw_version->minor, fw_version->build); + + flash_date->day = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2); + flash_date->month = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2); + flash_date->year = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2); + + snprintf(flash_date->date, VXGE_HW_FW_STRLEN, + "%2.2d/%2.2d/%4.4d", + flash_date->month, flash_date->day, flash_date->year); + + flash_version->major = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2); + flash_version->minor = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2); + flash_version->build = + (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2); + + snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", + flash_version->major, flash_version->minor, + flash_version->build); + + status = VXGE_HW_OK; + + } else + status = VXGE_HW_FAIL; +exit: + return status; +} + +/* + * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode + * Returns pci function mode + */ +u64 +__vxge_hw_vpath_pci_func_mode_get( + u32 vp_id, + struct vxge_hw_vpath_reg __iomem *vpath_reg) +{ + u64 val64; + u64 data1 = 0ULL; + enum vxge_hw_status status = VXGE_HW_OK; + + __vxge_hw_read_rts_ds(vpath_reg, + VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE); + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = 
__vxge_hw_pio_mem_write64(val64, + &vpath_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vpath_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + data1 = readq(&vpath_reg->rts_access_steer_data0); + status = VXGE_HW_OK; + } else { + data1 = 0; + status = VXGE_HW_FAIL; + } +exit: + return data1; +} + +/** + * vxge_hw_device_flick_link_led - Flick (blink) link LED. + * @hldev: HW device. + * @on_off: TRUE if flickering to be on, FALSE to be off + * + * Flicker the link LED. + */ +enum vxge_hw_status +vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, + u64 on_off) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (hldev == NULL) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + vp_reg = hldev->vpath_reg[hldev->first_vp_id]; + + writeq(0, &vp_reg->rts_access_steer_ctrl); + wmb(); + writeq(on_off, &vp_reg->rts_access_steer_data0); + writeq(0, &vp_reg->rts_access_steer_data1); + wmb(); + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); +exit: + return status; +} + +/* + * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables + */ +enum vxge_hw_status +__vxge_hw_vpath_rts_table_get( + struct __vxge_hw_vpath_handle *vp, + u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + enum vxge_hw_status status = VXGE_HW_OK; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + vp_reg = vpath->vp_reg; + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); + + if ((rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || + (rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || + (rts_table == + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || + (rts_table == + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { + val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; + } + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + vpath->hldev->config.device_poll_millis); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vp_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + + *data1 = readq(&vp_reg->rts_access_steer_data0); + + if ((rts_table == + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || + (rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { + *data2 = readq(&vp_reg->rts_access_steer_data1); + } + status = VXGE_HW_OK; + } else + status = VXGE_HW_FAIL; +exit: + return status; +} + +/* + * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables + */ +enum vxge_hw_status +__vxge_hw_vpath_rts_table_set( + struct __vxge_hw_vpath_handle *vp, 
u32 action, u32 rts_table, + u32 offset, u64 data1, u64 data2) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + vp_reg = vpath->vp_reg; + + writeq(data1, &vp_reg->rts_access_steer_data0); + wmb(); + + if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || + (rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { + writeq(data2, &vp_reg->rts_access_steer_data1); + wmb(); + } + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset); + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + vpath->hldev->config.device_poll_millis); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vp_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) + status = VXGE_HW_OK; + else + status = VXGE_HW_FAIL; +exit: + return status; +} + +/* + * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath + * from MAC address table. + */ +enum vxge_hw_status +__vxge_hw_vpath_addr_get( + u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, + u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) +{ + u32 i; + u64 val64; + u64 data1 = 0ULL; + u64 data2 = 0ULL; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION( + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL( + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0); + + status = __vxge_hw_pio_mem_write64(val64, + &vpath_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vpath_reg->rts_access_steer_ctrl); + + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + + data1 = readq(&vpath_reg->rts_access_steer_data0); + data2 = readq(&vpath_reg->rts_access_steer_data1); + + data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); + data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( + data2); + + for (i = ETH_ALEN; i > 0; i--) { + macaddr[i-1] = (u8)(data1 & 0xFF); + data1 >>= 8; + + macaddr_mask[i-1] = (u8)(data2 & 0xFF); + data2 >>= 8; + } + status = VXGE_HW_OK; + } else + status = VXGE_HW_FAIL; +exit: + return status; +} + +/* + * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing. 
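 *
 * (This is a read-modify-write of the RTH_GEN_CFG steering entry,
 * condensed from the body below:
 *
 *	__vxge_hw_vpath_rts_table_get(vp, READ_ENTRY, RTH_GEN_CFG,
 *					0, &data0, &data1);
 *	clear the BUCKET_SIZE and ALG_SEL fields, OR in the new values
 *	plus the per-protocol enable bits, toggle ACTIVE_TABLE;
 *	__vxge_hw_vpath_rts_table_set(vp, WRITE_ENTRY, RTH_GEN_CFG,
 *					0, data0, 0);
 * )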
+ */
+enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
+ struct __vxge_hw_vpath_handle *vp,
+ enum vxge_hw_rth_algoritms algorithm,
+ struct vxge_hw_rth_hash_types *hash_type,
+ u16 bucket_size)
+{
+ u64 data0, data1;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_get(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
+ 0, &data0, &data1);
+
+ data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
+
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
+
+ if (hash_type->hash_type_tcpipv4_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
+
+ if (hash_type->hash_type_ipv4_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
+
+ if (hash_type->hash_type_tcpipv6_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
+
+ if (hash_type->hash_type_ipv6_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
+
+ if (hash_type->hash_type_tcpipv6ex_en)
+ data0 |=
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
+
+ if (hash_type->hash_type_ipv6ex_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
+
+ if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
+ data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
+ else
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
+
+ status = __vxge_hw_vpath_rts_table_set(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
+ 0, data0, 0);
+exit:
+ return status;
+}
+
+static void
+vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
+ u16 flag, u8 *itable)
+{
+ switch (flag) {
+ case 1:
+ *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
+ itable[j]);
+ break;
+ case 2:
+ *data0 |=
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
+ itable[j]);
+ break;
+ case 3:
+ *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
+ itable[j]);
+ break;
+ case 4:
+ *data1 |=
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
+ itable[j]);
+ break;
+ default:
+ return;
+ }
+}
+/*
+ * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
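 *
 * (In MULTI_IT mode the helper above packs up to four indirection-table
 * buckets into one steering write: flags 1 and 2 fill the ITEM0/ITEM1
 * slots of data0, flags 3 and 4 those of data1, e.g.
 *
 *	vxge_hw_rts_rth_data0_data1_get(j, &data0, &data1, 1, itable);
 *	...
 *	__vxge_hw_vpath_rts_table_set(vpath_handles[i], action,
 *					rts_table, 0, data0, data1);
 * )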
+ */ +enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( + struct __vxge_hw_vpath_handle **vpath_handles, + u32 vpath_count, + u8 *mtable, + u8 *itable, + u32 itable_size) +{ + u32 i, j, action, rts_table; + u64 data0; + u64 data1; + u32 max_entries; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_vpath_handle *vp = vpath_handles[0]; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + max_entries = (((u32)1) << itable_size); + + if (vp->vpath->hldev->config.rth_it_type + == VXGE_HW_RTH_IT_TYPE_SOLO_IT) { + action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; + rts_table = + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT; + + for (j = 0; j < max_entries; j++) { + + data1 = 0; + + data0 = + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( + itable[j]); + + status = __vxge_hw_vpath_rts_table_set(vpath_handles[0], + action, rts_table, j, data0, data1); + + if (status != VXGE_HW_OK) + goto exit; + } + + for (j = 0; j < max_entries; j++) { + + data1 = 0; + + data0 = + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( + itable[j]); + + status = __vxge_hw_vpath_rts_table_set( + vpath_handles[mtable[itable[j]]], action, + rts_table, j, data0, data1); + + if (status != VXGE_HW_OK) + goto exit; + } + } else { + action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; + rts_table = + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT; + for (i = 0; i < vpath_count; i++) { + + for (j = 0; j < max_entries;) { + + data0 = 0; + data1 = 0; + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 1, itable); + j++; + break; + } + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 2, itable); + j++; + break; + } + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 3, itable); + j++; + break; + } + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 4, itable); + j++; + break; + } + + if (data0 != 0) { + status = __vxge_hw_vpath_rts_table_set( + vpath_handles[i], + action, rts_table, + 0, data0, data1); + + if (status != VXGE_HW_OK) + goto exit; + } + } + } + } +exit: + return status; +} + +/** + * vxge_hw_vpath_check_leak - Check for memory leak + * @ringh: Handle to the ring object used for receive + * + * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to + * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. + * Returns: VXGE_HW_FAIL, if leak has occurred. 
+ * + */ +enum vxge_hw_status +vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring) +{ + enum vxge_hw_status status = VXGE_HW_OK; + u64 rxd_new_count, rxd_spat; + + if (ring == NULL) + return status; + + rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell); + rxd_spat = readq(&ring->vp_reg->prc_cfg6); + rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat); + + if (rxd_new_count >= rxd_spat) + status = VXGE_HW_FAIL; + + return status; +} + +/* + * __vxge_hw_vpath_mgmt_read + * This routine reads the vpath_mgmt registers + */ +static enum vxge_hw_status +__vxge_hw_vpath_mgmt_read( + struct __vxge_hw_device *hldev, + struct __vxge_hw_virtualpath *vpath) +{ + u32 i, mtu = 0, max_pyld = 0; + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { + + val64 = readq(&vpath->vpmgmt_reg-> + rxmac_cfg0_port_vpmgmt_clone[i]); + max_pyld = + (u32) + VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN + (val64); + if (mtu < max_pyld) + mtu = max_pyld; + } + + vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE; + + val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (val64 & vxge_mBIT(i)) + vpath->vsport_number = i; + } + + val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone); + + if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK) + VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP); + else + VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); + + return status; +} + +/* + * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed + * This routine checks the vpath_rst_in_prog register to see if + * adapter completed the reset process for the vpath + */ +enum vxge_hw_status +__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) +{ + enum vxge_hw_status status; + + status = __vxge_hw_device_register_poll( + &vpath->hldev->common_reg->vpath_rst_in_prog, + VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG( + 1 << (16 - vpath->vp_id)), + vpath->hldev->config.device_poll_millis); + + return status; +} + +/* + * __vxge_hw_vpath_reset + * This routine resets the vpath on the device + */ +enum vxge_hw_status +__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), + &hldev->common_reg->cmn_rsthdlr_cfg0); + + return status; +} + +/* + * __vxge_hw_vpath_sw_reset + * This routine resets the vpath structures + */ +enum vxge_hw_status +__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + + vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id]; + + if (vpath->ringh) { + status = __vxge_hw_ring_reset(vpath->ringh); + if (status != VXGE_HW_OK) + goto exit; + } + + if (vpath->fifoh) + status = __vxge_hw_fifo_reset(vpath->fifoh); +exit: + return status; +} + +/* + * __vxge_hw_vpath_prc_configure + * This routine configures the prc registers of virtual path using the config + * passed + */ +void +__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vp_config *vp_config; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + vp_config = vpath->vp_config; + + if 
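	/* Note on the reset helpers above: vpath n maps to bit (16 - n)
	 * of both cmn_rsthdlr_cfg0 and vpath_rst_in_prog, so a reset
	 * request and its completion poll use the same mask (sketch):
	 *
	 *	mask = 1 << (16 - vp_id);
	 *	write mask to cmn_rsthdlr_cfg0, then poll
	 *	vpath_rst_in_prog & mask until it clears.
	 */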
(vp_config->ring.enable == VXGE_HW_RING_DISABLE) + return; + + val64 = readq(&vp_reg->prc_cfg1); + val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE; + writeq(val64, &vp_reg->prc_cfg1); + + val64 = readq(&vpath->vp_reg->prc_cfg6); + val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN; + writeq(val64, &vpath->vp_reg->prc_cfg6); + + val64 = readq(&vp_reg->prc_cfg7); + + if (vpath->vp_config->ring.scatter_mode != + VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) { + + val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3); + + switch (vpath->vp_config->ring.scatter_mode) { + case VXGE_HW_RING_SCATTER_MODE_A: + val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( + VXGE_HW_PRC_CFG7_SCATTER_MODE_A); + break; + case VXGE_HW_RING_SCATTER_MODE_B: + val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( + VXGE_HW_PRC_CFG7_SCATTER_MODE_B); + break; + case VXGE_HW_RING_SCATTER_MODE_C: + val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( + VXGE_HW_PRC_CFG7_SCATTER_MODE_C); + break; + } + } + + writeq(val64, &vp_reg->prc_cfg7); + + writeq(VXGE_HW_PRC_CFG5_RXD0_ADD( + __vxge_hw_ring_first_block_address_get( + vpath->ringh) >> 3), &vp_reg->prc_cfg5); + + val64 = readq(&vp_reg->prc_cfg4); + val64 |= VXGE_HW_PRC_CFG4_IN_SVC; + val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3); + + val64 |= VXGE_HW_PRC_CFG4_RING_MODE( + VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER); + + if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE) + val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE; + else + val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; + + writeq(val64, &vp_reg->prc_cfg4); + return; +} + +/* + * __vxge_hw_vpath_kdfc_configure + * This routine configures the kdfc registers of virtual path using the + * config passed + */ +enum vxge_hw_status +__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + u64 vpath_stride; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vp_reg->kdfc_drbl_triplet_total); + + vpath->max_kdfc_db = + (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE( + val64+1)/2; + + if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { + + vpath->max_nofl_db = vpath->max_kdfc_db; + + if (vpath->max_nofl_db < + ((vpath->vp_config->fifo.memblock_size / + (vpath->vp_config->fifo.max_frags * + sizeof(struct vxge_hw_fifo_txd))) * + vpath->vp_config->fifo.fifo_blocks)) { + + return VXGE_HW_BADCFG_FIFO_BLOCKS; + } + val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0( + (vpath->max_nofl_db*2)-1); + } + + writeq(val64, &vp_reg->kdfc_fifo_trpl_partition); + + writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, + &vp_reg->kdfc_fifo_trpl_ctrl); + + val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl); + + val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) | + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF)); + + val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE( + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) | +#ifndef __BIG_ENDIAN + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN | +#endif + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0); + + writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl); + writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address); + wmb(); + vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride); + + vpath->nofl_db = + (struct __vxge_hw_non_offload_db_wrapper __iomem *) + (hldev->kdfc + (vp_id * + VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE( + vpath_stride))); +exit: + return status; +} + +/* + * 
__vxge_hw_vpath_mac_configure + * This routine configures the mac of virtual path using the config passed + */ +enum vxge_hw_status +__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vp_config *vp_config; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + vp_config = vpath->vp_config; + + writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER( + vpath->vsport_number), &vp_reg->xmac_vsport_choice); + + if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) { + + val64 = readq(&vp_reg->xmac_rpa_vcfg); + + if (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) { + if (vp_config->rpa_strip_vlan_tag) + val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; + else + val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; + } + + writeq(val64, &vp_reg->xmac_rpa_vcfg); + val64 = readq(&vp_reg->rxmac_vcfg0); + + if (vp_config->mtu != + VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) { + val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); + if ((vp_config->mtu + + VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu) + val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( + vp_config->mtu + + VXGE_HW_MAC_HEADER_MAX_SIZE); + else + val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( + vpath->max_mtu); + } + + writeq(val64, &vp_reg->rxmac_vcfg0); + + val64 = readq(&vp_reg->rxmac_vcfg1); + + val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) | + VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE); + + if (hldev->config.rth_it_type == + VXGE_HW_RTH_IT_TYPE_MULTI_IT) { + val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE( + 0x2) | + VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE; + } + + writeq(val64, &vp_reg->rxmac_vcfg1); + } + return status; +} + +/* + * __vxge_hw_vpath_tim_configure + * This routine configures the tim registers of virtual path using the config + * passed + */ +enum vxge_hw_status +__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_vp_config *config; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + config = vpath->vp_config; + + writeq((u64)0, &vp_reg->tim_dest_addr); + writeq((u64)0, &vp_reg->tim_vpath_map); + writeq((u64)0, &vp_reg->tim_bitmap); + writeq((u64)0, &vp_reg->tim_remap); + + if (config->ring.enable == VXGE_HW_RING_ENABLE) + writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( + (vp_id * VXGE_HW_MAX_INTR_PER_VP) + + VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn); + + val64 = readq(&vp_reg->tim_pci_cfg); + val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD; + writeq(val64, &vp_reg->tim_pci_cfg); + + if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { + + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + + if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + config->tti.btimer_val); + } + + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; + + if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->tti.timer_ac_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + } + + if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->tti.timer_ci_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + else + 
val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + } + + if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( + config->tti.urange_a); + } + + if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( + config->tti.urange_b); + } + + if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( + config->tti.urange_c); + } + + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); + + if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( + config->tti.uec_a); + } + + if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( + config->tti.uec_b); + } + + if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( + config->tti.uec_c); + } + + if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( + config->tti.uec_d); + } + + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); + val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + + if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->tti.timer_ri_en) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + else + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + } + + if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + config->tti.rtimer_val); + } + + if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( + config->tti.util_sel); + } + + if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + config->tti.ltimer_val); + } + + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + } + + if (config->ring.enable == VXGE_HW_RING_ENABLE) { + + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); + + if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + config->rti.btimer_val); + } + + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; + + if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->rti.timer_ac_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + } + + if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->rti.timer_ci_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + } + + if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( + config->rti.urange_a); + } + + if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); + val64 |= 
VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( + config->rti.urange_b); + } + + if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( + config->rti.urange_c); + } + + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); + val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); + + if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( + config->rti.uec_a); + } + + if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( + config->rti.uec_b); + } + + if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( + config->rti.uec_c); + } + + if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( + config->rti.uec_d); + } + + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); + val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + + if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->rti.timer_ri_en) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + else + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + } + + if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + config->rti.rtimer_val); + } + + if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( + config->rti.util_sel); + } + + if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + config->rti.ltimer_val); + } + + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + } + + val64 = 0; + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]); + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]); + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]); + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]); + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); + + return status; +} + +/* + * __vxge_hw_vpath_initialize + * This routine is the final phase of init which initializes the + * registers of the vpath using the configuration passed. 
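+ *
+ * Programming order used by this routine (editor's summary of the
+ * body below):
+ *
+ *	__vxge_hw_vpath_swapper_set(vpath->vp_reg);
+ *	__vxge_hw_vpath_mac_configure(hldev, vp_id);
+ *	__vxge_hw_vpath_kdfc_configure(hldev, vp_id);
+ *	__vxge_hw_vpath_tim_configure(hldev, vp_id);
+ *	... finally rtdma_rd_optimization_ctrl is tuned from the PCIe
+ *	Max_Read_Request_Size obtained via __vxge_hw_vpath_pci_read().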
+ */ +enum vxge_hw_status +__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + u32 val32; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + + if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { + status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; + goto exit; + } + vp_reg = vpath->vp_reg; + + status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); + + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_mac_configure(hldev, vp_id); + + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); + + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_tim_configure(hldev, vp_id); + + if (status != VXGE_HW_OK) + goto exit; + + writeq(0, &vp_reg->gendma_int); + + val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); + + /* Get MRRS value from device control */ + status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); + + if (status == VXGE_HW_OK) { + val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; + val64 &= + ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7)); + val64 |= + VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32); + + val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE; + } + + val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7)); + val64 |= + VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY( + VXGE_HW_MAX_PAYLOAD_SIZE_512); + + val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN; + writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl); + +exit: + return status; +} + +/* + * __vxge_hw_vp_initialize - Initialize Virtual Path structure + * This routine is the initial phase of init which resets the vpath and + * initializes the software support structures. 
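+ *
+ * Split between the two init phases (editor's summary):
+ *
+ *	__vxge_hw_vp_initialize()	- reset the vpath, poll for reset
+ *					  completion, read the vpath_mgmt
+ *					  registers, set up software state
+ *	__vxge_hw_vpath_initialize()	- program the vpath registers from
+ *					  the configuration passed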
+ */
+enum vxge_hw_status
+__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
+			struct vxge_hw_vp_config *config)
+{
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
+		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
+		goto exit;
+	}
+
+	vpath = &hldev->virtual_paths[vp_id];
+
+	vpath->vp_id = vp_id;
+	vpath->vp_open = VXGE_HW_VP_OPEN;
+	vpath->hldev = hldev;
+	vpath->vp_config = config;
+	vpath->vp_reg = hldev->vpath_reg[vp_id];
+	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
+
+	__vxge_hw_vpath_reset(hldev, vp_id);
+
+	status = __vxge_hw_vpath_reset_check(vpath);
+
+	if (status != VXGE_HW_OK) {
+		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
+
+	if (status != VXGE_HW_OK) {
+		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+		goto exit;
+	}
+
+	INIT_LIST_HEAD(&vpath->vpath_handles);
+
+	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
+
+	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
+		hldev->tim_int_mask1, vp_id);
+
+	status = __vxge_hw_vpath_initialize(hldev, vp_id);
+
+	if (status != VXGE_HW_OK)
+		__vxge_hw_vp_terminate(hldev, vp_id);
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vp_terminate - Terminate Virtual Path structure
+ * This routine closes all channels it opened and frees up the memory
+ */
+void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+	struct __vxge_hw_virtualpath *vpath;
+
+	vpath = &hldev->virtual_paths[vp_id];
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
+		goto exit;
+
+	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
+		vpath->hldev->tim_int_mask1, vpath->vp_id);
+	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
+
+	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+exit:
+	return;
+}
+
+/*
+ * vxge_hw_vpath_mtu_set - Set MTU.
+ * Set new MTU value. Example, to use jumbo frames:
+ * vxge_hw_vpath_mtu_set(my_device, 9600);
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct __vxge_hw_virtualpath *vpath;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+	vpath = vp->vpath;
+
+	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
+
+	/* Bail out before touching the hardware if the requested MTU is
+	 * out of range; programming an invalid frame length would leave
+	 * the vpath misconfigured while reporting an error. */
+	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
+		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
+		goto exit;
+	}
+
+	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
+
+	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+
+	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
+
+exit:
+	return status;
+}
+
+/*
+ * vxge_hw_vpath_open - Open a virtual path on a given adapter
+ * This function is used to open access to a virtual path of an
+ * adapter for offload and GRO operations. This function returns
+ * synchronously.
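+ *
+ * A minimal open/close sketch (editor's illustration only; assumes a
+ * device handle "hldev" from device initialization, and a zeroed
+ * attribute block - real callers must also fill the fifo/ring
+ * attributes with their completion callbacks):
+ *
+ *	struct vxge_hw_vpath_attr attr;
+ *	struct __vxge_hw_vpath_handle *vph;
+ *	enum vxge_hw_status status;
+ *
+ *	memset(&attr, 0, sizeof(attr));
+ *	attr.vp_id = 0;
+ *	status = vxge_hw_vpath_open(hldev, &attr, &vph);
+ *	if (status == VXGE_HW_OK) {
+ *		... use the vpath, then ...
+ *		vxge_hw_vpath_close(vph);
+ *	}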
+ */
+enum vxge_hw_status
+vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
+		   struct vxge_hw_vpath_attr *attr,
+		   struct __vxge_hw_vpath_handle **vpath_handle)
+{
+	struct __vxge_hw_virtualpath *vpath;
+	struct __vxge_hw_vpath_handle *vp;
+	enum vxge_hw_status status;
+
+	vpath = &hldev->virtual_paths[attr->vp_id];
+
+	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
+		status = VXGE_HW_ERR_INVALID_STATE;
+		goto vpath_open_exit1;
+	}
+
+	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
+			&hldev->config.vp_config[attr->vp_id]);
+
+	if (status != VXGE_HW_OK)
+		goto vpath_open_exit1;
+
+	vp = (struct __vxge_hw_vpath_handle *)
+		vmalloc(sizeof(struct __vxge_hw_vpath_handle));
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		goto vpath_open_exit2;
+	}
+
+	memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
+
+	vp->vpath = vpath;
+
+	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
+		if (status != VXGE_HW_OK)
+			goto vpath_open_exit6;
+	}
+
+	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
+		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
+		if (status != VXGE_HW_OK)
+			goto vpath_open_exit7;
+
+		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
+	}
+
+	vpath->fifoh->tx_intr_num =
+		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
+			VXGE_HW_VPATH_INTR_TX;
+
+	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
+				VXGE_HW_BLOCK_SIZE);
+
+	if (vpath->stats_block == NULL) {
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		goto vpath_open_exit8;
+	}
+
+	vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
+			stats_block->memblock;
+	memset(vpath->hw_stats, 0,
+		sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
+						vpath->hw_stats;
+
+	vpath->hw_stats_sav =
+		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
+	memset(vpath->hw_stats_sav, 0,
+			sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
+
+	status = vxge_hw_vpath_stats_enable(vp);
+	if (status != VXGE_HW_OK)
+		goto vpath_open_exit8;
+
+	list_add(&vp->item, &vpath->vpath_handles);
+
+	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
+
+	*vpath_handle = vp;
+
+	attr->fifo_attr.userdata = vpath->fifoh;
+	attr->ring_attr.userdata = vpath->ringh;
+
+	return VXGE_HW_OK;
+
+vpath_open_exit8:
+	if (vpath->ringh != NULL)
+		__vxge_hw_ring_delete(vp);
+vpath_open_exit7:
+	if (vpath->fifoh != NULL)
+		__vxge_hw_fifo_delete(vp);
+vpath_open_exit6:
+	vfree(vp);
+vpath_open_exit2:
+	__vxge_hw_vp_terminate(hldev, attr->vp_id);
+vpath_open_exit1:
+
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
+ * @vp: Handle got from previous vpath open
+ *
+ * This function reads the RxD memory size for the ring from the
+ * hardware, posts it to the PRC RxD doorbell, and derives the ring's
+ * rxds_limit from the doorbell count and the RxD scratchpad threshold
+ * (PRC_CFG6.RXD_SPAT). It is called after a vpath is opened or
+ * recovered from reset.
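+ *
+ * Typical call site (editor's sketch; the actual sequencing is done by
+ * the LL driver, not in this file):
+ *
+ *	status = vxge_hw_vpath_open(hldev, &attr, &vph);
+ *	if (status == VXGE_HW_OK)
+ *		vxge_hw_vpath_rx_doorbell_init(vph);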
+ */
+void
+vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
+{
+	struct __vxge_hw_virtualpath *vpath = NULL;
+	u64 new_count, val64, val164;
+	struct __vxge_hw_ring *ring;
+
+	vpath = vp->vpath;
+	ring = vpath->ringh;
+
+	new_count = readq(&vpath->vp_reg->rxdmem_size);
+	new_count &= 0x1fff;
+	val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+
+	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
+		&vpath->vp_reg->prc_rxd_doorbell);
+	readl(&vpath->vp_reg->prc_rxd_doorbell);
+
+	val164 /= 2;
+	val64 = readq(&vpath->vp_reg->prc_cfg6);
+	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
+	val64 &= 0x1ff;
+
+	/*
+	 * Each RxD is of 4 qwords
+	 */
+	new_count -= (val64 + 1);
+	val64 = min(val164, new_count) / 4;
+
+	ring->rxds_limit = min(ring->rxds_limit, val64);
+	if (ring->rxds_limit < 4)
+		ring->rxds_limit = 4;
+}
+
+/*
+ * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
+ * This function is used to close access to virtual path opened
+ * earlier.
+ */
+enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
+{
+	struct __vxge_hw_virtualpath *vpath = NULL;
+	struct __vxge_hw_device *devh = NULL;
+	u32 vp_id = vp->vpath->vp_id;
+	u32 is_empty = TRUE;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	vpath = vp->vpath;
+	devh = vpath->hldev;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto vpath_close_exit;
+	}
+
+	list_del(&vp->item);
+
+	if (!list_empty(&vpath->vpath_handles)) {
+		list_add(&vp->item, &vpath->vpath_handles);
+		is_empty = FALSE;
+	}
+
+	if (!is_empty) {
+		status = VXGE_HW_FAIL;
+		goto vpath_close_exit;
+	}
+
+	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
+
+	if (vpath->ringh != NULL)
+		__vxge_hw_ring_delete(vp);
+
+	if (vpath->fifoh != NULL)
+		__vxge_hw_fifo_delete(vp);
+
+	if (vpath->stats_block != NULL)
+		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);
+
+	vfree(vp);
+
+	__vxge_hw_vp_terminate(devh, vp_id);
+
+	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+
+vpath_close_exit:
+	return status;
+}
+
+/*
+ * vxge_hw_vpath_reset - Resets vpath
+ * This function is used to request a reset of vpath
+ */
+enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
+{
+	enum vxge_hw_status status;
+	u32 vp_id;
+	struct __vxge_hw_virtualpath *vpath = vp->vpath;
+
+	vp_id = vpath->vp_id;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
+	if (status == VXGE_HW_OK)
+		vpath->sw_stats->soft_reset_cnt++;
+exit:
+	return status;
+}
+
+/*
+ * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
+ * This function polls for the vpath reset completion and re-initializes
+ * the vpath.
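+ *
+ * Illustrative recovery sequence (editor's sketch; the real driver's
+ * error handling is more involved):
+ *
+ *	status = vxge_hw_vpath_reset(vph);
+ *	if (status == VXGE_HW_OK)
+ *		status = vxge_hw_vpath_recover_from_reset(vph);
+ *	if (status == VXGE_HW_OK) {
+ *		vxge_hw_vpath_enable(vph);
+ *		vxge_hw_vpath_rx_doorbell_init(vph);
+ *	}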
+ */ +enum vxge_hw_status +vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_virtualpath *vpath = NULL; + enum vxge_hw_status status; + struct __vxge_hw_device *hldev; + u32 vp_id; + + vp_id = vp->vpath->vp_id; + vpath = vp->vpath; + hldev = vpath->hldev; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + status = __vxge_hw_vpath_reset_check(vpath); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_sw_reset(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_initialize(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + if (vpath->ringh != NULL) + __vxge_hw_vpath_prc_configure(hldev, vp_id); + + memset(vpath->hw_stats, 0, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + memset(vpath->hw_stats_sav, 0, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + writeq(vpath->stats_block->dma_addr, + &vpath->vp_reg->stats_cfg); + + status = vxge_hw_vpath_stats_enable(vp); + +exit: + return status; +} + +/* + * vxge_hw_vpath_enable - Enable vpath. + * This routine clears the vpath reset thereby enabling a vpath + * to start forwarding frames and generating interrupts. + */ +void +vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_device *hldev; + u64 val64; + + hldev = vp->vpath->hldev; + + val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET( + 1 << (16 - vp->vpath->vp_id)); + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), + &hldev->common_reg->cmn_rsthdlr_cfg1); +} + +/* + * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics. + * Enable the DMA vpath statistics. The function is to be called to re-enable + * the adapter to update stats into the host memory + */ +enum vxge_hw_status +vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + + vpath = vp->vpath; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + memcpy(vpath->hw_stats_sav, vpath->hw_stats, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); +exit: + return status; +} + +/* + * __vxge_hw_vpath_stats_access - Get the statistics from the given location + * and offset and perform an operation + */ +enum vxge_hw_status +__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, + u32 operation, u32 offset, u64 *stat) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto vpath_stats_access_exit; + } + + vp_reg = vpath->vp_reg; + + val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | + VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | + VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->xmac_stats_access_cmd, + VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, + vpath->hldev->config.device_poll_millis); + + if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) + *stat = readq(&vp_reg->xmac_stats_access_data); + else + *stat = 0; + +vpath_stats_access_exit: + return status; +} + +/* + * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath + */ +enum vxge_hw_status +__vxge_hw_vpath_xmac_tx_stats_get( + struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) +{ + u64 *val64; + int i; + u32 
offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	val64 = (u64 *) vpath_tx_stats;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+
+	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
+		status = __vxge_hw_vpath_stats_access(vpath,
+					VXGE_HW_STATS_OP_READ,
+					offset, val64);
+		if (status != VXGE_HW_OK)
+			goto exit;
+		offset++;
+		val64++;
+	}
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
+ */
+enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+{
+	u64 *val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	int i;
+	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
+	val64 = (u64 *) vpath_rx_stats;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
+		status = __vxge_hw_vpath_stats_access(vpath,
+					VXGE_HW_STATS_OP_READ,
+					offset >> 3, val64);
+		if (status != VXGE_HW_OK)
+			goto exit;
+
+		offset += 8;
+		val64++;
+	}
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ */
+enum vxge_hw_status __vxge_hw_vpath_stats_get(
+			struct __vxge_hw_virtualpath *vpath,
+			struct vxge_hw_vpath_stats_hw_info *hw_stats)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+	vp_reg = vpath->vp_reg;
+
+	val64 = readq(&vp_reg->vpath_debug_stats0);
+	hw_stats->ini_num_mwr_sent =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats1);
+	hw_stats->ini_num_mrd_sent =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats2);
+	hw_stats->ini_num_cpl_rcvd =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats3);
+	hw_stats->ini_num_mwr_byte_sent =
+		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats4);
+	hw_stats->ini_num_cpl_byte_rcvd =
+		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats5);
+	hw_stats->wrcrdtarb_xoff =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats6);
+	hw_stats->rdcrdtarb_xoff =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count01);
+	hw_stats->vpath_genstats_count0 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count01);
+	hw_stats->vpath_genstats_count1 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count23);
+	hw_stats->vpath_genstats_count2 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
+		val64);
+
+	/* count3 lives in the count23 register alongside count2 */
+	val64 = readq(&vp_reg->vpath_genstats_count23);
+	hw_stats->vpath_genstats_count3 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count4);
+	hw_stats->vpath_genstats_count4 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
+		val64);
+
+	val64 =
readq(&vp_reg->vpath_genstats_count5); + hw_stats->vpath_genstats_count5 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( + val64); + + status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); + if (status != VXGE_HW_OK) + goto exit; + + VXGE_HW_VPATH_STATS_PIO_READ( + VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); + + hw_stats->prog_event_vnum0 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); + + hw_stats->prog_event_vnum1 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); + + VXGE_HW_VPATH_STATS_PIO_READ( + VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); + + hw_stats->prog_event_vnum2 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); + + hw_stats->prog_event_vnum3 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); + + val64 = readq(&vp_reg->rx_multi_cast_stats); + hw_stats->rx_multi_cast_frame_discard = + (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); + + val64 = readq(&vp_reg->rx_frm_transferred); + hw_stats->rx_frm_transferred = + (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); + + val64 = readq(&vp_reg->rxd_returned); + hw_stats->rxd_returned = + (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); + + val64 = readq(&vp_reg->dbg_stats_rx_mpa); + hw_stats->rx_mpa_len_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); + hw_stats->rx_mpa_mrk_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); + hw_stats->rx_mpa_crc_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); + + val64 = readq(&vp_reg->dbg_stats_rx_fau); + hw_stats->rx_permitted_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); + hw_stats->rx_vp_reset_discarded_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); + hw_stats->rx_wol_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); + + val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); + hw_stats->tx_vp_reset_discarded_frms = + (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( + val64); +exit: + return status; +} + +/* + * __vxge_hw_blockpool_create - Create block pool + */ + +enum vxge_hw_status +__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, + struct __vxge_hw_blockpool *blockpool, + u32 pool_size, + u32 pool_max) +{ + u32 i; + struct __vxge_hw_blockpool_entry *entry = NULL; + void *memblock; + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + struct pci_dev *acc_handle; + enum vxge_hw_status status = VXGE_HW_OK; + + if (blockpool == NULL) { + status = VXGE_HW_FAIL; + goto blockpool_create_exit; + } + + blockpool->hldev = hldev; + blockpool->block_size = VXGE_HW_BLOCK_SIZE; + blockpool->pool_size = 0; + blockpool->pool_max = pool_max; + blockpool->req_out = 0; + + INIT_LIST_HEAD(&blockpool->free_block_list); + INIT_LIST_HEAD(&blockpool->free_entry_list); + + for (i = 0; i < pool_size + pool_max; i++) { + entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), + GFP_KERNEL); + if (entry == NULL) { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + list_add(&entry->item, &blockpool->free_entry_list); + } + + for (i = 0; i < pool_size; i++) { + + memblock = vxge_os_dma_malloc( + hldev->pdev, + VXGE_HW_BLOCK_SIZE, + &dma_handle, + &acc_handle); + + if (memblock == NULL) { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto 
blockpool_create_exit; + } + + dma_addr = pci_map_single(hldev->pdev, memblock, + VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); + + if (unlikely(pci_dma_mapping_error(hldev->pdev, + dma_addr))) { + + vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = + kzalloc(sizeof(struct __vxge_hw_blockpool_entry), + GFP_KERNEL); + if (entry != NULL) { + list_del(&entry->item); + entry->length = VXGE_HW_BLOCK_SIZE; + entry->memblock = memblock; + entry->dma_addr = dma_addr; + entry->acc_handle = acc_handle; + entry->dma_handle = dma_handle; + list_add(&entry->item, + &blockpool->free_block_list); + blockpool->pool_size++; + } else { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + } + +blockpool_create_exit: + return status; +} + +/* + * __vxge_hw_blockpool_destroy - Deallocates the block pool + */ + +void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) +{ + + struct __vxge_hw_device *hldev; + struct list_head *p, *n; + u16 ret; + + if (blockpool == NULL) { + ret = 1; + goto exit; + } + + hldev = blockpool->hldev; + + list_for_each_safe(p, n, &blockpool->free_block_list) { + + pci_unmap_single(hldev->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + PCI_DMA_BIDIRECTIONAL); + + vxge_os_dma_free(hldev->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->memblock, + &((struct __vxge_hw_blockpool_entry *) p)->acc_handle); + + list_del( + &((struct __vxge_hw_blockpool_entry *)p)->item); + kfree(p); + blockpool->pool_size--; + } + + list_for_each_safe(p, n, &blockpool->free_entry_list) { + list_del( + &((struct __vxge_hw_blockpool_entry *)p)->item); + kfree((void *)p); + } + ret = 0; +exit: + return; +} + +/* + * __vxge_hw_blockpool_blocks_add - Request additional blocks + */ +static +void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) +{ + u32 nreq = 0, i; + + if ((blockpool->pool_size + blockpool->req_out) < + VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { + nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; + blockpool->req_out += nreq; + } + + for (i = 0; i < nreq; i++) + vxge_os_dma_malloc_async( + ((struct __vxge_hw_device *)blockpool->hldev)->pdev, + blockpool->hldev, VXGE_HW_BLOCK_SIZE); +} + +/* + * __vxge_hw_blockpool_blocks_remove - Free additional blocks + */ +static +void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) +{ + struct list_head *p, *n; + + list_for_each_safe(p, n, &blockpool->free_block_list) { + + if (blockpool->pool_size < blockpool->pool_max) + break; + + pci_unmap_single( + ((struct __vxge_hw_device *)blockpool->hldev)->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + PCI_DMA_BIDIRECTIONAL); + + vxge_os_dma_free( + ((struct __vxge_hw_device *)blockpool->hldev)->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->memblock, + &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); + + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); + + list_add(p, &blockpool->free_entry_list); + + blockpool->pool_size--; + + } +} + +/* + * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async + * Adds a block to block 
pool + */ +void vxge_hw_blockpool_block_add( + struct __vxge_hw_device *devh, + void *block_addr, + u32 length, + struct pci_dev *dma_h, + struct pci_dev *acc_handle) +{ + struct __vxge_hw_blockpool *blockpool; + struct __vxge_hw_blockpool_entry *entry = NULL; + dma_addr_t dma_addr; + enum vxge_hw_status status = VXGE_HW_OK; + u32 req_out; + + blockpool = &devh->block_pool; + + if (block_addr == NULL) { + blockpool->req_out--; + status = VXGE_HW_FAIL; + goto exit; + } + + dma_addr = pci_map_single(devh->pdev, block_addr, length, + PCI_DMA_BIDIRECTIONAL); + + if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { + + vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); + blockpool->req_out--; + status = VXGE_HW_FAIL; + goto exit; + } + + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = (struct __vxge_hw_blockpool_entry *) + vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); + else + list_del(&entry->item); + + if (entry != NULL) { + entry->length = length; + entry->memblock = block_addr; + entry->dma_addr = dma_addr; + entry->acc_handle = acc_handle; + entry->dma_handle = dma_h; + list_add(&entry->item, &blockpool->free_block_list); + blockpool->pool_size++; + status = VXGE_HW_OK; + } else + status = VXGE_HW_ERR_OUT_OF_MEMORY; + + blockpool->req_out--; + + req_out = blockpool->req_out; +exit: + return; +} + +/* + * __vxge_hw_blockpool_malloc - Allocate a memory block from pool + * Allocates a block of memory of given size, either from block pool + * or by calling vxge_os_dma_malloc() + */ +void * +__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, + struct vxge_hw_mempool_dma *dma_object) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + void *memblock = NULL; + enum vxge_hw_status status = VXGE_HW_OK; + + blockpool = &devh->block_pool; + + if (size != blockpool->block_size) { + + memblock = vxge_os_dma_malloc(devh->pdev, size, + &dma_object->handle, + &dma_object->acc_handle); + + if (memblock == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + dma_object->addr = pci_map_single(devh->pdev, memblock, size, + PCI_DMA_BIDIRECTIONAL); + + if (unlikely(pci_dma_mapping_error(devh->pdev, + dma_object->addr))) { + vxge_os_dma_free(devh->pdev, memblock, + &dma_object->acc_handle); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + } else { + + if (!list_empty(&blockpool->free_block_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_block_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry != NULL) { + list_del(&entry->item); + dma_object->addr = entry->dma_addr; + dma_object->handle = entry->dma_handle; + dma_object->acc_handle = entry->acc_handle; + memblock = entry->memblock; + + list_add(&entry->item, + &blockpool->free_entry_list); + blockpool->pool_size--; + } + + if (memblock != NULL) + __vxge_hw_blockpool_blocks_add(blockpool); + } +exit: + return memblock; +} + +/* + * __vxge_hw_blockpool_free - Frees the memory allcoated with + __vxge_hw_blockpool_malloc + */ +void +__vxge_hw_blockpool_free(struct __vxge_hw_device *devh, + void *memblock, u32 size, + struct vxge_hw_mempool_dma *dma_object) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + enum vxge_hw_status status = VXGE_HW_OK; + + blockpool = &devh->block_pool; + + if (size != 
blockpool->block_size) { + pci_unmap_single(devh->pdev, dma_object->addr, size, + PCI_DMA_BIDIRECTIONAL); + vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); + } else { + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = (struct __vxge_hw_blockpool_entry *) + vmalloc(sizeof( + struct __vxge_hw_blockpool_entry)); + else + list_del(&entry->item); + + if (entry != NULL) { + entry->length = size; + entry->memblock = memblock; + entry->dma_addr = dma_object->addr; + entry->acc_handle = dma_object->acc_handle; + entry->dma_handle = dma_object->handle; + list_add(&entry->item, + &blockpool->free_block_list); + blockpool->pool_size++; + status = VXGE_HW_OK; + } else + status = VXGE_HW_ERR_OUT_OF_MEMORY; + + if (status == VXGE_HW_OK) + __vxge_hw_blockpool_blocks_remove(blockpool); + } + + return; +} + +/* + * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool + * This function allocates a block from block pool or from the system + */ +struct __vxge_hw_blockpool_entry * +__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + + blockpool = &devh->block_pool; + + if (size == blockpool->block_size) { + + if (!list_empty(&blockpool->free_block_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_block_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry != NULL) { + list_del(&entry->item); + blockpool->pool_size--; + } + } + + if (entry != NULL) + __vxge_hw_blockpool_blocks_add(blockpool); + + return entry; +} + +/* + * __vxge_hw_blockpool_block_free - Frees a block from block pool + * @devh: Hal device + * @entry: Entry of block to be freed + * + * This function frees a block from block pool + */ +void +__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, + struct __vxge_hw_blockpool_entry *entry) +{ + struct __vxge_hw_blockpool *blockpool; + + blockpool = &devh->block_pool; + + if (entry->length == blockpool->block_size) { + list_add(&entry->item, &blockpool->free_block_list); + blockpool->pool_size++; + } + + __vxge_hw_blockpool_blocks_remove(blockpool); + + return; +} diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h new file mode 100644 index 00000000000..afbdf6f4d22 --- /dev/null +++ b/drivers/net/vxge/vxge-config.h @@ -0,0 +1,2259 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-config.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2009 Neterion Inc. + ******************************************************************************/ +#ifndef VXGE_CONFIG_H +#define VXGE_CONFIG_H +#include <linux/list.h> + +#ifndef VXGE_CACHE_LINE_SIZE +#define VXGE_CACHE_LINE_SIZE 128 +#endif + +#define vxge_os_vaprintf(level, mask, fmt, ...) 
{ \
+	char buff[255]; \
+	snprintf(buff, 255, fmt, __VA_ARGS__); \
+	printk("%s\n", buff); \
+}
+
+#ifndef VXGE_ALIGN
+#define VXGE_ALIGN(adrs, size) \
+	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
+#endif
+
+#define VXGE_HW_MIN_MTU				68
+#define VXGE_HW_MAX_MTU				9600
+#define VXGE_HW_DEFAULT_MTU			1500
+
+#ifdef VXGE_DEBUG_ASSERT
+
+/**
+ * vxge_assert
+ * @test: C-condition to check
+ *
+ * This macro implements a traditional assert. By default assertions
+ * are enabled. They can be disabled by undefining the
+ * VXGE_DEBUG_ASSERT macro at compile time.
+ */
+#define vxge_assert(test) { \
+	if (!(test)) \
+		vxge_os_bug("bad cond: "#test" at %s:%d\n", \
+				__FILE__, __LINE__); }
+#else
+#define vxge_assert(test)
+#endif /* end of VXGE_DEBUG_ASSERT */
+
+/**
+ * enum vxge_debug_level
+ * @VXGE_NONE: debug disabled
+ * @VXGE_ERR: all errors going to be logged out
+ * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
+ *		going to be logged out. Very noisy.
+ *
+ * This enumeration is used to switch between different debug levels
+ * at runtime, if the DEBUG macro is defined during compilation. If
+ * the DEBUG macro is not defined, the debug code is compiled out.
+ */
+enum vxge_debug_level {
+	VXGE_NONE   = 0,
+	VXGE_TRACE  = 1,
+	VXGE_ERR    = 2
+};
+
+#define NULL_VPID					0xFFFFFFFF
+#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
+#define VXGE_DEBUG_MODULE_MASK  0xffffffff
+#define VXGE_DEBUG_TRACE_MASK   0xffffffff
+#define VXGE_DEBUG_ERR_MASK     0xffffffff
+#define VXGE_DEBUG_MASK         0x000001ff
+#else
+#define VXGE_DEBUG_MODULE_MASK  0x20000000
+#define VXGE_DEBUG_TRACE_MASK   0x20000000
+#define VXGE_DEBUG_ERR_MASK     0x20000000
+#define VXGE_DEBUG_MASK         0x00000001
+#endif
+
+/*
+ * @VXGE_COMPONENT_LL: do debug for vxge link layer module
+ * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
+ *
+ * These masks are used to distinguish modules or libraries during
+ * compilation and at runtime. The Makefile must declare the
+ * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
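+ *
+ * Gating sketch (editor's illustration; the real trace macros live in
+ * the driver's main header, this only shows how the masks are meant
+ * to combine):
+ *
+ *	if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
+ *		vxge_os_vaprintf(VXGE_TRACE, VXGE_COMPONENT_LL,
+ *				"%s", "vpath opened");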
+ */ +#define VXGE_COMPONENT_LL 0x20000000 +#define VXGE_COMPONENT_ALL 0xffffffff + +#define VXGE_HW_BASE_INF 100 +#define VXGE_HW_BASE_ERR 200 +#define VXGE_HW_BASE_BADCFG 300 + +enum vxge_hw_status { + VXGE_HW_OK = 0, + VXGE_HW_FAIL = 1, + VXGE_HW_PENDING = 2, + VXGE_HW_COMPLETIONS_REMAIN = 3, + + VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1, + VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2, + + VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1, + VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2, + VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3, + VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4, + VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5, + VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6, + VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7, + VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8, + VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9, + VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10, + VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11, + VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12, + VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13, + VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14, + VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15, + VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16, + VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17, + VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18, + VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19, + VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20, + VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21, + VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22, + + VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1, + VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2, + VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3, + VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4, + VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5, + VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6, + VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7, + + VXGE_HW_EOF_TRACE_BUF = -1 +}; + +/** + * enum enum vxge_hw_device_link_state - Link state enumeration. + * @VXGE_HW_LINK_NONE: Invalid link state. + * @VXGE_HW_LINK_DOWN: Link is down. + * @VXGE_HW_LINK_UP: Link is up. + * + */ +enum vxge_hw_device_link_state { + VXGE_HW_LINK_NONE, + VXGE_HW_LINK_DOWN, + VXGE_HW_LINK_UP +}; + +/** + * struct vxge_hw_device_date - Date Format + * @day: Day + * @month: Month + * @year: Year + * @date: Date in string format + * + * Structure for returning date + */ + +#define VXGE_HW_FW_STRLEN 32 +struct vxge_hw_device_date { + u32 day; + u32 month; + u32 year; + char date[VXGE_HW_FW_STRLEN]; +}; + +struct vxge_hw_device_version { + u32 major; + u32 minor; + u32 build; + char version[VXGE_HW_FW_STRLEN]; +}; + +u64 +__vxge_hw_vpath_pci_func_mode_get( + u32 vp_id, + struct vxge_hw_vpath_reg __iomem *vpath_reg); + +/** + * struct vxge_hw_fifo_config - Configuration of fifo. + * @enable: Is this fifo to be commissioned + * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors) + * blocks per queue. + * @max_frags: Max number of Tx buffers per TxDL (that is, per single + * transmit operation). + * No more than 256 transmit buffers can be specified. + * @memblock_size: Fifo descriptors are allocated in blocks of @mem_block_size + * bytes. Setting @memblock_size to page size ensures + * by-page allocation of descriptors. 128K bytes is the + * maximum supported block size. 
+ * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
+ *             (e.g., to align on a cache line).
+ * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
+ *             Use 0 otherwise.
+ * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
+ *             which generally improves latency of the host bridge operation
+ *             (see PCI specification). For valid values please refer
+ *             to struct vxge_hw_fifo_config{} in the driver sources.
+ * Configuration of all Titan fifos.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the struct vxge_hw_fifo_config{} structure.
+ */
+struct vxge_hw_fifo_config {
+	u32				enable;
+#define VXGE_HW_FIFO_ENABLE				1
+#define VXGE_HW_FIFO_DISABLE				0
+
+	u32				fifo_blocks;
+#define VXGE_HW_MIN_FIFO_BLOCKS				2
+#define VXGE_HW_MAX_FIFO_BLOCKS				128
+
+	u32				max_frags;
+#define VXGE_HW_MIN_FIFO_FRAGS				1
+#define VXGE_HW_MAX_FIFO_FRAGS				256
+
+	u32				memblock_size;
+#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE			VXGE_HW_BLOCK_SIZE
+#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE			131072
+#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE			8096
+
+	u32				alignment_size;
+#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE			0
+#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE			65536
+#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE			VXGE_CACHE_LINE_SIZE
+
+	u32				intr;
+#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE			1
+#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE			0
+#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT			0
+
+	u32				no_snoop_bits;
+#define VXGE_HW_FIFO_NO_SNOOP_DISABLED			0
+#define VXGE_HW_FIFO_NO_SNOOP_TXD			1
+#define VXGE_HW_FIFO_NO_SNOOP_FRM			2
+#define VXGE_HW_FIFO_NO_SNOOP_ALL			3
+#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT			0
+
+};
+/**
+ * struct vxge_hw_ring_config - Ring configurations.
+ * @enable: Is this ring to be commissioned
+ * @ring_blocks: Numbers of RxD blocks in the ring
+ * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
+ *             to Titan User Guide.
+ * @scatter_mode: Titan supports three receive scatter modes: A, B and C.
+ *             For details please refer to Titan User Guide.
+ * @rx_timer_val: The number of 32ns periods that would be counted between two
+ *             timer interrupts.
+ * @greedy_return: If set, forces the device to return absolutely all RxD
+ *             that are consumed and still on board when a timer interrupt
+ *             triggers. If clear, then if the device has already returned
+ *             RxD before the current timer interrupt triggered and after
+ *             the previous timer interrupt triggered, the device is not
+ *             forced to return the rest of the consumed RxD that it has
+ *             on board, which account for a byte count less than the one
+ *             programmed into the PRC_CFG6.RXD_CRXDT field
+ * @rx_timer_ci: TBD
+ * @backoff_interval_us: Time (in microseconds), after which Titan
+ *             tries to download RxDs posted by the host.
+ *             Note that the "backoff" does not happen if the host posts
+ *             receive descriptors in a timely fashion.
+ * Ring configuration.
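+ *
+ * Example initializer (editor's sketch, using the defaults declared
+ * below):
+ *
+ *	struct vxge_hw_ring_config ring = {
+ *		.enable		= VXGE_HW_RING_ENABLE,
+ *		.ring_blocks	= VXGE_HW_DEF_RING_BLOCKS,
+ *		.buffer_mode	= VXGE_HW_RING_RXD_BUFFER_MODE_1,
+ *		.scatter_mode	= VXGE_HW_RING_SCATTER_MODE_A,
+ *		.rxds_limit	= VXGE_HW_DEF_RING_RXDS_LIMIT,
+ *	};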
+ */
+struct vxge_hw_ring_config {
+	u32				enable;
+#define VXGE_HW_RING_ENABLE					1
+#define VXGE_HW_RING_DISABLE					0
+#define VXGE_HW_RING_DEFAULT					1
+
+	u32				ring_blocks;
+#define VXGE_HW_MIN_RING_BLOCKS					1
+#define VXGE_HW_MAX_RING_BLOCKS					128
+#define VXGE_HW_DEF_RING_BLOCKS					2
+
+	u32				buffer_mode;
+#define VXGE_HW_RING_RXD_BUFFER_MODE_1				1
+#define VXGE_HW_RING_RXD_BUFFER_MODE_3				3
+#define VXGE_HW_RING_RXD_BUFFER_MODE_5				5
+#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT			1
+
+	u32				scatter_mode;
+#define VXGE_HW_RING_SCATTER_MODE_A				0
+#define VXGE_HW_RING_SCATTER_MODE_B				1
+#define VXGE_HW_RING_SCATTER_MODE_C				2
+#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT		0xffffffff
+
+	u64				rxds_limit;
+#define VXGE_HW_DEF_RING_RXDS_LIMIT				44
+};
+
+/**
+ * struct vxge_hw_vp_config - Configuration of virtual path
+ * @vp_id: Virtual Path Id
+ * @min_bandwidth: Minimum Guaranteed bandwidth
+ * @ring: See struct vxge_hw_ring_config{}.
+ * @fifo: See struct vxge_hw_fifo_config{}.
+ * @tti: Configuration of interrupt associated with Transmit.
+ *             see struct vxge_hw_tim_intr_config();
+ * @rti: Configuration of interrupt associated with Receive.
+ *             see struct vxge_hw_tim_intr_config();
+ * @mtu: mtu size used on this port.
+ * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
+ *             remove the VLAN tag from all received tagged frames that are not
+ *             replicated at the internal L2 switch.
+ *             0 - Do not strip the VLAN tag.
+ *             1 - Strip the VLAN tag. Regardless of this setting, VLAN tags
+ *                 are always placed into the RxDMA descriptor.
+ *
+ * This structure is used by the driver to pass the configuration parameters
+ * to configure the Virtual Path.
+ */
+struct vxge_hw_vp_config {
+	u32			vp_id;
+
+#define	VXGE_HW_VPATH_PRIORITY_MIN			0
+#define	VXGE_HW_VPATH_PRIORITY_MAX			16
+#define	VXGE_HW_VPATH_PRIORITY_DEFAULT			0
+
+	u32			min_bandwidth;
+#define	VXGE_HW_VPATH_BANDWIDTH_MIN			0
+#define	VXGE_HW_VPATH_BANDWIDTH_MAX			100
+#define	VXGE_HW_VPATH_BANDWIDTH_DEFAULT			0
+
+	struct vxge_hw_ring_config		ring;
+	struct vxge_hw_fifo_config		fifo;
+	struct vxge_hw_tim_intr_config		tti;
+	struct vxge_hw_tim_intr_config		rti;
+
+	u32			mtu;
+#define VXGE_HW_VPATH_MIN_INITIAL_MTU			VXGE_HW_MIN_MTU
+#define VXGE_HW_VPATH_MAX_INITIAL_MTU			VXGE_HW_MAX_MTU
+#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU	0xffffffff
+
+	u32			rpa_strip_vlan_tag;
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE			1
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE		0
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT	0xffffffff
+
+};
+/**
+ * struct vxge_hw_device_config - Device configuration.
+ * @dma_blockpool_initial: Initial size of DMA Pool
+ * @dma_blockpool_max: Maximum blocks in DMA pool
+ * @intr_mode: Line, or MSI-X interrupt.
+ *
+ * @rth_en: Enable Receive Traffic Hashing (RTH) using IT (Indirection Table).
+ * @rth_it_type: RTH IT table programming type
+ * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
+ * @vp_config: Configuration for virtual paths
+ * @device_poll_millis: Specify the interval (in milliseconds)
+ *			to wait for register reads
+ *
+ * Titan configuration.
+ * Contains per-device configuration parameters, including:
+ * - stats sampling interval, etc.
+ *
+ * In addition, struct vxge_hw_device_config{} includes "subordinate"
+ * configurations, including:
+ * - fifos and rings;
+ * - MAC (done at firmware level).
+ *
+ * See Titan User Guide for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the struct vxge_hw_device_config{} structure.
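+ *
+ * Example (editor's sketch; the values are the enable/default macros
+ * declared in the structure below):
+ *
+ *	struct vxge_hw_device_config dev_config;
+ *
+ *	memset(&dev_config, 0, sizeof(dev_config));
+ *	dev_config.intr_mode = VXGE_HW_INTR_MODE_MSIX;
+ *	dev_config.rth_en = VXGE_HW_RTH_ENABLE;
+ *	dev_config.device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;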
+ * Please refer to the corresponding include file.
+ * See also: struct vxge_hw_tim_intr_config{}.
+ */
+struct vxge_hw_device_config {
+	u32					dma_blockpool_initial;
+	u32					dma_blockpool_max;
+#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE			0
+#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE		0
+#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE		4
+#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE			4096
+
+#define	VXGE_HW_MAX_PAYLOAD_SIZE_512			2
+
+	u32					intr_mode;
+#define VXGE_HW_INTR_MODE_IRQLINE			0
+#define VXGE_HW_INTR_MODE_MSIX				1
+#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT			2
+
+#define VXGE_HW_INTR_MODE_DEF				0
+
+	u32					rth_en;
+#define VXGE_HW_RTH_DISABLE				0
+#define VXGE_HW_RTH_ENABLE				1
+#define VXGE_HW_RTH_DEFAULT				0
+
+	u32					rth_it_type;
+#define VXGE_HW_RTH_IT_TYPE_SOLO_IT			0
+#define VXGE_HW_RTH_IT_TYPE_MULTI_IT			1
+#define VXGE_HW_RTH_IT_TYPE_DEFAULT			0
+
+	u32					rts_mac_en;
+#define VXGE_HW_RTS_MAC_DISABLE				0
+#define VXGE_HW_RTS_MAC_ENABLE				1
+#define VXGE_HW_RTS_MAC_DEFAULT				0
+
+	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
+
+	u32					device_poll_millis;
+#define VXGE_HW_MIN_DEVICE_POLL_MILLIS			1
+#define VXGE_HW_MAX_DEVICE_POLL_MILLIS			100000
+#define VXGE_HW_DEF_DEVICE_POLL_MILLIS			1000
+
+};
+
+/**
+ * function vxge_uld_link_up_f - Link-Up callback provided by driver.
+ * @devh: HW device handle.
+ * Link-up notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * function vxge_uld_link_down_f - Link-Down callback provided by
+ * driver.
+ * @devh: HW device handle.
+ *
+ * Link-Down notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * function vxge_uld_crit_err_f - Critical Error notification callback.
+ * @devh: HW device handle (typically obtained at HW device
+ *	initialization time).
+ * @type: Enumerated hw error, e.g.: double ECC.
+ * @serr_data: Titan status.
+ * @ext_data: Extended data. The contents depends on the @type.
+ *
+ * Critical-error notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
+ * @link_up: See vxge_uld_link_up_f{}.
+ * @link_down: See vxge_uld_link_down_f{}.
+ * @crit_err: See vxge_uld_crit_err_f{}.
+ *
+ * Driver slow-path (per-driver) callbacks.
+ * Implemented by driver and provided to HW via
+ * vxge_hw_driver_initialize().
+ * Note that these callbacks are not mandatory: HW will not invoke
+ * a callback if NULL is specified.
+ *
+ * See also: vxge_hw_driver_initialize().
+ */
+struct vxge_hw_uld_cbs {
+
+	void (*link_up)(struct __vxge_hw_device *devh);
+	void (*link_down)(struct __vxge_hw_device *devh);
+	void (*crit_err)(struct __vxge_hw_device *devh,
+			enum vxge_hw_event type, u64 ext_data);
+};
+
+/*
+ * struct __vxge_hw_blockpool_entry - Block private data structure
+ * @item: List header used to link.
+ * @length: Length of the block
+ * @memblock: Virtual address block
+ * @dma_addr: DMA Address of the block.
+ * @dma_handle: DMA handle of the block.
+ * @acc_handle: DMA acc handle
+ *
+ * Block is allocated with a header to put the blocks into list.
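+ *
+ * Lifecycle sketch (editor's illustration of the pool calls defined
+ * in vxge-config.c):
+ *
+ *	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
+ *			pool_size, pool_max);
+ *	entry = __vxge_hw_blockpool_block_allocate(hldev,
+ *			VXGE_HW_BLOCK_SIZE);
+ *	... use entry->memblock / entry->dma_addr ...
+ *	__vxge_hw_blockpool_block_free(hldev, entry);
+ *	__vxge_hw_blockpool_destroy(&hldev->block_pool);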
+ *
+ */
+struct __vxge_hw_blockpool_entry {
+	struct list_head	item;
+	u32			length;
+	void			*memblock;
+	dma_addr_t		dma_addr;
+	struct pci_dev		*dma_handle;
+	struct pci_dev		*acc_handle;
+};
+
+/*
+ * struct __vxge_hw_blockpool - Block Pool
+ * @hldev: HW device
+ * @block_size: size of each block.
+ * @pool_size: Number of blocks in the pool
+ * @pool_max: Maximum number of blocks above which to free additional blocks
+ * @req_out: Number of block requests outstanding with the OS
+ * @free_block_list: List of free blocks
+ * @free_entry_list: List of free blockpool entries
+ *
+ * The block pool contains the preallocated DMA blocks.
+ *
+ */
+struct __vxge_hw_blockpool {
+	struct __vxge_hw_device *hldev;
+	u32			block_size;
+	u32			pool_size;
+	u32			pool_max;
+	u32			req_out;
+	struct list_head	free_block_list;
+	struct list_head	free_entry_list;
+};
+
+/*
+ * enum __vxge_hw_channel_type - Enumerated channel types.
+ * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
+ * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
+ * @VXGE_HW_CHANNEL_TYPE_RING: ring.
+ * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
+ * (and recognized) channel types. Currently: 2.
+ *
+ * Enumerated channel types. Currently there are only two link-layer
+ * channels - Titan fifo and Titan ring. In the future the list will grow.
+ */
+enum __vxge_hw_channel_type {
+	VXGE_HW_CHANNEL_TYPE_UNKNOWN		= 0,
+	VXGE_HW_CHANNEL_TYPE_FIFO		= 1,
+	VXGE_HW_CHANNEL_TYPE_RING		= 2,
+	VXGE_HW_CHANNEL_TYPE_MAX		= 3
+};
+
+/*
+ * struct __vxge_hw_channel
+ * @item: List item; used to maintain a list of open channels.
+ * @type: Channel type. See enum vxge_hw_channel_type{}.
+ * @devh: Device handle. HW device object that contains _this_ channel.
+ * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
+ * @length: Channel length. Currently allocated number of descriptors.
+ *          The channel length "grows" when more descriptors get allocated.
+ *          See _hw_mempool_grow.
+ * @reserve_arr: Reserve array. Contains descriptors that can be reserved
+ *               by driver for the subsequent send or receive operation.
+ *               See vxge_hw_fifo_txdl_reserve(),
+ *               vxge_hw_ring_rxd_reserve().
+ * @reserve_ptr: Current pointer in the reserve array
+ * @reserve_top: Reserve top gives the maximum number of dtrs available in
+ *               the reserve array.
+ * @work_arr: Work array. Contains descriptors posted to the channel.
+ *            Note that at any point in time @work_arr contains 3 types of
+ *            descriptors:
+ *            1) posted but not yet consumed by Titan device;
+ *            2) consumed but not yet completed;
+ *            3) completed but not yet freed
+ *            (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
+ * @post_index: Post index. At any point in time points to the
+ *              position in the channel which will contain the next
+ *              to-be-posted descriptor.
+ * @compl_index: Completion index. At any point in time points to the
+ *               position in the channel which will contain the next
+ *               to-be-completed descriptor.
+ * @free_arr: Free array. Contains completed descriptors that were freed
+ *            (i.e., handed over back to HW) by driver.
+ *            See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
+ * @free_ptr: Current pointer in the free array
+ * @per_dtr_space: Per-descriptor space (in bytes) that channel user can
+ *                 utilize to store per-operation control information.
+ * @stats: Pointer to common statistics
+ * @userdata: Per-channel opaque (void*) user-defined context, which may be
+ *            driver object, ULP connection, etc.
+ *            Once channel is open, @userdata is passed back to user via
+ *            vxge_hw_channel_callback_f.
+ *
+ * HW channel object.
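+ *
+ * Descriptor life cycle, roughly (editorial summary of the fields above;
+ * the function names are the helpers already referenced in this file):
+ *
+ *	reserve_arr --(vxge_hw_fifo_txdl_reserve / vxge_hw_ring_rxd_reserve)->
+ *	work_arr    --(device consumes, driver processes the completion)----->
+ *	free_arr    --(vxge_hw_fifo_txdl_free / vxge_hw_ring_rxd_free)------->
+ *	back to reserve_arr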
+ *
+ * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
+ */
+struct __vxge_hw_channel {
+	struct list_head		item;
+	enum __vxge_hw_channel_type	type;
+	struct __vxge_hw_device		*devh;
+	struct __vxge_hw_vpath_handle	*vph;
+	u32				length;
+	u32				vp_id;
+	void				**reserve_arr;
+	u32				reserve_ptr;
+	u32				reserve_top;
+	void				**work_arr;
+	u32				post_index ____cacheline_aligned;
+	u32				compl_index ____cacheline_aligned;
+	void				**free_arr;
+	u32				free_ptr;
+	void				**orig_arr;
+	u32				per_dtr_space;
+	void				*userdata;
+	struct vxge_hw_common_reg	__iomem *common_reg;
+	u32				first_vp_id;
+	struct vxge_hw_vpath_stats_sw_common_info *stats;
+
+} ____cacheline_aligned;
+
+/*
+ * struct __vxge_hw_virtualpath - Virtual Path
+ *
+ * @vp_id: Virtual path id
+ * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
+ * @hldev: Hal device
+ * @vp_config: Virtual Path Config
+ * @vp_reg: VPATH Register map address in BAR0
+ * @vpmgmt_reg: VPATH_MGMT register map address
+ * @max_mtu: Max mtu that can be supported
+ * @vsport_number: vsport attached to this vpath
+ * @max_kdfc_db: Maximum kernel mode doorbells
+ * @max_nofl_db: Maximum non offload doorbells
+ * @tx_intr_num: Interrupt Number associated with the TX
+ *
+ * @ringh: Ring Queue
+ * @fifoh: FIFO Queue
+ * @vpath_handles: Virtual Path handles list
+ * @stats_block: Memory for DMAing stats
+ * @stats: Vpath statistics
+ *
+ * Virtual path structure to encapsulate the data related to a virtual path.
+ * Virtual paths are allocated by the HW upon getting configuration from the
+ * driver and inserted into the list of virtual paths.
+ */
+struct __vxge_hw_virtualpath {
+	u32				vp_id;
+
+	u32				vp_open;
+#define VXGE_HW_VP_NOT_OPEN	0
+#define	VXGE_HW_VP_OPEN		1
+
+	struct __vxge_hw_device		*hldev;
+	struct vxge_hw_vp_config	*vp_config;
+	struct vxge_hw_vpath_reg	__iomem *vp_reg;
+	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg;
+	struct __vxge_hw_non_offload_db_wrapper	__iomem *nofl_db;
+
+	u32				max_mtu;
+	u32				vsport_number;
+	u32				max_kdfc_db;
+	u32				max_nofl_db;
+
+	struct __vxge_hw_ring *____cacheline_aligned ringh;
+	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
+	struct list_head		vpath_handles;
+	struct __vxge_hw_blockpool_entry	*stats_block;
+	struct vxge_hw_vpath_stats_hw_info	*hw_stats;
+	struct vxge_hw_vpath_stats_hw_info	*hw_stats_sav;
+	struct vxge_hw_vpath_stats_sw_info	*sw_stats;
+};
+
+/*
+ * struct __vxge_hw_vpath_handle - List item to store callback information
+ * @item: List head to keep the item in linked list
+ * @vpath: Virtual path to which this item belongs
+ *
+ * This structure is used to store the callback information.
+ */
+struct __vxge_hw_vpath_handle {
+	struct list_head		item;
+	struct __vxge_hw_virtualpath	*vpath;
+};
+
+/**
+ * struct __vxge_hw_device - Hal device object
+ * @magic: Magic Number
+ * @device_id: PCI Device Id of the adapter
+ * @major_revision: PCI Device major revision
+ * @minor_revision: PCI Device minor revision
+ * @bar0: BAR0 virtual address.
+ * @bar1: BAR1 virtual address.
+ * @bar2: BAR2 virtual address.
+ * @pdev: Physical device handle
+ * @config: Configuration passed by the LL driver at initialization
+ * @link_state: Link state
+ *
+ * HW device object.
Represents the Titan adapter.
+ */
+struct __vxge_hw_device {
+	u32				magic;
+#define VXGE_HW_DEVICE_MAGIC		0x12345678
+#define VXGE_HW_DEVICE_DEAD		0xDEADDEAD
+	u16				device_id;
+	u8				major_revision;
+	u8				minor_revision;
+	void __iomem			*bar0;
+	void __iomem			*bar1;
+	void __iomem			*bar2;
+	struct pci_dev			*pdev;
+	struct net_device		*ndev;
+	struct vxge_hw_device_config	config;
+	enum vxge_hw_device_link_state	link_state;
+
+	struct vxge_hw_uld_cbs		uld_callbacks;
+
+	u32				host_type;
+	u32				func_id;
+	u32				access_rights;
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH	0x1
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM	0x2
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM	0x4
+	struct vxge_hw_legacy_reg __iomem	*legacy_reg;
+	struct vxge_hw_toc_reg __iomem		*toc_reg;
+	struct vxge_hw_common_reg __iomem	*common_reg;
+	struct vxge_hw_mrpcim_reg __iomem	*mrpcim_reg;
+	struct vxge_hw_srpcim_reg __iomem	*srpcim_reg[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
+	struct vxge_hw_vpmgmt_reg __iomem	*vpmgmt_reg[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
+	struct vxge_hw_vpath_reg __iomem	*vpath_reg[VXGE_HW_TITAN_VPATH_REG_SPACES];
+	u8 __iomem			*kdfc;
+	u8 __iomem			*usdc;
+	struct __vxge_hw_virtualpath	virtual_paths[VXGE_HW_MAX_VIRTUAL_PATHS];
+	u64				vpath_assignments;
+	u64				vpaths_deployed;
+	u32				first_vp_id;
+	u64				tim_int_mask0[4];
+	u32				tim_int_mask1[4];
+
+	struct __vxge_hw_blockpool	block_pool;
+	struct vxge_hw_device_stats	stats;
+	u32				debug_module_mask;
+	u32				debug_level;
+	u32				level_err;
+	u32				level_trace;
+};
+
+#define VXGE_HW_INFO_LEN	64
+/**
+ * struct vxge_hw_device_hw_info - Device information
+ * @host_type: Host Type
+ * @func_id: Function Id
+ * @vpath_mask: vpath bit mask
+ * @fw_version: Firmware version
+ * @fw_date: Firmware date
+ * @flash_version: Flash version
+ * @flash_date: Flash date
+ * @mac_addrs: Mac addresses for each vpath
+ * @mac_addr_masks: Mac address masks for each vpath
+ *
+ * Returns the vpath mask that has the bits set for each vpath allocated
+ * for the driver, and the first mac address for each vpath
+ */
+struct vxge_hw_device_hw_info {
+	u32		host_type;
+#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION	0
+#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION	1
+#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0		2
+#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION	3
+#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG	4
+#define VXGE_HW_SR_VH_FUNCTION0			5
+#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION		6
+#define VXGE_HW_VH_NORMAL_FUNCTION		7
+	u64		function_mode;
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION	0
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION	1
+#define VXGE_HW_FUNCTION_MODE_SRIOV		2
+#define VXGE_HW_FUNCTION_MODE_MRIOV		3
+	u32		func_id;
+	u64		vpath_mask;
+	struct vxge_hw_device_version	fw_version;
+	struct vxge_hw_device_date	fw_date;
+	struct vxge_hw_device_version	flash_version;
+	struct vxge_hw_device_date	flash_date;
+	u8		serial_number[VXGE_HW_INFO_LEN];
+	u8		part_number[VXGE_HW_INFO_LEN];
+	u8		product_desc[VXGE_HW_INFO_LEN];
+	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+};
+
+/**
+ * struct vxge_hw_device_attr - Device memory spaces.
+ * @bar0: BAR0 virtual address.
+ * @bar1: BAR1 virtual address.
+ * @bar2: BAR2 virtual address.
+ * @pdev: PCI device object.
+ *
+ * Device memory spaces. Includes configuration, BAR0, BAR1, etc. per device
+ * mapped memories. Also includes a pointer to the OS-specific PCI device
+ * object.
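+ *
+ * A minimal setup sketch (editorial; error handling omitted, my_link_up is a
+ * hypothetical driver handler, and device_config is assumed to have been
+ * filled via vxge_hw_device_config_default_get()):
+ *
+ *	struct vxge_hw_device_attr attr;
+ *	struct __vxge_hw_device *hldev;
+ *
+ *	attr.bar0 = pci_ioremap_bar(pdev, 0);
+ *	attr.pdev = pdev;
+ *	attr.uld_callbacks.link_up = my_link_up;
+ *	vxge_hw_device_initialize(&hldev, &attr, &device_config);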
+ */
+struct vxge_hw_device_attr {
+	void __iomem		*bar0;
+	void __iomem		*bar1;
+	void __iomem		*bar2;
+	struct pci_dev		*pdev;
+	struct vxge_hw_uld_cbs	uld_callbacks;
+};
+
+#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls)	(hldev->link_state = ls)
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
+	if (i < 16) {					\
+		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
+		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
+	} else {					\
+		m1[0] = 0x80000000;			\
+		m1[1] = 0x40000000;			\
+	}						\
+}
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
+	if (i < 16) {					\
+		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
+		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
+	} else {					\
+		m1[0] = 0;				\
+		m1[1] = 0;				\
+	}						\
+}
+
+#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) {		\
+	status = vxge_hw_mrpcim_stats_access(hldev,		\
+				VXGE_HW_STATS_OP_READ,		\
+				loc,				\
+				offset,				\
+				&val64);			\
+								\
+	if (status != VXGE_HW_OK)				\
+		return status;					\
+}
+
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {			\
+	status = __vxge_hw_vpath_stats_access(vpath,		\
+			VXGE_HW_STATS_OP_READ,			\
+			offset,					\
+			&val64);				\
+	if (status != VXGE_HW_OK)				\
+		return status;					\
+}
+
+/*
+ * struct __vxge_hw_ring - Ring channel.
+ * @channel: Channel "base" of this ring, the common part of all HW
+ *           channels.
+ * @mempool: Memory pool, the pool from which descriptors get allocated.
+ *           (See vxge_hw_mm.h).
+ * @config: Ring configuration, part of device configuration
+ *          (see struct vxge_hw_device_config{}).
+ * @ring_length: Length of the ring
+ * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
+ *               as per Titan User Guide.
+ * @rxd_size: RxD sizes for 1-, 3- or 5-buffer modes. As per Titan spec,
+ *            a 1-buffer mode descriptor is 32 bytes long, etc.
+ * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
+ *                 per-descriptor data (e.g., DMA handle for Solaris)
+ * @per_rxd_space: Per rxd space requested by driver
+ * @rxds_per_block: Number of descriptors per hardware-defined RxD
+ *                  block. Depends on the (1-, 3-, 5-) buffer mode.
+ * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
+ *                      usage. Not to be confused with @rxd_priv_size.
+ * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
+ * @callback: Channel completion callback. HW invokes the callback when there
+ *            are new completions on that channel. In many implementations
+ *            the @callback executes in the hw interrupt context.
+ * @rxd_init: Channel's descriptor-initialize callback.
+ *            See vxge_hw_ring_rxd_init_f{}.
+ *            If not NULL, HW invokes the callback when opening
+ *            the ring.
+ * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
+ *            HW invokes the callback when closing the corresponding channel.
+ *            See also vxge_hw_channel_rxd_term_f{}.
+ * @stats: Statistics for ring
+ *
+ * Ring channel.
+ *
+ * Note: The structure is cache line aligned to better utilize
+ * CPU cache performance.
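+ *
+ * A skeleton completion callback (illustrative sketch; the signature is the
+ * one declared in the struct below, the body is hypothetical):
+ *
+ *	static enum vxge_hw_status my_rx_compl(struct __vxge_hw_ring *ringh,
+ *			void *rxdh, u8 t_code, void *userdata)
+ *	{
+ *		... consume rxdh, then re-post or free it ...
+ *		return VXGE_HW_OK;
+ *	}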
+ */
+struct __vxge_hw_ring {
+	struct __vxge_hw_channel	channel;
+	struct vxge_hw_mempool		*mempool;
+	struct vxge_hw_vpath_reg	__iomem *vp_reg;
+	struct vxge_hw_common_reg	__iomem *common_reg;
+	u32				ring_length;
+	u32				buffer_mode;
+	u32				rxd_size;
+	u32				rxd_priv_size;
+	u32				per_rxd_space;
+	u32				rxds_per_block;
+	u32				rxdblock_priv_size;
+	u32				cmpl_cnt;
+	u32				vp_id;
+	u32				doorbell_cnt;
+	u32				total_db_cnt;
+	u64				rxds_limit;
+
+	enum vxge_hw_status (*callback)(
+			struct __vxge_hw_ring *ringh,
+			void *rxdh,
+			u8 t_code,
+			void *userdata);
+
+	enum vxge_hw_status (*rxd_init)(
+			void *rxdh,
+			void *userdata);
+
+	void (*rxd_term)(
+			void *rxdh,
+			enum vxge_hw_rxd_state state,
+			void *userdata);
+
+	struct vxge_hw_vpath_stats_sw_ring_info *stats	____cacheline_aligned;
+	struct vxge_hw_ring_config		*config;
+} ____cacheline_aligned;
+
+/**
+ * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
+ * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
+ * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_txdl_state {
+	VXGE_HW_TXDL_STATE_NONE	= 0,
+	VXGE_HW_TXDL_STATE_AVAIL	= 1,
+	VXGE_HW_TXDL_STATE_POSTED	= 2,
+	VXGE_HW_TXDL_STATE_FREED	= 3
+};
+
+/*
+ * struct __vxge_hw_fifo - Fifo.
+ * @channel: Channel "base" of this fifo, the common part of all HW
+ *           channels.
+ * @mempool: Memory pool, from which descriptors get allocated.
+ * @config: Fifo configuration, part of device configuration
+ *          (see struct vxge_hw_device_config{}).
+ * @interrupt_type: Interrupt type to be used
+ * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
+ * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
+ *                     For details on TxDLs please refer to the Titan UG.
+ * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
+ *             per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
+ * @priv_size: Per-Tx descriptor space reserved for driver
+ *             usage.
+ * @per_txdl_space: Per txdl private space for the driver
+ * @callback: Fifo completion callback. HW invokes the callback when there
+ *            are new completions on that fifo. In many implementations
+ *            the @callback executes in the hw interrupt context.
+ * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
+ *             HW invokes the callback when closing the corresponding fifo.
+ *             See also vxge_hw_fifo_txdl_term_f{}.
+ * @stats: Statistics of this fifo
+ *
+ * Fifo channel.
+ * Note: The structure is cache line aligned.
+ */
+struct __vxge_hw_fifo {
+	struct __vxge_hw_channel	channel;
+	struct vxge_hw_mempool		*mempool;
+	struct vxge_hw_fifo_config	*config;
+	struct vxge_hw_vpath_reg	__iomem *vp_reg;
+	struct __vxge_hw_non_offload_db_wrapper	__iomem *nofl_db;
+	u64				interrupt_type;
+	u32				no_snoop_bits;
+	u32				txdl_per_memblock;
+	u32				txdl_size;
+	u32				priv_size;
+	u32				per_txdl_space;
+	u32				vp_id;
+	u32				tx_intr_num;
+
+	enum vxge_hw_status (*callback)(
+			struct __vxge_hw_fifo *fifo_handle,
+			void *txdlh,
+			enum vxge_hw_fifo_tcode t_code,
+			void *userdata,
+			void **skb_ptr);
+
+	void (*txdl_term)(
+			void *txdlh,
+			enum vxge_hw_txdl_state state,
+			void *userdata);
+
+	struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
+} ____cacheline_aligned;
+
+/*
+ * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HW allocates
+ *              descriptors in memory blocks (see struct vxge_hw_fifo_config{})
+ *              Each memblock is a contiguous block of DMA-able memory.
+ * @frags: Total number of fragments (that is, contiguous data buffers)
+ *         carried by this TxDL.
+ * @align_vaddr_start: Aligned virtual address start
+ * @align_vaddr: Virtual address of the per-TxDL area in memory used for
+ *               alignment. Used to place one or more mis-aligned fragments
+ * @align_dma_addr: DMA address translated from the @align_vaddr.
+ * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
+ * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
+ * @align_dma_offset: The current offset into the @align_vaddr area.
+ *                    Grows while filling the descriptor, gets reset.
+ * @align_used_frags: Number of fragments used.
+ * @alloc_frags: Total number of fragments allocated.
+ * @unused: TODO
+ * @next_txdl_priv: (TODO).
+ * @first_txdp: (TODO).
+ * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
+ *                    TxDL list.
+ * @txdlh: Corresponding txdlh to this TxDL.
+ * @memblock: Pointer to the TxDL memory block or memory page.
+ * @dma_object: DMA address and handle of the memory block that contains
+ *              the descriptor. This member is used only in the "checked"
+ *              version of the HW (to enforce certain assertions);
+ *              otherwise it gets compiled out.
+ * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
+ *
+ * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
+ * information associated with the descriptor. Note that driver can ask HW
+ * to allocate additional per-descriptor space for its own (driver-specific)
+ * purposes.
+ *
+ * See also: struct vxge_hw_ring_rxd_priv{}.
+ */
+struct __vxge_hw_fifo_txdl_priv {
+	dma_addr_t		dma_addr;
+	struct pci_dev		*dma_handle;
+	ptrdiff_t		dma_offset;
+	u32			frags;
+	u8			*align_vaddr_start;
+	u8			*align_vaddr;
+	dma_addr_t		align_dma_addr;
+	struct pci_dev		*align_dma_handle;
+	struct pci_dev		*align_dma_acch;
+	ptrdiff_t		align_dma_offset;
+	u32			align_used_frags;
+	u32			alloc_frags;
+	u32			unused;
+	struct __vxge_hw_fifo_txdl_priv	*next_txdl_priv;
+	struct vxge_hw_fifo_txd		*first_txdp;
+	void			*memblock;
+};
+
+/*
+ * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
+ * @control_0: Bits 0 to 7 - Doorbell type.
+ *             Bits 8 to 31 - Reserved.
+ *             Bits 32 to 39 - The highest TxD in this TxDL.
+ *             Bits 40 to 47 - Reserved.
+ *             Bits 48 to 55 - Reserved.
+ *             Bits 56 to 63 - No snoop flags.
+ * @txdl_ptr: The starting location of the TxDL in host memory.
+ *
+ * Created by the host and written to the adapter via PIO to a Kernel Doorbell
+ * FIFO. All non-offload doorbell wrapper fields must be written by the host as
+ * part of a doorbell write. Consumed by the adapter but is not written by the
+ * adapter.
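+ *
+ * Composing the wrapper might look like this (illustrative sketch; the
+ * macros are the ones defined below, db/frags/txdl_dma_addr are
+ * hypothetical locals):
+ *
+ *	db->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
+ *		VXGE_HW_NODBW_LAST_TXD_NUMBER(frags - 1) |
+ *		VXGE_HW_NODBW_LIST_NO_SNOOP(
+ *			VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE);
+ *	db->txdl_ptr = txdl_dma_addr;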
+ */
+struct __vxge_hw_non_offload_db_wrapper {
+	u64		control_0;
+#define	VXGE_HW_NODBW_GET_TYPE(ctrl0)			vxge_bVALn(ctrl0, 0, 8)
+#define VXGE_HW_NODBW_TYPE(val)				vxge_vBIT(val, 0, 8)
+#define	VXGE_HW_NODBW_TYPE_NODBW			0
+
+#define	VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)	vxge_bVALn(ctrl0, 32, 8)
+#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val)		vxge_vBIT(val, 32, 8)
+
+#define	VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)		vxge_bVALn(ctrl0, 56, 8)
+#define VXGE_HW_NODBW_LIST_NO_SNOOP(val)		vxge_vBIT(val, 56, 8)
+#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE	0x2
+#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ	0x1
+
+	u64		txdl_ptr;
+};
+
+/*
+ * TX Descriptor
+ */
+
+/**
+ * struct vxge_hw_fifo_txd - Transmit Descriptor
+ * @control_0: Bits 0 to 6 - Reserved.
+ *             Bit 7 - List Ownership. This field should be initialized
+ *             to '1' by the driver before the transmit list pointer is
+ *             written to the adapter. This field will be set to '0' by the
+ *             adapter once it has completed transmitting the frame or frames
+ *             in the list. Note - This field is only valid in TxD0.
+ *             Additionally, for multi-list sequences, the driver should not
+ *             release any buffers until the ownership of the last list in the
+ *             multi-list sequence has been returned to the host.
+ *             Bits 8 to 11 - Reserved
+ *             Bits 12 to 15 - Transfer_Code. This field is only valid in
+ *             TxD0. It is used to describe the status of the transmit data
+ *             buffer transfer. This field is always overwritten by the
+ *             adapter, so this field may be initialized to any value.
+ *             Bits 16 to 17 - Host steering. This field allows the host to
+ *             override the selection of the physical transmit port.
+ *             Attention:
+ *             Normal sounds as if learned from the switch rather than from
+ *             the aggregation algorithms.
+ *             00: Normal. Use Destination/MAC Address
+ *             lookup to determine the transmit port.
+ *             01: Send on physical Port1.
+ *             10: Send on physical Port0.
+ *             11: Send on both ports.
+ *             Bits 18 to 21 - Reserved
+ *             Bits 22 to 23 - Gather_Code. This field is set by the host and
+ *             is used to describe how individual buffers comprise a frame.
+ *             10: First descriptor of a frame.
+ *             00: Middle of a multi-descriptor frame.
+ *             01: Last descriptor of a frame.
+ *             11: First and last descriptor of a frame (the entire frame
+ *             resides in a single buffer).
+ *             For multi-descriptor frames, the only valid gather code sequence
+ *             is {10, [00], 01}. In other words, the descriptors must be
+ *             placed in the list in the correct order.
+ *             Bits 24 to 27 - Reserved
+ *             Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
+ *             definition. Only valid in TxD0. This field allows the host to
+ *             indicate the Ethernet encapsulation of an outbound LSO packet.
+ *             00 - classic mode (best guess)
+ *             01 - LLC
+ *             10 - SNAP
+ *             11 - DIX
+ *             If "classic mode" is selected, the adapter will attempt to
+ *             decode the frame's Ethernet encapsulation by examining the L/T
+ *             field as follows:
+ *             <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
+ *             if packet is IPv4 or IPv6.
+ *             0x8870 Jumbo-SNAP encoding.
+ *             0x0800 IPv4 DIX encoding
+ *             0x86DD IPv6 DIX encoding
+ *             others illegal encapsulation
+ *             Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
+ *             Set to 1 to perform segmentation offload for TCP/UDP.
+ *             This field is valid only in TxD0.
+ *             Bits 31 to 33 - Reserved.
+ *             Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
+ *             This field is meaningful only when LSO_Control is non-zero.
+ *             When LSO_Control is set to TCP_LSO, the single (possibly large)
+ *             TCP segment described by this TxDL will be sent as a series of
+ *             TCP segments each of which contains no more than LSO_MSS
+ *             payload bytes.
+ *             When LSO_Control is set to UDP_LSO, the single (possibly large)
+ *             UDP datagram described by this TxDL will be sent as a series of
+ *             UDP datagrams each of which contains no more than LSO_MSS
+ *             payload bytes.
+ *             All outgoing frames from this TxDL will have LSO_MSS bytes of
+ *             UDP or TCP payload, with the exception of the last, which will
+ *             have <= LSO_MSS bytes of payload.
+ *             Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
+ *             buffer to be read by the adapter. This field is written by the
+ *             host. A value of 0 is illegal.
+ *             Bits 32 to 63 - This value is written by the adapter upon
+ *             completion of a UDP or TCP LSO operation and indicates the
+ *             number of UDP or TCP payload bytes that were transmitted. 0x0000
+ *             will be returned for any non-LSO operation.
+ * @control_1: Bits 0 to 4 - Reserved.
+ *             Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
+ *             offload. This field is only valid in the first TxD of a frame.
+ *             Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
+ *             This field is only valid in the first TxD of a frame (the TxD's
+ *             gather code must be 10 or 11). The driver should only set this
+ *             bit if it can guarantee that TCP is present.
+ *             Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
+ *             This field is only valid in the first TxD of a frame (the TxD's
+ *             gather code must be 10 or 11). The driver should only set this
+ *             bit if it can guarantee that UDP is present.
+ *             Bits 8 to 14 - Reserved.
+ *             Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
+ *             instruct the adapter to insert the VLAN tag specified by the
+ *             Tx_VLAN_Tag field. This field is only valid in the first TxD of
+ *             a frame.
+ *             Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
+ *             to be inserted into the frame by the adapter (the first two
+ *             bytes of a VLAN tag are always 0x8100). This field is only valid
+ *             if the Tx_VLAN_Enable field is set to '1'.
+ *             Bits 32 to 33 - Reserved.
+ *             Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
+ *             number the frame is associated with. This field is written by
+ *             the host. It is only valid in the first TxD of a frame.
+ *             Bits 40 to 42 - Reserved.
+ *             Bit 43 - Set to 1 to exclude the frame from bandwidth metering
+ *             functions. This field is valid only in the first TxD
+ *             of a frame.
+ *             Bits 44 to 45 - Reserved.
+ *             Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
+ *             generate an interrupt as soon as all of the frames in the list
+ *             have been transmitted. In order to have per-frame interrupts,
+ *             the driver should place a maximum of one frame per list. This
+ *             field is only valid in the first TxD of a frame.
+ *             Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
+ *             to count the frame toward the utilization interrupt specified in
+ *             the Tx_Int_Number field. This field is only valid in the first
+ *             TxD of a frame.
+ *             Bits 48 to 63 - Reserved.
+ * @buffer_pointer: Buffer start address.
+ * @host_control: Host_Control. Opaque 64bit data stored by driver inside the
+ *             Titan descriptor prior to posting the latter on the fifo
+ *             via vxge_hw_fifo_txdl_post(). The %host_control is returned as
+ *             is to the driver with each completed descriptor.
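+ *
+ * Filling a single-buffer frame might look like this (illustrative sketch;
+ * the macros are the ones defined below, txdp/dma_addr/len are hypothetical
+ * locals, and 0x3 is the "first and last" gather code from the table above):
+ *
+ *	txdp->buffer_pointer = dma_addr;
+ *	txdp->control_0 = VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
+ *			VXGE_HW_FIFO_TXD_GATHER_CODE(0x3);
+ *	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;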
+ *
+ * Transmit descriptor (TxD). A fifo descriptor contains a configured number
+ * (list) of TxDs. For more details please refer to Titan User Guide,
+ * Section 5.4.2 "Transmit Descriptor (TxD) Format".
+ */
+struct vxge_hw_fifo_txd {
+	u64 control_0;
+#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER		vxge_mBIT(7)
+
+#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
+#define VXGE_HW_FIFO_TXD_T_CODE(val) 			vxge_vBIT(val, 12, 4)
+#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED		VXGE_HW_FIFO_T_CODE_UNUSED
+
+
+#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) 		vxge_vBIT(val, 22, 2)
+#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST	VXGE_HW_FIFO_GATHER_CODE_FIRST
+#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST	VXGE_HW_FIFO_GATHER_CODE_LAST
+
+
+#define VXGE_HW_FIFO_TXD_LSO_EN				vxge_mBIT(30)
+
+#define VXGE_HW_FIFO_TXD_LSO_MSS(val) 			vxge_vBIT(val, 34, 14)
+
+#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) 		vxge_vBIT(val, 48, 16)
+
+	u64 control_1;
+#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN			vxge_mBIT(5)
+#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN			vxge_mBIT(6)
+#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN			vxge_mBIT(7)
+#define VXGE_HW_FIFO_TXD_VLAN_ENABLE			vxge_mBIT(15)
+
+#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) 			vxge_vBIT(val, 16, 16)
+
+#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) 		vxge_vBIT(val, 34, 6)
+
+#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST		vxge_mBIT(46)
+#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ			vxge_mBIT(47)
+
+	u64 buffer_pointer;
+
+	u64 host_control;
+};
+
+/**
+ * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
+ * @host_control: This field is exclusively for host use and is "readonly"
+ *             from the adapter's perspective.
+ * @control_0: Bits 0 to 6 - RTH_Bucket get
+ *             Bit 7 - Own Descriptor ownership bit. This bit is set to 1
+ *             by the host, and is set to 0 by the adapter.
+ *             0 - Host owns RxD and buffer.
+ *             1 - The adapter owns RxD and buffer.
+ *             Bit 8 - Fast_Path_Eligible When set, indicates that the
+ *             received frame meets all of the criteria for fast path
+ *             processing.
+ *             The required criteria are as follows:
+ *             !SYN &
+ *             (Transfer_Code == "Transfer OK") &
+ *             (!Is_IP_Fragment) &
+ *             ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
+ *             (Is_IPv6)) &
+ *             ((Is_TCP & computed_L4_checksum == 0xFFFF) |
+ *             (Is_UDP & (computed_L4_checksum == 0xFFFF |
+ *             computed_L4_checksum == 0x0000)))
+ *             (same meaning for all RxD buffer modes)
+ *             Bit 9 - L3 Checksum Correct
+ *             Bit 10 - L4 Checksum Correct
+ *             Bit 11 - Reserved
+ *             Bits 12 to 15 - This field is written by the adapter. It is
+ *             used to report the status of the frame transfer to the host.
+ *             0x0 - Transfer OK
+ *             0x4 - RDA Failure During Transfer
+ *             0x5 - Unparseable Packet, such as unknown IPv6 header.
+ *             0x6 - Frame integrity error (FCS or ECC).
+ *             0x7 - Buffer Size Error. The provided buffer(s) were not
+ *             appropriately sized and data loss occurred.
+ *             0x8 - Internal ECC Error. RxD corrupted.
+ *             0x9 - IPv4 Checksum error
+ *             0xA - TCP/UDP Checksum error
+ *             0xF - Unknown Error or Multiple Error. Indicates an
+ *             unknown problem or that more than one of transfer codes is set.
+ *             Bit 16 - SYN The adapter sets this field to indicate that
+ *             the incoming frame contained a TCP segment with its SYN bit
+ *             set and its ACK bit NOT set. (same meaning for all RxD buffer
+ *             modes)
+ *             Bit 17 - Is ICMP
+ *             Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
+ *             Socket Pair Direct Match Table and the frame was steered based
+ *             on SPDM.
+ *             Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
+ *             Indirection Table and the frame was steered based on hash
+ *             indirection.
+ *             Bits 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
+ *             type) that was used to calculate the hash.
+ *             Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
+ *             tagged.
+ *             Bits 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
+ *             of the received frame.
+ *             0x0 - Ethernet DIX
+ *             0x1 - LLC
+ *             0x2 - SNAP (includes Jumbo-SNAP)
+ *             0x3 - IPX
+ *             Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4
+ *             packet.
+ *             Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6
+ *             packet.
+ *             Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a
+ *             fragmented IP packet.
+ *             Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
+ *             Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
+ *             Bits 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
+ *             arrived with the frame. If the resulting computed IPv4 header
+ *             checksum for the frame did not produce the expected 0xFFFF
+ *             value, then the transfer code would be set to 0x9.
+ *             Bits 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value
+ *             that arrived with the frame. If the resulting computed TCP/UDP
+ *             checksum for the frame did not produce the expected 0xFFFF
+ *             value, then the transfer code would be set to 0xA.
+ * @control_1: Bits 0 to 1 - Reserved
+ *             Bits 2 to 15 - Buffer0_Size. This field is set by the host and
+ *             eventually overwritten by the adapter. The host writes the
+ *             available buffer size in bytes when it passes the descriptor to
+ *             the adapter. When a frame is delivered to the host, the adapter
+ *             populates this field with the number of bytes written into the
+ *             buffer. The largest supported buffer is 16,383 bytes.
+ *             Bits 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid
+ *             if RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
+ *             Bits 48 to 63 - VLAN_Tag[0:15] The contents of the variable
+ *             portion of the VLAN tag, if one was detected by the adapter.
+ *             This field is populated even if VLAN-tag stripping is enabled.
+ * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
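+ *
+ * On completion the driver typically inspects the control words with the
+ * accessor macros defined below (illustrative sketch; rxdp is a hypothetical
+ * local):
+ *
+ *	if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+ *		u8 t_code = VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
+ *		u32 len =
+ *		    VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
+ *		... hand the len-byte frame up if t_code is 0x0 ...
+ *	}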
+ *
+ * One buffer mode RxD for ring structure
+ */
+struct vxge_hw_ring_rxd_1 {
+	u64 host_control;
+	u64 control_0;
+#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)
+
+#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)
+
+#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)
+
+#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)
+
+#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)
+
+#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
+#define VXGE_HW_RING_RXD_T_CODE(val) 			vxge_vBIT(val, 12, 4)
+
+#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED
+
+#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)
+
+#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)	vxge_bVALn(ctrl0, 17, 1)
+
+#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)
+
+#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 19, 1)
+
+#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)
+
+#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)	vxge_bVALn(ctrl0, 24, 1)
+
+#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)	vxge_bVALn(ctrl0, 25, 2)
+
+#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)	vxge_bVALn(ctrl0, 27, 5)
+
+#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)
+
+#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)
+
+	u64 control_1;
+
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val)	vxge_vBIT(val, 2, 14)
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK	vxge_vBIT(0x3FFF, 2, 14)
+
+#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)	vxge_bVALn(ctrl1, 16, 32)
+
+#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)
+
+	u64 buffer0_ptr;
+};
+
+enum vxge_hw_rth_algoritms {
+	RTH_ALG_JENKINS = 0,
+	RTH_ALG_MS_RSS = 1,
+	RTH_ALG_CRC32C = 2
+};
+
+/**
+ * struct vxge_hw_rth_hash_types - RTH hash types.
+ * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
+ * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
+ * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
+ * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
+ * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
+ * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
+ *
+ * Used to pass RTH hash types to vxge_hw_vpath_rts_rth_set().
+ *
+ * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
+ */
+struct vxge_hw_rth_hash_types {
+	u8 hash_type_tcpipv4_en;
+	u8 hash_type_ipv4_en;
+	u8 hash_type_tcpipv6_en;
+	u8 hash_type_ipv6_en;
+	u8 hash_type_tcpipv6ex_en;
+	u8 hash_type_ipv6ex_en;
+};
+
+u32
+vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
+
+void vxge_hw_device_debug_set(
+	struct __vxge_hw_device *devh,
+	enum vxge_debug_level level,
+	u32 mask);
+
+u32
+vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
+
+u32
+vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
+
+/**
+ * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
+ * @buf_mode: Buffer mode (1, 3 or 5)
+ *
+ * This function returns the size of RxD for given buffer mode
+ */
+static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
+{
+	return sizeof(struct vxge_hw_ring_rxd_1);
+}
+
+/**
+ * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
+ * @buf_mode: Buffer mode (1 buffer mode only)
+ *
+ * This function returns the number of RxDs per RxD block for the given
+ * buffer mode
+ */
+static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
+{
+	return (u32)((VXGE_HW_BLOCK_SIZE-16) /
+		sizeof(struct vxge_hw_ring_rxd_1));
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
+ * @rxdh: Descriptor handle.
+ * @dma_pointer: DMA address of a single receive buffer this descriptor
+ * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
+ * the receive buffer should be already mapped to the device
+ * @size: Size of the receive @dma_pointer buffer.
+ *
+ * Prepare 1-buffer-mode Rx descriptor for posting
+ * (via vxge_hw_ring_rxd_post()).
+ *
+ * This inline helper-function does not return any parameters and always
+ * succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_set(
+	void *rxdh,
+	dma_addr_t dma_pointer,
+	u32 size)
+{
+	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+	rxdp->buffer0_ptr = dma_pointer;
+	rxdp->control_1	&= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
+	rxdp->control_1	|= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
+ * descriptor.
+ * @ring_handle: Ring handle.
+ * @rxdh: Descriptor handle.
+ * @pkt_length: Length (in bytes) of the data in the buffer pointed to by
+ * this descriptor. Returned by HW.
+ *
+ * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
+ * This inline helper-function uses completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_get(
+	struct __vxge_hw_ring *ring_handle,
+	void *rxdh,
+	u32 *pkt_length)
+{
+	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+
+	*pkt_length =
+		(u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
+ * a completed receive descriptor for 1b mode.
+ * @ring_handle: Ring handle.
+ * @rxdh: Descriptor handle.
+ * @rxd_info: Descriptor information
+ *
+ * Retrieve extended information associated with a completed receive
+ * descriptor.
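+ *
+ * Typical use inside a ring completion callback (illustrative sketch;
+ * locals are hypothetical):
+ *
+ *	u32 len;
+ *	struct vxge_hw_ring_rxd_info ext_info;
+ *
+ *	vxge_hw_ring_rxd_1b_get(ringh, rxdh, &len);
+ *	vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &ext_info);
+ *	... trust the hardware checksum only if ext_info.l3_cksum_valid
+ *	    and ext_info.l4_cksum_valid are set ...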
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_info_get(
+	struct __vxge_hw_ring *ring_handle,
+	void *rxdh,
+	struct vxge_hw_ring_rxd_info *rxd_info)
+{
+	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+	rxd_info->syn_flag =
+		(u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
+	rxd_info->is_icmp =
+		(u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
+	rxd_info->fast_path_eligible =
+		(u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
+	rxd_info->l3_cksum_valid =
+		(u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
+	rxd_info->l3_cksum =
+		(u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
+	rxd_info->l4_cksum_valid =
+		(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
+	rxd_info->l4_cksum =
+		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
+	rxd_info->frame =
+		(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
+	rxd_info->proto =
+		(u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
+	rxd_info->is_vlan =
+		(u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
+	rxd_info->vlan =
+		(u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
+	rxd_info->rth_bucket =
+		(u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
+	rxd_info->rth_it_hit =
+		(u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
+	rxd_info->rth_spdm_hit =
+		(u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
+	rxd_info->rth_hash_type =
+		(u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
+	rxd_info->rth_value =
+		(u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
+}
+
+/**
+ * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
+ *                      of a 1-buffer- or 3-buffer-mode ring.
+ * @rxdh: Descriptor handle.
+ *
+ * Returns: private driver info associated with the descriptor.
+ * The driver requests per-descriptor space via vxge_hw_ring_attr.
+ *
+ */
+static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
+{
+	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+	return (void *)(size_t)rxdp->host_control;
+}
+
+/**
+ * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
+ * @txdlh: Descriptor handle.
+ * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
+ *              and/or TCP and/or UDP.
+ *
+ * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
+ * descriptor.
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
+ * and vxge_hw_fifo_txdl_buffer_set().
+ * All these APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
+ *
+ */
+static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
+{
+	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+	txdp->control_1 |= cksum_bits;
+}
+
+/**
+ * vxge_hw_fifo_txdl_mss_set - Set MSS.
+ * @txdlh: Descriptor handle.
+ * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
+ *       driver, which in turn inserts the MSS into the @txdlh.
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
+ * and vxge_hw_fifo_txdl_cksum_set_bits().
+ * All these APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
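+ *
+ * For a TSO frame the preparation sequence might look like this
+ * (illustrative sketch; txdlh and mss are hypothetical locals):
+ *
+ *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
+ *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
+ *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
+ *	vxge_hw_fifo_txdl_mss_set(txdlh, mss);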
+ *
+ */
+static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
+{
+	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+
+	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
+	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
+}
+
+/**
+ * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
+ * @txdlh: Descriptor handle.
+ * @vlan_tag: 16bit VLAN tag.
+ *
+ * Insert VLAN tag into specified transmit descriptor.
+ * The actual insertion of the tag into outgoing frame is done by the
+ * hardware.
+ */
+static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
+{
+	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+
+	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
+	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
+}
+
+/**
+ * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
+ * @txdlh: Descriptor handle.
+ *
+ * Retrieve per-descriptor private data.
+ * Note that driver requests per-descriptor space via
+ * struct vxge_hw_fifo_attr passed to
+ * vxge_hw_vpath_open().
+ *
+ * Returns: private driver data associated with the descriptor.
+ */
+static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
+{
+	struct vxge_hw_fifo_txd *txdp  = (struct vxge_hw_fifo_txd *)txdlh;
+
+	return (void *)(size_t)txdp->host_control;
+}
+
+/**
+ * struct vxge_hw_ring_attr - Ring open "template".
+ * @callback: Ring completion callback. HW invokes the callback when there
+ *            are new completions on that ring. In many implementations
+ *            the @callback executes in the hw interrupt context.
+ * @rxd_init: Ring's descriptor-initialize callback.
+ *            See vxge_hw_ring_rxd_init_f{}.
+ *            If not NULL, HW invokes the callback when opening
+ *            the ring.
+ * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
+ *            HW invokes the callback when closing the corresponding ring.
+ *            See also vxge_hw_ring_rxd_term_f{}.
+ * @userdata: User-defined "context" of _that_ ring. Passed back to the
+ *            user as one of the @callback, @rxd_init, and @rxd_term
+ *            arguments.
+ * @per_rxd_space: If specified (i.e., greater than zero): extra space
+ *                 reserved by HW per each receive descriptor.
+ *                 Can be used to store, and retrieve on completion,
+ *                 information specific to the driver.
+ *
+ * Ring open "template". User fills the structure with ring
+ * attributes and passes it to vxge_hw_vpath_open().
+ */
+struct vxge_hw_ring_attr {
+	enum vxge_hw_status (*callback)(
+			struct __vxge_hw_ring *ringh,
+			void *rxdh,
+			u8 t_code,
+			void *userdata);
+
+	enum vxge_hw_status (*rxd_init)(
+			void *rxdh,
+			void *userdata);
+
+	void (*rxd_term)(
+			void *rxdh,
+			enum vxge_hw_rxd_state state,
+			void *userdata);
+
+	void		*userdata;
+	u32		per_rxd_space;
+};
+
+/**
+ * function vxge_hw_fifo_callback_f - FIFO callback.
+ * @vpath_handle: Virtual path whose fifo contains one or more completed
+ *                descriptors.
+ * @txdlh: First completed descriptor.
+ * @txdl_priv: Pointer to per txdl space allocated
+ * @t_code: Transfer code, as per Titan User Guide.
+ *          Returned by HW.
+ * @host_control: Opaque 64bit data stored by driver inside the Titan
+ *            descriptor prior to posting the latter on the fifo
+ *            via vxge_hw_fifo_txdl_post(). The @host_control is returned
+ *            as is to the driver with each completed descriptor.
+ * @userdata: Opaque per-fifo data specified at fifo open
+ *            time, via vxge_hw_vpath_open().
+ *
+ * Fifo completion callback (type declaration). A single per-fifo
+ * callback is specified at fifo open time, via
+ * vxge_hw_vpath_open().
Typically gets called as part of the processing
+ * of the Interrupt Service Routine.
+ *
+ * Fifo callback gets called by HW if, and only if, there is at least
+ * one new completion on a given fifo. Upon processing the first @txdlh driver
+ * is _supposed_ to continue consuming completions using:
+ *    - vxge_hw_fifo_txdl_next_completed()
+ *
+ * Note that failure to process new completions in a timely fashion
+ * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
+ *
+ * Non-zero @t_code means failure to process transmit descriptor.
+ *
+ * In the "transmit" case the failure could happen, for instance, when the
+ * link is down, in which case Titan completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Titan User Guide.
+ *
+ * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
+ */
+/**
+ * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
+ * @txdlh: First completed descriptor.
+ * @txdl_priv: Pointer to per txdl space allocated
+ * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
+ * @userdata: Per-fifo user data (a.k.a. context) specified at
+ * fifo open time, via vxge_hw_vpath_open().
+ *
+ * Terminate descriptor callback. Unless NULL is specified in the
+ * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
+ * HW invokes the callback as part of closing fifo, prior to
+ * de-allocating the fifo and associated data structures
+ * (including descriptors).
+ * The driver should utilize the callback to (for instance) unmap
+ * and free DMA data buffers associated with the posted (state =
+ * VXGE_HW_TXDL_STATE_POSTED) descriptors,
+ * as well as other relevant cleanup functions.
+ *
+ * See also: struct vxge_hw_fifo_attr{}
+ */
+/**
+ * struct vxge_hw_fifo_attr - Fifo open "template".
+ * @callback: Fifo completion callback. HW invokes the callback when there
+ *            are new completions on that fifo. In many implementations
+ *            the @callback executes in the hw interrupt context.
+ * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
+ *             HW invokes the callback when closing the corresponding fifo.
+ *             See also vxge_hw_fifo_txdl_term_f{}.
+ * @userdata: User-defined "context" of _that_ fifo. Passed back to the
+ *            user as one of the @callback, and @txdl_term arguments.
+ * @per_txdl_space: If specified (i.e., greater than zero): extra space
+ *                  reserved by HW per each transmit descriptor. Can be used
+ *                  to store, and retrieve on completion, information specific
+ *                  to the driver.
+ *
+ * Fifo open "template". User fills the structure with fifo
+ * attributes and passes it to vxge_hw_vpath_open().
+ */
+struct vxge_hw_fifo_attr {
+
+	enum vxge_hw_status (*callback)(
+			struct __vxge_hw_fifo *fifo_handle,
+			void *txdlh,
+			enum vxge_hw_fifo_tcode t_code,
+			void *userdata,
+			void **skb_ptr);
+
+	void (*txdl_term)(
+			void *txdlh,
+			enum vxge_hw_txdl_state state,
+			void *userdata);
+
+	void		*userdata;
+	u32		per_txdl_space;
+};
+
+/**
+ * struct vxge_hw_vpath_attr - Attributes of virtual path
+ * @vp_id: Identifier of Virtual Path
+ * @ring_attr: Attributes of ring for non-offload receive
+ * @fifo_attr: Attributes of fifo for non-offload transmit
+ *
+ * Attributes of virtual path. This structure is passed as parameter
+ * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
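+ *
+ * Opening a vpath might look like this (illustrative sketch; my_tx_compl
+ * and my_rx_compl are hypothetical driver callbacks):
+ *
+ *	struct vxge_hw_vpath_attr attr = {0};
+ *	struct __vxge_hw_vpath_handle *vph;
+ *
+ *	attr.vp_id = vp_id;
+ *	attr.fifo_attr.callback = my_tx_compl;
+ *	attr.ring_attr.callback = my_rx_compl;
+ *	vxge_hw_vpath_open(devh, &attr, &vph);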
+ */ +struct vxge_hw_vpath_attr { + u32 vp_id; + struct vxge_hw_ring_attr ring_attr; + struct vxge_hw_fifo_attr fifo_attr; +}; + +enum vxge_hw_status +__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, + struct __vxge_hw_blockpool *blockpool, + u32 pool_size, + u32 pool_max); + +void +__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); + +struct __vxge_hw_blockpool_entry * +__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev, + u32 size); + +void +__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, + struct __vxge_hw_blockpool_entry *entry); + +void * +__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, + u32 size, + struct vxge_hw_mempool_dma *dma_object); + +void +__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, + void *memblock, + u32 size, + struct vxge_hw_mempool_dma *dma_object); + +enum vxge_hw_status +__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config); + +enum vxge_hw_status +__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); + +enum vxge_hw_status +vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh, + struct vxge_hw_device_config *dev_config, int size); + +enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( + void __iomem *bar0, + struct vxge_hw_device_hw_info *hw_info); + +enum vxge_hw_status +__vxge_hw_vpath_fw_ver_get( + u32 vp_id, + struct vxge_hw_vpath_reg __iomem *vpath_reg, + struct vxge_hw_device_hw_info *hw_info); + +enum vxge_hw_status +__vxge_hw_vpath_card_info_get( + u32 vp_id, + struct vxge_hw_vpath_reg __iomem *vpath_reg, + struct vxge_hw_device_hw_info *hw_info); + +enum vxge_hw_status __devinit vxge_hw_device_config_default_get( + struct vxge_hw_device_config *device_config); + +/** + * vxge_hw_device_link_state_get - Get link state. + * @devh: HW device handle. + * + * Get link state. + * Returns: link state. 
+ */ +static inline +enum vxge_hw_device_link_state vxge_hw_device_link_state_get( + struct __vxge_hw_device *devh) +{ + return devh->link_state; +} + +void vxge_hw_device_terminate(struct __vxge_hw_device *devh); + +const u8 * +vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh); + +u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh); + +const u8 * +vxge_hw_device_product_name_get(struct __vxge_hw_device *devh); + +enum vxge_hw_status __devinit vxge_hw_device_initialize( + struct __vxge_hw_device **devh, + struct vxge_hw_device_attr *attr, + struct vxge_hw_device_config *device_config); + +enum vxge_hw_status vxge_hw_device_getpause_data( + struct __vxge_hw_device *devh, + u32 port, + u32 *tx, + u32 *rx); + +enum vxge_hw_status vxge_hw_device_setpause_data( + struct __vxge_hw_device *devh, + u32 port, + u32 tx, + u32 rx); + +static inline void *vxge_os_dma_malloc(struct pci_dev *pdev, + unsigned long size, + struct pci_dev **p_dmah, + struct pci_dev **p_dma_acch) +{ + gfp_t flags; + void *vaddr; + unsigned long misaligned = 0; + *p_dma_acch = *p_dmah = NULL; + + if (in_interrupt()) + flags = GFP_ATOMIC | GFP_DMA; + else + flags = GFP_KERNEL | GFP_DMA; + + size += VXGE_CACHE_LINE_SIZE; + + vaddr = kmalloc((size), flags); + if (vaddr == NULL) + return vaddr; + misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr), + VXGE_CACHE_LINE_SIZE); + *(unsigned long *)p_dma_acch = misaligned; + vaddr = (void *)((u8 *)vaddr + misaligned); + return vaddr; +} + +extern void vxge_hw_blockpool_block_add( + struct __vxge_hw_device *devh, + void *block_addr, + u32 length, + struct pci_dev *dma_h, + struct pci_dev *acc_handle); + +static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, + unsigned long size) +{ + gfp_t flags; + void *vaddr; + + if (in_interrupt()) + flags = GFP_ATOMIC | GFP_DMA; + else + flags = GFP_KERNEL | GFP_DMA; + + vaddr = kmalloc((size), flags); + + vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); +} + +static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, + struct pci_dev **p_dma_acch) +{ + unsigned long misaligned = *(unsigned long *)p_dma_acch; + u8 *tmp = (u8 *)vaddr; + tmp -= misaligned; + kfree((void *)tmp); +} + +/* + * __vxge_hw_mempool_item_priv - will return pointer on per item private space + */ +static inline void* +__vxge_hw_mempool_item_priv( + struct vxge_hw_mempool *mempool, + u32 memblock_idx, + void *item, + u32 *memblock_item_idx) +{ + ptrdiff_t offset; + void *memblock = mempool->memblocks_arr[memblock_idx]; + + + offset = (u32)((u8 *)item - (u8 *)memblock); + vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size); + + (*memblock_item_idx) = (u32) offset / mempool->item_size; + vxge_assert((*memblock_item_idx) < mempool->items_per_memblock); + + return (u8 *)mempool->memblocks_priv_arr[memblock_idx] + + (*memblock_item_idx) * mempool->items_priv_size; +} + +enum vxge_hw_status +__vxge_hw_mempool_grow( + struct vxge_hw_mempool *mempool, + u32 num_allocate, + u32 *num_allocated); + +struct vxge_hw_mempool* +__vxge_hw_mempool_create( + struct __vxge_hw_device *devh, + u32 memblock_size, + u32 item_size, + u32 private_size, + u32 items_initial, + u32 items_max, + struct vxge_hw_mempool_cbs *mp_callback, + void *userdata); + +struct __vxge_hw_channel* +__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, + enum __vxge_hw_channel_type type, u32 length, + u32 per_dtr_space, void *userdata); + +void +__vxge_hw_channel_free( + struct __vxge_hw_channel *channel); + +enum 
vxge_hw_status
+__vxge_hw_channel_initialize(
+	struct __vxge_hw_channel *channel);
+
+enum vxge_hw_status
+__vxge_hw_channel_reset(
+	struct __vxge_hw_channel *channel);
+
+/*
+ * __vxge_hw_fifo_txdl_priv - Return the HW-private data of a TxD.
+ * @fifo: Fifo
+ * @txdp: Pointer to a TxD
+ *
+ * Returns a pointer to the per-TxDL HW-private area, located past the
+ * driver's per-TxDL space.
+ */
+static inline struct __vxge_hw_fifo_txdl_priv *
+__vxge_hw_fifo_txdl_priv(
+	struct __vxge_hw_fifo *fifo,
+	struct vxge_hw_fifo_txd *txdp)
+{
+	return	(struct __vxge_hw_fifo_txdl_priv *)
+			(((char *)((ulong)txdp->host_control)) +
+				fifo->per_txdl_space);
+}
+
+enum vxge_hw_status vxge_hw_vpath_open(
+	struct __vxge_hw_device *devh,
+	struct vxge_hw_vpath_attr *attr,
+	struct __vxge_hw_vpath_handle **vpath_handle);
+
+enum vxge_hw_status
+__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
+
+enum vxge_hw_status vxge_hw_vpath_close(
+	struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status
+vxge_hw_vpath_reset(
+	struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status
+vxge_hw_vpath_recover_from_reset(
+	struct __vxge_hw_vpath_handle *vpath_handle);
+
+void
+vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
+
+enum vxge_hw_status
+vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status vxge_hw_vpath_mtu_set(
+	struct __vxge_hw_vpath_handle *vpath_handle,
+	u32 new_mtu);
+
+enum vxge_hw_status vxge_hw_vpath_stats_enable(
+	struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status
+__vxge_hw_vpath_stats_access(
+	struct __vxge_hw_virtualpath *vpath,
+	u32 operation,
+	u32 offset,
+	u64 *stat);
+
+enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(
+	struct __vxge_hw_virtualpath *vpath,
+	struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(
+	struct __vxge_hw_virtualpath *vpath,
+	struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
+enum vxge_hw_status
+__vxge_hw_vpath_stats_get(
+	struct __vxge_hw_virtualpath *vpath,
+	struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+void
+vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
+
+enum vxge_hw_status
+__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
+
+void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+enum vxge_hw_status
+__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+enum vxge_hw_status
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+	struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+enum vxge_hw_status
+__vxge_hw_device_register_poll(
+	void __iomem	*reg,
+	u64 mask, u32 max_millis);
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+	u64 ret = 0;
+	ret = readl(addr + 4);
+	ret <<= 32;
+	ret |= readl(addr);
+
+	return ret;
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+	writel((u32) (val), addr);
+	writel((u32) (val >> 32), (addr + 4));
+}
+#endif
+
+static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
+{
+	writel(val, addr + 4);
+}
+
+static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+}
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+			  u64 mask, u32 max_millis)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+	wmb();
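+	/*
+	 * The two 32-bit halves are issued as separate PIO writes with a
+	 * write barrier between them so they reach the adapter in order;
+	 * the caller-supplied mask is then polled below to confirm the
+	 * update took effect.
+	 */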
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+ wmb();
+
+ status = __vxge_hw_device_register_poll(addr, mask, max_millis);
+ return status;
+}
+
+struct vxge_hw_toc_reg __iomem *
+__vxge_hw_device_toc_get(void __iomem *bar0);
+
+enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
+
+enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_vpath_pci_read(
+ struct __vxge_hw_virtualpath *vpath,
+ u32 phy_func_0,
+ u32 offset,
+ u32 *val);
+
+enum vxge_hw_status
+__vxge_hw_vpath_addr_get(
+ u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ u8 (macaddr)[ETH_ALEN],
+ u8 (macaddr_mask)[ETH_ALEN]);
+
+u32
+__vxge_hw_vpath_func_id_get(
+ u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+/**
+ * vxge_debug
+ * @level: level of debug verbosity.
+ * @mask: debug mask
+ * @buf: Circular buffer for tracing
+ * @fmt: printf-like format string
+ *
+ * Provides logging facilities. Can be customized on a per-module
+ * basis and/or with debug levels. Input parameters, except
+ * module and level, are the same as for POSIX printf. This function
+ * may be compiled out if the DEBUG macro was never defined.
+ * See also: enum vxge_debug_level{}.
+ */
+
+#define vxge_trace_aux(level, mask, fmt, ...) \
+{\
+ vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
+}
+
+#define vxge_debug(module, level, mask, fmt, ...) { \
+if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
+ (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
+ if ((mask & VXGE_DEBUG_MASK) == mask)\
+ vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
+} \
+}
+
+#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
+#define vxge_debug_ll(level, mask, fmt, ...) \
+{\
+ vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
+}
+
+#else
+#define vxge_debug_ll(level, mask, fmt, ...)
+#endif
+
+enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
+ struct __vxge_hw_vpath_handle **vpath_handles,
+ u32 vpath_count,
+ u8 *mtable,
+ u8 *itable,
+ u32 itable_size);
+
+enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ enum vxge_hw_rth_algoritms algorithm,
+ struct vxge_hw_rth_hash_types *hash_type,
+ u16 bucket_size);
+
+#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
new file mode 100644
index 00000000000..c6736b97263
--- /dev/null
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -0,0 +1,1148 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-ethtool.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2009 Neterion Inc.
+ ******************************************************************************/
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vxge-ethtool.h"
+
+/**
+ * vxge_ethtool_sset - Sets different link parameters.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool to set
+ * link information.
+ *
+ * The function sets different link parameters provided by the user onto
+ * the NIC.
+ * Return value:
+ * 0 on success.
+ */
+
+static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
+{
+ /* We currently only support 10Gb/FULL */
+ if ((info->autoneg == AUTONEG_ENABLE) ||
+ (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * vxge_ethtool_gset - Return link specific information.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool
+ * to return link information.
+ *
+ * Returns link-specific information like speed, duplex, etc. to ethtool.
+ * Return value:
+ * 0 on success.
+ */
+static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+{
+ info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ info->port = PORT_FIBRE;
+
+ info->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(dev)) {
+ info->speed = SPEED_10000;
+ info->duplex = DUPLEX_FULL;
+ } else {
+ info->speed = -1;
+ info->duplex = -1;
+ }
+
+ info->autoneg = AUTONEG_DISABLE;
+ return 0;
+}
+
+/**
+ * vxge_ethtool_gdrvinfo - Returns driver-specific information.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool to
+ * return driver information.
+ *
+ * Returns driver-specific information like name, version, etc. to ethtool.
+ */
+static void vxge_ethtool_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct vxgedev *vdev;
+ vdev = (struct vxgedev *)netdev_priv(dev);
+ strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
+ strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
+ strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
+ strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
+ info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
+ * vdev->no_of_vpath;
+
+ info->n_stats = STAT_LEN;
+}
+
+/**
+ * vxge_ethtool_gregs - dumps the entire vpath register space of Titan
+ * into the buffer.
+ * @dev: device pointer.
+ * @regs: pointer to the structure with parameters given by ethtool for
+ * dumping the registers.
+ * @space: The input argument into which all the registers are dumped.
+ *
+ * Dumps the vpath register space of Titan NIC into the user given
+ * buffer area.
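+ * Userspace retrieves this dump with "ethtool -d", which reaches this
+ * handler through the get_regs_len/get_regs ethtool_ops below.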
+ */
+static void vxge_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int index, offset;
+ enum vxge_hw_status status;
+ u64 reg;
+ u8 *reg_space = (u8 *) space;
+ struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
+ pci_get_drvdata(vdev->pdev);
+
+ regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
+ regs->version = vdev->pdev->subsystem_device;
+ for (index = 0; index < vdev->no_of_vpath; index++) {
+ for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
+ offset += 8) {
+ status = vxge_hw_mgmt_reg_read(hldev,
+ vxge_hw_mgmt_reg_type_vpath,
+ vdev->vpaths[index].device_id,
+ offset, &reg);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s:%d Getting reg dump Failed",
+ __func__, __LINE__);
+ return;
+ }
+
+ memcpy((reg_space + offset), &reg, 8);
+ }
+ }
+}
+
+/**
+ * vxge_ethtool_idnic - To physically identify the nic on the system.
+ * @dev : device pointer.
+ * @data : duration (in seconds) for which the link LED should blink;
+ * 0 selects the default maximum blink time.
+ *
+ * Used to physically identify the NIC on the system.
+ * The Link LED will blink for a time specified by the user.
+ * Return value:
+ * 0 on success
+ */
+static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
+{
+ struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
+ pci_get_drvdata(vdev->pdev);
+
+ vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
+ msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
+ vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
+
+ return 0;
+}
+
+/**
+ * vxge_ethtool_getpause_data - Pause frame generation and reception.
+ * @dev : device pointer.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * Returns the Pause frame generation and reception capability of the NIC.
+ * Return value:
+ * void
+ */
+static void vxge_ethtool_getpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
+ pci_get_drvdata(vdev->pdev);
+
+ vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
+}
+
+/**
+ * vxge_ethtool_setpause_data - set/reset pause frame generation.
+ * @dev : device pointer.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
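+ * Userspace toggles this with "ethtool -A <iface> rx on|off tx on|off".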
+ * Return value: + * int, returns 0 on Success + */ +static int vxge_ethtool_setpause_data(struct net_device *dev, + struct ethtool_pauseparam *ep) +{ + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) + pci_get_drvdata(vdev->pdev); + + vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); + + vdev->config.tx_pause_enable = ep->tx_pause; + vdev->config.rx_pause_enable = ep->rx_pause; + + return 0; +} + +static void vxge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *tmp_stats) +{ + int j, k; + enum vxge_hw_status status; + enum vxge_hw_status swstatus; + struct vxge_vpath *vpath = NULL; + + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; + struct vxge_hw_xmac_stats *xmac_stats; + struct vxge_hw_device_stats_sw_info *sw_stats; + struct vxge_hw_device_stats_hw_info *hw_stats; + + u64 *ptr = tmp_stats; + + memset(tmp_stats, 0, + vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64)); + + xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL); + if (xmac_stats == NULL) { + vxge_debug_init(VXGE_ERR, + "%s : %d Memory Allocation failed for xmac_stats", + __func__, __LINE__); + return; + } + + sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info), + GFP_KERNEL); + if (sw_stats == NULL) { + kfree(xmac_stats); + vxge_debug_init(VXGE_ERR, + "%s : %d Memory Allocation failed for sw_stats", + __func__, __LINE__); + return; + } + + hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info), + GFP_KERNEL); + if (hw_stats == NULL) { + kfree(xmac_stats); + kfree(sw_stats); + vxge_debug_init(VXGE_ERR, + "%s : %d Memory Allocation failed for hw_stats", + __func__, __LINE__); + return; + } + + *ptr++ = 0; + status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats); + if (status != VXGE_HW_OK) { + if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) { + vxge_debug_init(VXGE_ERR, + "%s : %d Failure in getting xmac stats", + __func__, __LINE__); + } + } + swstatus = vxge_hw_driver_stats_get(hldev, sw_stats); + if (swstatus != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s : %d Failure in getting sw stats", + __func__, __LINE__); + } + + status = vxge_hw_device_stats_get(hldev, hw_stats); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s : %d hw_stats_get error", __func__, __LINE__); + } + + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_hw_vpath_stats_hw_info *vpath_info; + + vpath = &vdev->vpaths[k]; + j = vpath->device_id; + vpath_info = hw_stats->vpath_info[j]; + if (!vpath_info) { + memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN + + VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64)); + ptr += (VXGE_HW_VPATH_TX_STATS_LEN + + VXGE_HW_VPATH_RX_STATS_LEN); + continue; + } + + *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms; + *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets; + *ptr++ = vpath_info->tx_stats.tx_data_octets; + *ptr++ = vpath_info->tx_stats.tx_mcast_frms; + *ptr++ = vpath_info->tx_stats.tx_bcast_frms; + *ptr++ = vpath_info->tx_stats.tx_ucast_frms; + *ptr++ = vpath_info->tx_stats.tx_tagged_frms; + *ptr++ = vpath_info->tx_stats.tx_vld_ip; + *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets; + *ptr++ = vpath_info->tx_stats.tx_icmp; + *ptr++ = vpath_info->tx_stats.tx_tcp; + *ptr++ = vpath_info->tx_stats.tx_rst_tcp; + *ptr++ = vpath_info->tx_stats.tx_udp; + *ptr++ = vpath_info->tx_stats.tx_unknown_protocol; + *ptr++ = vpath_info->tx_stats.tx_lost_ip; + *ptr++ = 
vpath_info->tx_stats.tx_parse_error; + *ptr++ = vpath_info->tx_stats.tx_tcp_offload; + *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload; + *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload; + *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms; + *ptr++ = vpath_info->rx_stats.rx_vld_frms; + *ptr++ = vpath_info->rx_stats.rx_offload_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets; + *ptr++ = vpath_info->rx_stats.rx_data_octets; + *ptr++ = vpath_info->rx_stats.rx_offload_octets; + *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms; + *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms; + *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms; + *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms; + *ptr++ = vpath_info->rx_stats.rx_tagged_frms; + *ptr++ = vpath_info->rx_stats.rx_long_frms; + *ptr++ = vpath_info->rx_stats.rx_usized_frms; + *ptr++ = vpath_info->rx_stats.rx_osized_frms; + *ptr++ = vpath_info->rx_stats.rx_frag_frms; + *ptr++ = vpath_info->rx_stats.rx_jabber_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms; + *ptr++ = vpath_info->rx_stats.rx_ip; + *ptr++ = vpath_info->rx_stats.rx_accepted_ip; + *ptr++ = vpath_info->rx_stats.rx_ip_octets; + *ptr++ = vpath_info->rx_stats.rx_err_ip; + *ptr++ = vpath_info->rx_stats.rx_icmp; + *ptr++ = vpath_info->rx_stats.rx_tcp; + *ptr++ = vpath_info->rx_stats.rx_udp; + *ptr++ = vpath_info->rx_stats.rx_err_tcp; + *ptr++ = vpath_info->rx_stats.rx_lost_frms; + *ptr++ = vpath_info->rx_stats.rx_lost_ip; + *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload; + *ptr++ = vpath_info->rx_stats.rx_various_discard; + *ptr++ = vpath_info->rx_stats.rx_sleep_discard; + *ptr++ = vpath_info->rx_stats.rx_red_discard; + *ptr++ = vpath_info->rx_stats.rx_queue_full_discard; + *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms; + } + *ptr++ = 0; + for (k = 0; k < vdev->max_config_port; k++) { + *ptr++ = xmac_stats->aggr_stats[k].tx_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets; + *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets; + *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms; + } + *ptr++ = 0; + for (k = 0; k < vdev->max_config_port; k++) { + *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms; + *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets; + *ptr++ = xmac_stats->port_stats[k].tx_data_octets; + *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms; + *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms; + *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms; + *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms; + *ptr++ = xmac_stats->port_stats[k].tx_vld_ip; + *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets; + *ptr++ = 
xmac_stats->port_stats[k].tx_icmp; + *ptr++ = xmac_stats->port_stats[k].tx_tcp; + *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp; + *ptr++ = xmac_stats->port_stats[k].tx_udp; + *ptr++ = xmac_stats->port_stats[k].tx_parse_error; + *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol; + *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms; + *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms; + *ptr++ = xmac_stats->port_stats[k].tx_drop_ip; + *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match; + *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms; + *ptr++ = xmac_stats->port_stats[k].tx_drop_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms; + *ptr++ = xmac_stats->port_stats[k].rx_vld_frms; + *ptr++ = xmac_stats->port_stats[k].rx_offload_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets; + *ptr++ = xmac_stats->port_stats[k].rx_data_octets; + *ptr++ = xmac_stats->port_stats[k].rx_offload_octets; + *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms; + *ptr++ = xmac_stats->port_stats[k].rx_long_frms; + *ptr++ = xmac_stats->port_stats[k].rx_usized_frms; + *ptr++ = xmac_stats->port_stats[k].rx_osized_frms; + *ptr++ = xmac_stats->port_stats[k].rx_frag_frms; + *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ip; + *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip; + *ptr++ = xmac_stats->port_stats[k].rx_ip_octets; + *ptr++ = xmac_stats->port_stats[k].rx_err_ip; + *ptr++ = xmac_stats->port_stats[k].rx_icmp; + *ptr++ = xmac_stats->port_stats[k].rx_tcp; + *ptr++ = xmac_stats->port_stats[k].rx_udp; + *ptr++ = xmac_stats->port_stats[k].rx_err_tcp; + *ptr++ = xmac_stats->port_stats[k].rx_pause_count; + *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms; + *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms; + *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms; + *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms; + *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms; + *ptr++ = xmac_stats->port_stats[k].rx_drop_frms; + *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms; + *ptr++ = xmac_stats->port_stats[k].rx_drop_ip; + *ptr++ = xmac_stats->port_stats[k].rx_drop_udp; + *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms; + *ptr++ = 
xmac_stats->port_stats[k].rx_fcs_discard; + *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_switch_discard; + *ptr++ = xmac_stats->port_stats[k].rx_len_discard; + *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard; + *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard; + *ptr++ = xmac_stats->port_stats[k].rx_rts_discard; + *ptr++ = xmac_stats->port_stats[k].rx_trash_discard; + *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard; + *ptr++ = xmac_stats->port_stats[k].rx_red_discard; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match; + *ptr++ = xmac_stats->port_stats[k].rx_local_fault; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match; + *ptr++ = xmac_stats->port_stats[k].rx_jettison; + *ptr++ = xmac_stats->port_stats[k].rx_remote_fault; + } + + *ptr++ = 0; + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_hw_vpath_stats_sw_info *vpath_info; + + vpath = &vdev->vpaths[k]; + j = vpath->device_id; + vpath_info = (struct vxge_hw_vpath_stats_sw_info *) + &sw_stats->vpath_info[j]; + *ptr++ = vpath_info->soft_reset_cnt; + *ptr++ = vpath_info->error_stats.unknown_alarms; + *ptr++ = vpath_info->error_stats.network_sustained_fault; + *ptr++ = vpath_info->error_stats.network_sustained_ok; + *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite; + *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison; + *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error; + *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow; + *ptr++ = vpath_info->error_stats.statsb_pif_chain_error; + *ptr++ = vpath_info->error_stats.statsb_drop_timeout; + *ptr++ = vpath_info->error_stats.target_illegal_access; + *ptr++ = vpath_info->error_stats.ini_serr_det; + *ptr++ = vpath_info->error_stats.prc_ring_bumps; + *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err; + *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort; + *ptr++ = vpath_info->error_stats.prc_quanta_size_err; + *ptr++ = vpath_info->ring_stats.common_stats.full_cnt; + *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt; + *ptr++ = vpath_info->ring_stats.common_stats.usage_max; + *ptr++ = vpath_info->ring_stats.common_stats. + reserve_free_swaps_cnt; + *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt; + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j]; + *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt; + *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt; + *ptr++ = vpath_info->fifo_stats.common_stats.usage_max; + *ptr++ = vpath_info->fifo_stats.common_stats. 
+ reserve_free_swaps_cnt; + *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt; + *ptr++ = vpath_info->fifo_stats.total_posts; + *ptr++ = vpath_info->fifo_stats.total_buffers; + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j]; + } + + *ptr++ = 0; + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_hw_vpath_stats_hw_info *vpath_info; + vpath = &vdev->vpaths[k]; + j = vpath->device_id; + vpath_info = hw_stats->vpath_info[j]; + if (!vpath_info) { + memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64)); + ptr += VXGE_HW_VPATH_STATS_LEN; + continue; + } + *ptr++ = vpath_info->ini_num_mwr_sent; + *ptr++ = vpath_info->ini_num_mrd_sent; + *ptr++ = vpath_info->ini_num_cpl_rcvd; + *ptr++ = vpath_info->ini_num_mwr_byte_sent; + *ptr++ = vpath_info->ini_num_cpl_byte_rcvd; + *ptr++ = vpath_info->wrcrdtarb_xoff; + *ptr++ = vpath_info->rdcrdtarb_xoff; + *ptr++ = vpath_info->vpath_genstats_count0; + *ptr++ = vpath_info->vpath_genstats_count1; + *ptr++ = vpath_info->vpath_genstats_count2; + *ptr++ = vpath_info->vpath_genstats_count3; + *ptr++ = vpath_info->vpath_genstats_count4; + *ptr++ = vpath_info->vpath_genstats_count5; + *ptr++ = vpath_info->prog_event_vnum0; + *ptr++ = vpath_info->prog_event_vnum1; + *ptr++ = vpath_info->prog_event_vnum2; + *ptr++ = vpath_info->prog_event_vnum3; + *ptr++ = vpath_info->rx_multi_cast_frame_discard; + *ptr++ = vpath_info->rx_frm_transferred; + *ptr++ = vpath_info->rxd_returned; + *ptr++ = vpath_info->rx_mpa_len_fail_frms; + *ptr++ = vpath_info->rx_mpa_mrk_fail_frms; + *ptr++ = vpath_info->rx_mpa_crc_fail_frms; + *ptr++ = vpath_info->rx_permitted_frms; + *ptr++ = vpath_info->rx_vp_reset_discarded_frms; + *ptr++ = vpath_info->rx_wol_frms; + *ptr++ = vpath_info->tx_vp_reset_discarded_frms; + } + + *ptr++ = 0; + *ptr++ = vdev->stats.vpaths_open; + *ptr++ = vdev->stats.vpath_open_fail; + *ptr++ = vdev->stats.link_up; + *ptr++ = vdev->stats.link_down; + + for (k = 0; k < vdev->no_of_vpath; k++) { + *ptr += vdev->vpaths[k].fifo.stats.tx_frms; + *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors; + *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes; + *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free; + *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc; + *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms; + *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors; + *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes; + *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast; + *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail + + vdev->vpaths[k].ring.stats.pci_map_fail; + *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail; + } + + ptr += 12; + + kfree(xmac_stats); + kfree(sw_stats); + kfree(hw_stats); +} + +static void vxge_ethtool_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + int stat_size = 0; + int i, j; + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + switch (stringset) { + case ETH_SS_STATS: + vxge_add_string("VPATH STATISTICS%s\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_add_string("tx_ttl_eth_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ttl_eth_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ucast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tagged_frms_%d\t\t\t", + 
&stat_size, data, i); + vxge_add_string("tx_vld_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vld_ip_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_rst_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_unknown_proto_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_lost_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_parse_error_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tcp_offload_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_retx_tcp_offload_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_lost_ip_offload_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_eth_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_eth_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_mcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_bcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_nucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_long_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_usized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_osized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_frag_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_jabber_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_64_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_65_127_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_128_255_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_256_511_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_512_1023_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_8192_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_gt_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_lost_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_lost_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_lost_ip_offload_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_various_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_sleep_discard_%d\t\t\t", + &stat_size, data, i); + 
vxge_add_string("rx_red_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_queue_full_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_ok_frms_%d\t\t\t", + &stat_size, data, i); + } + + vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->max_config_port; i++) { + vxge_add_string("tx_frms_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_discarded_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_errored_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_frms_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_discarded_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_errored_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_unknown_slow_proto_frms_%d\t", + &stat_size, data, i); + } + + vxge_add_string("\nPORT STATISTICS%s\t\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->max_config_port; i++) { + vxge_add_string("tx_ttl_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ttl_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ucast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vld_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vld_ip_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_rst_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_parse_error_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_unknown_protocol_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_pause_ctrl_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_marker_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_lacpdu_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_drop_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_char2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_char1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_column2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_column1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_any_err_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_drop_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_octects_%d\t\t", + 
&stat_size, data, i); + vxge_add_string("rx_vld_mcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_bcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_nucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_long_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_usized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_osized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_frag_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_jabber_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_64_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_65_127_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_128_255_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_256_511_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_512_1023_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_8192_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_gt_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip_octets_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_pause_count_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_pause_ctrl_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_unsup_ctrl_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_fcs_err_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_in_rng_len_err_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_out_rng_len_err_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_drop_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_discard_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_drop_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_drop_udp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_marker_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_lacpdu_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_unknown_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_fcs_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_illegal_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_switch_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_len_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_rpa_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_l2_mgmt_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_rts_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_trash_discard_%d\t\t\t", + &stat_size, data, i); + 
vxge_add_string("rx_buff_full_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_red_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_char1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_err_sym_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_column1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_char2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_local_fault_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_column2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_jettison_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_remote_fault_%d\t\t\t", + &stat_size, data, i); + } + + vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_add_string("soft_reset_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("unknown_alarms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("network_sustained_fault_%d\t\t", + &stat_size, data, i); + vxge_add_string("network_sustained_ok_%d\t\t", + &stat_size, data, i); + vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t", + &stat_size, data, i); + vxge_add_string("kdfcctl_fifo0_poison_%d\t\t", + &stat_size, data, i); + vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t", + &stat_size, data, i); + vxge_add_string("dblgen_fifo0_overflow_%d\t\t", + &stat_size, data, i); + vxge_add_string("statsb_pif_chain_error_%d\t\t", + &stat_size, data, i); + vxge_add_string("statsb_drop_timeout_%d\t\t", + &stat_size, data, i); + vxge_add_string("target_illegal_access_%d\t\t", + &stat_size, data, i); + vxge_add_string("ini_serr_det_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prc_ring_bumps_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prc_rxdcm_sc_abort_%d\t\t", + &stat_size, data, i); + vxge_add_string("prc_quanta_size_err_%d\t\t", + &stat_size, data, i); + vxge_add_string("ring_full_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ring_usage_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ring_usage_max_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ring_reserve_free_swaps_cnt_%d\t", + &stat_size, data, i); + vxge_add_string("ring_total_compl_cnt_%d\t\t", + &stat_size, data, i); + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t", + &stat_size, data, j, i); + vxge_add_string("fifo_full_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_usage_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_usage_max_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t", + &stat_size, data, i); + vxge_add_string("fifo_total_compl_cnt_%d\t\t", + &stat_size, data, i); + vxge_add_string("fifo_total_posts_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_total_buffers_%d\t\t", + &stat_size, data, i); + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + vxge_add_string("txd_t_code_err_cnt%d_%d\t\t", + &stat_size, data, j, i); + } + + vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_add_string("ini_num_mwr_sent_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ini_num_mrd_sent_%d\t\t\t", + &stat_size, data, i); + 
vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ini_num_mwr_byte_sent_%d\t\t", + &stat_size, data, i); + vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t", + &stat_size, data, i); + vxge_add_string("wrcrdtarb_xoff_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rdcrdtarb_xoff_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count0_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count1_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count2_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count3_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count4_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count5_%d\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum0_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum1_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum2_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum3_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_multi_cast_frame_discard_%d\t", + &stat_size, data, i); + vxge_add_string("rx_frm_transferred_%d\t\t", + &stat_size, data, i); + vxge_add_string("rxd_returned_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_len_fail_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_permitted_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vp_reset_discarded_frms_%d\t", + &stat_size, data, i); + vxge_add_string("rx_wol_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vp_reset_discarded_frms_%d\t", + &stat_size, data, i); + } + + memcpy(data + stat_size, ðtool_driver_stats_keys, + sizeof(ethtool_driver_stats_keys)); + } +} + +static int vxge_ethtool_get_regs_len(struct net_device *dev) +{ + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + + return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; +} + +static u32 vxge_get_rx_csum(struct net_device *dev) +{ + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + + return vdev->rx_csum; +} + +static int vxge_set_rx_csum(struct net_device *dev, u32 data) +{ + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + + if (data) + vdev->rx_csum = 1; + else + vdev->rx_csum = 0; + + return 0; +} + +static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return 0; +} + +static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return VXGE_TITLE_LEN + + (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) + + (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) + + (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) + + (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) + + (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) + + (vdev->no_of_vpath * VXGE_SW_STATS_LEN) + + DRIVER_STAT_LEN; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops vxge_ethtool_ops = { + .get_settings = vxge_ethtool_gset, + .set_settings = vxge_ethtool_sset, + .get_drvinfo = vxge_ethtool_gdrvinfo, + .get_regs_len = vxge_ethtool_get_regs_len, + .get_regs = vxge_ethtool_gregs, + .get_link = ethtool_op_get_link, + .get_pauseparam = 
vxge_ethtool_getpause_data, + .set_pauseparam = vxge_ethtool_setpause_data, + .get_rx_csum = vxge_get_rx_csum, + .set_rx_csum = vxge_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = vxge_ethtool_op_set_tso, + .get_strings = vxge_ethtool_get_strings, + .phys_id = vxge_ethtool_idnic, + .get_sset_count = vxge_ethtool_get_sset_count, + .get_ethtool_stats = vxge_get_ethtool_stats, +}; + +void initialize_ethtool_ops(struct net_device *ndev) +{ + SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); +} diff --git a/drivers/net/vxge/vxge-ethtool.h b/drivers/net/vxge/vxge-ethtool.h new file mode 100644 index 00000000000..1c3df0a34ac --- /dev/null +++ b/drivers/net/vxge/vxge-ethtool.h @@ -0,0 +1,67 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-ethtool.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2009 Neterion Inc. + ******************************************************************************/ +#ifndef _VXGE_ETHTOOL_H +#define _VXGE_ETHTOOL_H + +#include "vxge-main.h" + +/* Ethtool related variables and Macros. */ +static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset); + +static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { + {"\n DRIVER STATISTICS"}, + {"vpaths_opened"}, + {"vpath_open_fail_cnt"}, + {"link_up_cnt"}, + {"link_down_cnt"}, + {"tx_frms"}, + {"tx_errors"}, + {"tx_bytes"}, + {"txd_not_free"}, + {"txd_out_of_desc"}, + {"rx_frms"}, + {"rx_errors"}, + {"rx_bytes"}, + {"rx_mcast"}, + {"pci_map_fail_cnt"}, + {"skb_alloc_fail_cnt"} +}; + +#define VXGE_TITLE_LEN 5 +#define VXGE_HW_VPATH_STATS_LEN 27 +#define VXGE_HW_AGGR_STATS_LEN 13 +#define VXGE_HW_PORT_STATS_LEN 94 +#define VXGE_HW_VPATH_TX_STATS_LEN 19 +#define VXGE_HW_VPATH_RX_STATS_LEN 42 +#define VXGE_SW_STATS_LEN 60 +#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\ + VXGE_HW_AGGR_STATS_LEN +\ + VXGE_HW_PORT_STATS_LEN +\ + VXGE_HW_VPATH_TX_STATS_LEN +\ + VXGE_HW_VPATH_RX_STATS_LEN) + +#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN) +#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN) + +/* Maximum flicker time of adapter LED */ +#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */ +#define VXGE_FLICKER_ON 1 +#define VXGE_FLICKER_OFF 0 + +#define vxge_add_string(fmt, size, buf, ...) {\ + snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \ + *size += ETH_GSTRING_LEN; \ +} + +#endif /*_VXGE_ETHTOOL_H*/ diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c new file mode 100644 index 00000000000..61ef1611815 --- /dev/null +++ b/drivers/net/vxge/vxge-main.c @@ -0,0 +1,4502 @@ +/****************************************************************************** +* This software may be used and distributed according to the terms of +* the GNU General Public License (GPL), incorporated herein by reference. 
+* Drivers based on or derived from this code fall under the GPL and must
+* retain the authorship, copyright and license notice. This file is not
+* a complete program and may only be used when the entire operating
+* system is licensed under the GPL.
+* See the file COPYING in this distribution for more information.
+*
+* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+* Virtualized Server Adapter.
+* Copyright(c) 2002-2009 Neterion Inc.
+*
+* The module loadable parameters that are supported by the driver and a brief
+* explanation of all the variables:
+* vlan_tag_strip:
+* Strip VLAN Tag enable/disable. Instructs the device to remove
+* the VLAN tag from all received tagged frames that are not
+* replicated at the internal L2 switch.
+* 0 - Do not strip the VLAN tag.
+* 1 - Strip the VLAN tag.
+*
+* addr_learn_en:
+* Enable learning the MAC address of the guest OS interface in
+* a virtualization environment.
+* 0 - DISABLE
+* 1 - ENABLE
+*
+* max_config_port:
+* Maximum number of ports to be supported.
+* MIN - 1 and MAX - 2
+*
+* max_config_vpath:
+* This configures the maximum number of VPATHs configured for each
+* device function.
+* MIN - 1 and MAX - 17
+*
+* max_config_dev:
+* This configures the maximum number of device functions to be enabled.
+* MIN - 1 and MAX - 17
+*
+******************************************************************************/
+
+#include <linux/if_vlan.h>
+#include <linux/pci.h>
+#include <net/ip.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "vxge-main.h"
+#include "vxge-reg.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
+ "Virtualized Server Adapter");
+
+static struct pci_device_id vxge_id_table[] __devinitdata = {
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
+ PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
+ PCI_ANY_ID},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, vxge_id_table);
+
+VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
+VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
+VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
+VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
+VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
+VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
+
+static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
+ {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
+static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
+ {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
+module_param_array(bw_percentage, uint, NULL, 0);
+
+static struct vxge_drv_config *driver_config;
+
+static inline int is_vxge_card_up(struct vxgedev *vdev)
+{
+ return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+}
+
+static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
+{
+ unsigned long flags = 0;
+ struct sk_buff *skb_ptr = NULL;
+ struct sk_buff **temp, *head, *skb;
+
+ if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
+ vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ }
+ /* free SKBs */
+ head = skb_ptr;
+ while (head) {
+ skb = head;
+ temp = (struct sk_buff **)&skb->cb;
+ head = *temp;
+ *temp = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+}
+
+static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
+{
+ int i;
+
+ /* Complete all transmits */
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
+}
+
+static
inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) +{ + int i; + struct vxge_ring *ring; + + /* Complete all receives*/ + for (i = 0; i < vdev->no_of_vpath; i++) { + ring = &vdev->vpaths[i].ring; + vxge_hw_vpath_poll_rx(ring->handle); + } +} + +/* + * MultiQ manipulation helper functions + */ +void vxge_stop_all_tx_queue(struct vxgedev *vdev) +{ + int i; + struct net_device *dev = vdev->ndev; + + if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) { + for (i = 0; i < vdev->no_of_vpath; i++) + vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP; + } + netif_tx_stop_all_queues(dev); +} + +void vxge_stop_tx_queue(struct vxge_fifo *fifo) +{ + struct net_device *dev = fifo->ndev; + + struct netdev_queue *txq = NULL; + if (fifo->tx_steering_type == TX_MULTIQ_STEERING) + txq = netdev_get_tx_queue(dev, fifo->driver_id); + else { + txq = netdev_get_tx_queue(dev, 0); + fifo->queue_state = VPATH_QUEUE_STOP; + } + + netif_tx_stop_queue(txq); +} + +void vxge_start_all_tx_queue(struct vxgedev *vdev) +{ + int i; + struct net_device *dev = vdev->ndev; + + if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) { + for (i = 0; i < vdev->no_of_vpath; i++) + vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START; + } + netif_tx_start_all_queues(dev); +} + +static void vxge_wake_all_tx_queue(struct vxgedev *vdev) +{ + int i; + struct net_device *dev = vdev->ndev; + + if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) { + for (i = 0; i < vdev->no_of_vpath; i++) + vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START; + } + netif_tx_wake_all_queues(dev); +} + +void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb) +{ + struct net_device *dev = fifo->ndev; + + int vpath_no = fifo->driver_id; + struct netdev_queue *txq = NULL; + if (fifo->tx_steering_type == TX_MULTIQ_STEERING) { + txq = netdev_get_tx_queue(dev, vpath_no); + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + } else { + txq = netdev_get_tx_queue(dev, 0); + if (fifo->queue_state == VPATH_QUEUE_STOP) + if (netif_tx_queue_stopped(txq)) { + fifo->queue_state = VPATH_QUEUE_START; + netif_tx_wake_queue(txq); + } + } +} + +/* + * vxge_callback_link_up + * + * This function is called during interrupt context to notify link up state + * change. + */ +void +vxge_callback_link_up(struct __vxge_hw_device *hldev) +{ + struct net_device *dev = hldev->ndev; + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + vdev->ndev->name, __func__, __LINE__); + printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name); + vdev->stats.link_up++; + + netif_carrier_on(vdev->ndev); + vxge_wake_all_tx_queue(vdev); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); +} + +/* + * vxge_callback_link_down + * + * This function is called during interrupt context to notify link down state + * change. + */ +void +vxge_callback_link_down(struct __vxge_hw_device *hldev) +{ + struct net_device *dev = hldev->ndev; + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); + printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name); + + vdev->stats.link_down++; + netif_carrier_off(vdev->ndev); + vxge_stop_all_tx_queue(vdev); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); +} + +/* + * vxge_rx_alloc + * + * Allocate SKB. 
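+ * Allocates a receive skb with headroom reserved for alignment and
+ * stores it in the RxD's private area for the completion handler.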
+ */ +static struct sk_buff* +vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) +{ + struct net_device *dev; + struct sk_buff *skb; + struct vxge_rx_priv *rx_priv; + + dev = ring->ndev; + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + + rx_priv = vxge_hw_ring_rxd_private_get(dtrh); + + /* try to allocate skb first. this one may fail */ + skb = netdev_alloc_skb(dev, skb_size + + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + if (skb == NULL) { + vxge_debug_mem(VXGE_ERR, + "%s: out of memory to allocate SKB", dev->name); + ring->stats.skb_alloc_fail++; + return NULL; + } + + vxge_debug_mem(VXGE_TRACE, + "%s: %s:%d Skb : 0x%p", ring->ndev->name, + __func__, __LINE__, skb); + + skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + + rx_priv->skb = skb; + rx_priv->data_size = skb_size; + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); + + return skb; +} + +/* + * vxge_rx_map + */ +static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) +{ + struct vxge_rx_priv *rx_priv; + dma_addr_t dma_addr; + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + rx_priv = vxge_hw_ring_rxd_private_get(dtrh); + + dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data, + rx_priv->data_size, PCI_DMA_FROMDEVICE); + + if (dma_addr == 0) { + ring->stats.pci_map_fail++; + return -EIO; + } + vxge_debug_mem(VXGE_TRACE, + "%s: %s:%d 1 buffer mode dma_addr = 0x%llx", + ring->ndev->name, __func__, __LINE__, + (unsigned long long)dma_addr); + vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size); + + rx_priv->data_dma = dma_addr; + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); + + return 0; +} + +/* + * vxge_rx_initial_replenish + * Allocation of RxD as an initial replenish procedure. 
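+ * Called back for each descriptor when the ring is created, so the
+ * ring starts out fully populated with mapped receive buffers.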
+ */ +static enum vxge_hw_status +vxge_rx_initial_replenish(void *dtrh, void *userdata) +{ + struct vxge_ring *ring = (struct vxge_ring *)userdata; + struct vxge_rx_priv *rx_priv; + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + if (vxge_rx_alloc(dtrh, ring, + VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL) + return VXGE_HW_FAIL; + + if (vxge_rx_map(dtrh, ring)) { + rx_priv = vxge_hw_ring_rxd_private_get(dtrh); + dev_kfree_skb(rx_priv->skb); + + return VXGE_HW_FAIL; + } + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); + + return VXGE_HW_OK; +} + +static inline void +vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, + int pkt_length, struct vxge_hw_ring_rxd_info *ext_info) +{ + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + skb_record_rx_queue(skb, ring->driver_id); + skb->protocol = eth_type_trans(skb, ring->ndev); + + ring->stats.rx_frms++; + ring->stats.rx_bytes += pkt_length; + + if (skb->pkt_type == PACKET_MULTICAST) + ring->stats.rx_mcast++; + + vxge_debug_rx(VXGE_TRACE, + "%s: %s:%d skb protocol = %d", + ring->ndev->name, __func__, __LINE__, skb->protocol); + + if (ring->gro_enable) { + if (ring->vlgrp && ext_info->vlan && + (ring->vlan_tag_strip == + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) + vlan_gro_receive(&ring->napi, ring->vlgrp, + ext_info->vlan, skb); + else + napi_gro_receive(&ring->napi, skb); + } else { + if (ring->vlgrp && vlan && + (ring->vlan_tag_strip == + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) + vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan); + else + netif_receive_skb(skb); + } + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); +} + +static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, + struct vxge_rx_priv *rx_priv) +{ + pci_dma_sync_single_for_device(ring->pdev, + rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); + + vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); + vxge_hw_ring_rxd_pre_post(ring->handle, dtr); +} + +static inline void vxge_post(int *dtr_cnt, void **first_dtr, + void *post_dtr, struct __vxge_hw_ring *ringh) +{ + int dtr_count = *dtr_cnt; + if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) { + if (*first_dtr) + vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr); + *first_dtr = post_dtr; + } else + vxge_hw_ring_rxd_post_post(ringh, post_dtr); + dtr_count++; + *dtr_cnt = dtr_count; +} + +/* + * vxge_rx_1b_compl + * + * If the interrupt is because of a received frame or if the receive ring + * contains fresh as yet un-processed frames, this function is called. 
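+ * Frames at or below the copy threshold are copied into a fresh skb so
+ * the original buffer can be recycled in place; larger frames are passed
+ * up the stack and replaced with a newly allocated buffer.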
+ */ +enum vxge_hw_status +vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, + u8 t_code, void *userdata) +{ + struct vxge_ring *ring = (struct vxge_ring *)userdata; + struct net_device *dev = ring->ndev; + unsigned int dma_sizes; + void *first_dtr = NULL; + int dtr_cnt = 0; + int data_size; + dma_addr_t data_dma; + int pkt_length; + struct sk_buff *skb; + struct vxge_rx_priv *rx_priv; + struct vxge_hw_ring_rxd_info ext_info; + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + ring->pkts_processed = 0; + + vxge_hw_ring_replenish(ringh, 0); + + do { + rx_priv = vxge_hw_ring_rxd_private_get(dtr); + skb = rx_priv->skb; + data_size = rx_priv->data_size; + data_dma = rx_priv->data_dma; + + vxge_debug_rx(VXGE_TRACE, + "%s: %s:%d skb = 0x%p", + ring->ndev->name, __func__, __LINE__, skb); + + vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes); + pkt_length = dma_sizes; + + vxge_debug_rx(VXGE_TRACE, + "%s: %s:%d Packet Length = %d", + ring->ndev->name, __func__, __LINE__, pkt_length); + + vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info); + + /* check skb validity */ + vxge_assert(skb); + + prefetch((char *)skb + L1_CACHE_BYTES); + if (unlikely(t_code)) { + + if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != + VXGE_HW_OK) { + + ring->stats.rx_errors++; + vxge_debug_rx(VXGE_TRACE, + "%s: %s :%d Rx T_code is %d", + ring->ndev->name, __func__, + __LINE__, t_code); + + /* If the t_code is not supported and if the + * t_code is other than 0x5 (unparseable packet + * such as unknown UPV6 header), Drop it !!! + */ + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); + ring->stats.rx_dropped++; + continue; + } + } + + if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { + + if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { + + if (!vxge_rx_map(dtr, ring)) { + skb_put(skb, pkt_length); + + pci_unmap_single(ring->pdev, data_dma, + data_size, PCI_DMA_FROMDEVICE); + + vxge_hw_ring_rxd_pre_post(ringh, dtr); + vxge_post(&dtr_cnt, &first_dtr, dtr, + ringh); + } else { + dev_kfree_skb(rx_priv->skb); + rx_priv->skb = skb; + rx_priv->data_size = data_size; + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, + ringh); + ring->stats.rx_dropped++; + break; + } + } else { + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); + ring->stats.rx_dropped++; + break; + } + } else { + struct sk_buff *skb_up; + + skb_up = netdev_alloc_skb(dev, pkt_length + + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + if (skb_up != NULL) { + skb_reserve(skb_up, + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + + pci_dma_sync_single_for_cpu(ring->pdev, + data_dma, data_size, + PCI_DMA_FROMDEVICE); + + vxge_debug_mem(VXGE_TRACE, + "%s: %s:%d skb_up = %p", + ring->ndev->name, __func__, + __LINE__, skb); + memcpy(skb_up->data, skb->data, pkt_length); + + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, + ringh); + /* will netif_rx small SKB instead */ + skb = skb_up; + skb_put(skb, pkt_length); + } else { + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); + vxge_debug_rx(VXGE_ERR, + "%s: vxge_rx_1b_compl: out of " + "memory", dev->name); + ring->stats.skb_alloc_fail++; + break; + } + } + + if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && + !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && + ring->rx_csum && /* Offload Rx side CSUM */ + ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && + ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) + skb->ip_summed 
= CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + vxge_rx_complete(ring, skb, ext_info.vlan, + pkt_length, &ext_info); + + ring->budget--; + ring->pkts_processed++; + if (!ring->budget) + break; + + } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr, + &t_code) == VXGE_HW_OK); + + if (first_dtr) + vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr); + + dev->last_rx = jiffies; + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", + __func__, __LINE__); + return VXGE_HW_OK; +} + +/* + * vxge_xmit_compl + * + * If an interrupt was raised to indicate DMA complete of the Tx packet, + * this function is called. It identifies the last TxD whose buffer was + * freed and frees all skbs whose data have already DMA'ed into the NICs + * internal memory. + */ +enum vxge_hw_status +vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, + enum vxge_hw_fifo_tcode t_code, void *userdata, + void **skb_ptr) +{ + struct vxge_fifo *fifo = (struct vxge_fifo *)userdata; + struct sk_buff *skb, *head = NULL; + struct sk_buff **temp; + int pkt_cnt = 0; + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Entered....", __func__, __LINE__); + + do { + int frg_cnt; + skb_frag_t *frag; + int i = 0, j; + struct vxge_tx_priv *txd_priv = + vxge_hw_fifo_txdl_private_get(dtr); + + skb = txd_priv->skb; + frg_cnt = skb_shinfo(skb)->nr_frags; + frag = &skb_shinfo(skb)->frags[0]; + + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d fifo_hw = %p dtr = %p " + "tcode = 0x%x", fifo->ndev->name, __func__, + __LINE__, fifo_hw, dtr, t_code); + /* check skb validity */ + vxge_assert(skb); + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d", + fifo->ndev->name, __func__, __LINE__, + skb, txd_priv, frg_cnt); + if (unlikely(t_code)) { + fifo->stats.tx_errors++; + vxge_debug_tx(VXGE_ERR, + "%s: tx: dtr %p completed due to " + "error t_code %01x", fifo->ndev->name, + dtr, t_code); + vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code); + } + + /* for unfragmented skb */ + pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], + skb_headlen(skb), PCI_DMA_TODEVICE); + + for (j = 0; j < frg_cnt; j++) { + pci_unmap_page(fifo->pdev, + txd_priv->dma_buffers[i++], + frag->size, PCI_DMA_TODEVICE); + frag += 1; + } + + vxge_hw_fifo_txdl_free(fifo_hw, dtr); + + /* Updating the statistics block */ + fifo->stats.tx_frms++; + fifo->stats.tx_bytes += skb->len; + + temp = (struct sk_buff **)&skb->cb; + *temp = head; + head = skb; + + pkt_cnt++; + if (pkt_cnt > fifo->indicate_max_pkts) + break; + + } while (vxge_hw_fifo_txdl_next_completed(fifo_hw, + &dtr, &t_code) == VXGE_HW_OK); + + vxge_wake_tx_queue(fifo, skb); + + if (skb_ptr) + *skb_ptr = (void *) head; + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", + fifo->ndev->name, __func__, __LINE__); + return VXGE_HW_OK; +} + +/* select a vpath to trasmit the packet */ +static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, + int *do_lock) +{ + u16 queue_len, counter = 0; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip; + struct tcphdr *th; + + ip = ip_hdr(skb); + + if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) { + th = (struct tcphdr *)(((unsigned char *)ip) + + ip->ihl*4); + + queue_len = vdev->no_of_vpath; + counter = (ntohs(th->source) + + ntohs(th->dest)) & + vdev->vpath_selector[queue_len - 1]; + if (counter >= queue_len) + counter = queue_len - 1; + + if (ip->protocol == IPPROTO_UDP) { +#ifdef NETIF_F_LLTX + *do_lock = 0; +#endif + } + } + } + return counter; +} + +static enum vxge_hw_status vxge_search_mac_addr_in_list( 
+	struct vxge_vpath *vpath, u64 del_mac)
+{
+	struct list_head *entry, *next;
+	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
+			return TRUE;
+	}
+	return FALSE;
+}
+
+static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
+{
+	struct macInfo mac_info;
+	u8 *mac_address = NULL;
+	u64 mac_addr = 0, vpath_vector = 0;
+	int vpath_idx = 0;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath = NULL;
+	struct __vxge_hw_device *hldev;
+
+	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+
+	mac_address = (u8 *)&mac_addr;
+	memcpy(mac_address, mac_header, ETH_ALEN);
+
+	/* Is this mac address already in the list? */
+	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+		vpath = &vdev->vpaths[vpath_idx];
+		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
+			return vpath_idx;
+	}
+
+	memset(&mac_info, 0, sizeof(struct macInfo));
+	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
+
+	/* Does any vpath have room to add this mac address to its DA table? */
+	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+		vpath = &vdev->vpaths[vpath_idx];
+		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
+			/* Add this mac address to this vpath */
+			mac_info.vpath_no = vpath_idx;
+			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+			status = vxge_add_mac_addr(vdev, &mac_info);
+			if (status != VXGE_HW_OK)
+				return -EPERM;
+			return vpath_idx;
+		}
+	}
+
+	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
+	vpath_idx = 0;
+	mac_info.vpath_no = vpath_idx;
+	/* Is the first vpath already selected as catch-basin? */
+	vpath = &vdev->vpaths[vpath_idx];
+	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
+		/* Add this mac address to this vpath */
+		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
+			return -EPERM;
+		return vpath_idx;
+	}
+
+	/* Select first vpath as catch-basin */
+	vpath_vector = vxge_mBIT(vpath->device_id);
+	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
+				vxge_hw_mgmt_reg_type_mrpcim,
+				0,
+				(ulong)offsetof(
+					struct vxge_hw_mrpcim_reg,
+					rts_mgr_cbasin_cfg),
+				vpath_vector);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_tx(VXGE_ERR,
+			"%s: Unable to set the vpath-%d in catch-basin mode",
+			VXGE_DRIVER_NAME, vpath->device_id);
+		return -EPERM;
+	}
+
+	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
+		return -EPERM;
+
+	return vpath_idx;
+}
+
+/**
+ * vxge_xmit
+ * @skb : the socket buffer containing the Tx data.
+ * @dev : device pointer.
+ *
+ * This function is the Tx entry point of the driver. The Neterion NIC
+ * supports certain protocol-assist features on the Tx side, namely CSO,
+ * S/G and LSO.
+ * NOTE: when the device cannot queue the packet, only the trans_start
+ * variable is left un-updated.
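The TX steering helper earlier (vxge_get_vpath_no) picks a vpath by hashing the TCP source and destination ports and bounding the result with a precomputed vpath_selector mask. A compact illustration of the same flow-hash idea, where my_pick_queue and its modulo bound are hypothetical stand-ins for the driver's mask table:

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>

/* Illustrative only: steer unfragmented IPv4/TCP flows to a queue by
 * port hash; everything else goes to queue 0. */
static u16 my_pick_queue(struct sk_buff *skb, u16 nr_queues)
{
	const struct iphdr *ip;
	const struct tcphdr *th;

	if (skb->protocol != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	if (ip->frag_off & htons(IP_OFFSET | IP_MF))
		return 0;	/* fragments carry no L4 header */

	th = (const struct tcphdr *)((const u8 *)ip + ip->ihl * 4);

	/* simple sum hash, bounded to the queue count */
	return (ntohs(th->source) + ntohs(th->dest)) % nr_queues;
}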
+*/ +static int +vxge_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vxge_fifo *fifo = NULL; + void *dtr_priv; + void *dtr = NULL; + struct vxgedev *vdev = NULL; + enum vxge_hw_status status; + int frg_cnt, first_frg_len; + skb_frag_t *frag; + int i = 0, j = 0, avail; + u64 dma_pointer; + struct vxge_tx_priv *txdl_priv = NULL; + struct __vxge_hw_fifo *fifo_hw; + u32 max_mss = 0x0; + int offload_type; + unsigned long flags = 0; + int vpath_no = 0; + int do_spin_tx_lock = 1; + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + dev->name, __func__, __LINE__); + + /* A buffer with no data will be dropped */ + if (unlikely(skb->len <= 0)) { + vxge_debug_tx(VXGE_ERR, + "%s: Buffer has no data..", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + vdev = (struct vxgedev *)netdev_priv(dev); + + if (unlikely(!is_vxge_card_up(vdev))) { + vxge_debug_tx(VXGE_ERR, + "%s: vdev not initialized", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (vdev->config.addr_learn_en) { + vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN); + if (vpath_no == -EPERM) { + vxge_debug_tx(VXGE_ERR, + "%s: Failed to store the mac address", + dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + } + + if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) + vpath_no = skb_get_queue_mapping(skb); + else if (vdev->config.tx_steering_type == TX_PORT_STEERING) + vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock); + + vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no); + + if (vpath_no >= vdev->no_of_vpath) + vpath_no = 0; + + fifo = &vdev->vpaths[vpath_no].fifo; + fifo_hw = fifo->handle; + + if (do_spin_tx_lock) + spin_lock_irqsave(&fifo->tx_lock, flags); + else { + if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) + return NETDEV_TX_LOCKED; + } + + if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) { + if (netif_subqueue_stopped(dev, skb)) { + spin_unlock_irqrestore(&fifo->tx_lock, flags); + return NETDEV_TX_BUSY; + } + } else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) { + if (netif_queue_stopped(dev)) { + spin_unlock_irqrestore(&fifo->tx_lock, flags); + return NETDEV_TX_BUSY; + } + } + avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw); + if (avail == 0) { + vxge_debug_tx(VXGE_ERR, + "%s: No free TXDs available", dev->name); + fifo->stats.txd_not_free++; + vxge_stop_tx_queue(fifo); + goto _exit2; + } + + status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv); + if (unlikely(status != VXGE_HW_OK)) { + vxge_debug_tx(VXGE_ERR, + "%s: Out of descriptors .", dev->name); + fifo->stats.txd_out_of_desc++; + vxge_stop_tx_queue(fifo); + goto _exit2; + } + + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p", + dev->name, __func__, __LINE__, + fifo_hw, dtr, dtr_priv); + + if (vdev->vlgrp && vlan_tx_tag_present(skb)) { + u16 vlan_tag = vlan_tx_tag_get(skb); + vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); + } + + first_frg_len = skb_headlen(skb); + + dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, + PCI_DMA_TODEVICE); + + if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { + vxge_hw_fifo_txdl_free(fifo_hw, dtr); + vxge_stop_tx_queue(fifo); + fifo->stats.pci_map_fail++; + goto _exit2; + } + + txdl_priv = vxge_hw_fifo_txdl_private_get(dtr); + txdl_priv->skb = skb; + txdl_priv->dma_buffers[j] = dma_pointer; + + frg_cnt = skb_shinfo(skb)->nr_frags; + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d skb = %p txdl_priv = %p " + "frag_cnt = %d dma_pointer = 0x%llx", dev->name, + __func__, 
__LINE__, skb, txdl_priv, + frg_cnt, (unsigned long long)dma_pointer); + + vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, + first_frg_len); + + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < frg_cnt; i++) { + /* ignore 0 length fragment */ + if (!frag->size) + continue; + + dma_pointer = + (u64)pci_map_page(fifo->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) + goto _exit0; + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d frag = %d dma_pointer = 0x%llx", + dev->name, __func__, __LINE__, i, + (unsigned long long)dma_pointer); + + txdl_priv->dma_buffers[j] = dma_pointer; + vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, + frag->size); + frag += 1; + } + + offload_type = vxge_offload_type(skb); + + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + + int mss = vxge_tcp_mss(skb); + if (mss) { + max_mss = dev->mtu + ETH_HLEN - + VXGE_HW_TCPIP_HEADER_MAX_SIZE; + if (mss > max_mss) + mss = max_mss; + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d mss = %d", + dev->name, __func__, __LINE__, mss); + vxge_hw_fifo_txdl_mss_set(dtr, mss); + } else { + vxge_assert(skb->len <= + dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE); + vxge_assert(0); + goto _exit1; + } + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vxge_hw_fifo_txdl_cksum_set_bits(dtr, + VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN | + VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN | + VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); + + vxge_hw_fifo_txdl_post(fifo_hw, dtr); + dev->trans_start = jiffies; + spin_unlock_irqrestore(&fifo->tx_lock, flags); + + VXGE_COMPLETE_VPATH_TX(fifo); + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", + dev->name, __func__, __LINE__); + return 0; + +_exit0: + vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name); + +_exit1: + j = 0; + frag = &skb_shinfo(skb)->frags[0]; + + pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], + skb_headlen(skb), PCI_DMA_TODEVICE); + + for (; j < i; j++) { + pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], + frag->size, PCI_DMA_TODEVICE); + frag += 1; + } + + vxge_hw_fifo_txdl_free(fifo_hw, dtr); +_exit2: + dev_kfree_skb(skb); + spin_unlock_irqrestore(&fifo->tx_lock, flags); + VXGE_COMPLETE_VPATH_TX(fifo); + + return 0; +} + +/* + * vxge_rx_term + * + * Function will be called by hw function to abort all outstanding receive + * descriptors. 
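The _exit0/_exit1 unwind path in vxge_xmit above has to unmap exactly the fragments mapped so far, in order, before freeing the descriptor. The same partial-unwind discipline in isolation, a sketch under hypothetical names (my_map_skb, the dma[] array and its max bound are not part of the driver):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Illustrative only: map head + frags, unwinding on failure so no
 * mapping leaks. Returns 0 or -EIO. */
static int my_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
		      dma_addr_t *dma, int max)
{
	int i, n = skb_shinfo(skb)->nr_frags;

	dma[0] = pci_map_single(pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma[0]))
		return -EIO;

	for (i = 0; i < n && i + 1 < max; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		dma[i + 1] = pci_map_page(pdev, f->page, f->page_offset,
					  f->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, dma[i + 1]))
			goto unwind;
	}
	return 0;

unwind:
	/* unmap frags i-1 .. 0, then the linear head */
	while (--i >= 0)
		pci_unmap_page(pdev, dma[i + 1],
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	pci_unmap_single(pdev, dma[0], skb_headlen(skb), PCI_DMA_TODEVICE);
	return -EIO;
}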
+ */
+static void
+vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
+{
+	struct vxge_ring *ring = (struct vxge_ring *)userdata;
+	struct vxge_rx_priv *rx_priv =
+		vxge_hw_ring_rxd_private_get(dtrh);
+
+	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+		ring->ndev->name, __func__, __LINE__);
+	if (state != VXGE_HW_RXD_STATE_POSTED)
+		return;
+
+	pci_unmap_single(ring->pdev, rx_priv->data_dma,
+		rx_priv->data_size, PCI_DMA_FROMDEVICE);
+
+	dev_kfree_skb(rx_priv->skb);
+
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s: %s:%d Exiting...",
+		ring->ndev->name, __func__, __LINE__);
+}
+
+/*
+ * vxge_tx_term
+ *
+ * Called to abort all outstanding Tx descriptors
+ */
+static void
+vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
+{
+	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
+	skb_frag_t *frag;
+	int i = 0, j, frg_cnt;
+	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
+	struct sk_buff *skb = txd_priv->skb;
+
+	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+	if (state != VXGE_HW_TXDL_STATE_POSTED)
+		return;
+
+	/* check skb validity */
+	vxge_assert(skb);
+	frg_cnt = skb_shinfo(skb)->nr_frags;
+	frag = &skb_shinfo(skb)->frags[0];
+
+	/* for unfragmented skb */
+	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
+		skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	for (j = 0; j < frg_cnt; j++) {
+		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
+			       frag->size, PCI_DMA_TODEVICE);
+		frag += 1;
+	}
+
+	dev_kfree_skb(skb);
+
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s:%d Exiting...", __func__, __LINE__);
+}
+
+/**
+ * vxge_set_multicast
+ * @dev: pointer to the device structure
+ *
+ * Entry point for multicast address enable/disable
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled. This also gets
+ * called to set/reset promiscuous mode. Depending on the device flags,
+ * we determine whether multicast addresses must be enabled or whether
+ * promiscuous mode is to be disabled, etc.
+ */
+static void vxge_set_multicast(struct net_device *dev)
+{
+	struct dev_mc_list *mclist;
+	struct vxgedev *vdev;
+	int i, mcast_cnt = 0;
+	struct __vxge_hw_device *hldev;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct macInfo mac_info;
+	int vpath_idx = 0;
+	struct vxge_mac_addrs *mac_entry;
+	struct list_head *list_head;
+	struct list_head *entry, *next;
+	u8 *mac_address = NULL;
+
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s:%d", __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+	hldev = (struct __vxge_hw_device *)vdev->devh;
+
+	if (unlikely(!is_vxge_card_up(vdev)))
+		return;
+
+	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vxge_assert(vdev->vpaths[i].is_open);
+			status = vxge_hw_vpath_mcast_enable(
+						vdev->vpaths[i].handle);
+			vdev->all_multi_flg = 1;
+		}
+	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vxge_assert(vdev->vpaths[i].is_open);
+			status = vxge_hw_vpath_mcast_disable(
+						vdev->vpaths[i].handle);
+			vdev->all_multi_flg = 0;
+		}
+	}
+
+	if (status != VXGE_HW_OK)
+		vxge_debug_init(VXGE_ERR,
+			"failed to %s multicast, status %d",
+			dev->flags & IFF_ALLMULTI ?
+ "enable" : "disable", status); + + if (!vdev->config.addr_learn_en) { + if (dev->flags & IFF_PROMISC) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_assert(vdev->vpaths[i].is_open); + status = vxge_hw_vpath_promisc_enable( + vdev->vpaths[i].handle); + } + } else { + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_assert(vdev->vpaths[i].is_open); + status = vxge_hw_vpath_promisc_disable( + vdev->vpaths[i].handle); + } + } + } + + memset(&mac_info, 0, sizeof(struct macInfo)); + /* Update individual M_CAST address list */ + if ((!vdev->all_multi_flg) && dev->mc_count) { + + mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; + list_head = &vdev->vpaths[0].mac_addr_list; + if ((dev->mc_count + + (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) > + vdev->vpaths[0].max_mac_addr_cnt) + goto _set_all_mcast; + + /* Delete previous MC's */ + for (i = 0; i < mcast_cnt; i++) { + if (!list_empty(list_head)) + mac_entry = (struct vxge_mac_addrs *) + list_first_entry(list_head, + struct vxge_mac_addrs, + item); + + list_for_each_safe(entry, next, list_head) { + + mac_entry = (struct vxge_mac_addrs *) entry; + /* Copy the mac address to delete */ + mac_address = (u8 *)&mac_entry->macaddr; + memcpy(mac_info.macaddr, mac_address, ETH_ALEN); + + /* Is this a multicast address */ + if (0x01 & mac_info.macaddr[0]) { + for (vpath_idx = 0; vpath_idx < + vdev->no_of_vpath; + vpath_idx++) { + mac_info.vpath_no = vpath_idx; + status = vxge_del_mac_addr( + vdev, + &mac_info); + } + } + } + } + + /* Add new ones */ + for (i = 0, mclist = dev->mc_list; i < dev->mc_count; + i++, mclist = mclist->next) { + + memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; + vpath_idx++) { + mac_info.vpath_no = vpath_idx; + mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; + status = vxge_add_mac_addr(vdev, &mac_info); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s:%d Setting individual" + "multicast address failed", + __func__, __LINE__); + goto _set_all_mcast; + } + } + } + + return; +_set_all_mcast: + mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; + /* Delete previous MC's */ + for (i = 0; i < mcast_cnt; i++) { + + list_for_each_safe(entry, next, list_head) { + + mac_entry = (struct vxge_mac_addrs *) entry; + /* Copy the mac address to delete */ + mac_address = (u8 *)&mac_entry->macaddr; + memcpy(mac_info.macaddr, mac_address, ETH_ALEN); + + /* Is this a multicast address */ + if (0x01 & mac_info.macaddr[0]) + break; + } + + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; + vpath_idx++) { + mac_info.vpath_no = vpath_idx; + status = vxge_del_mac_addr(vdev, &mac_info); + } + } + + /* Enable all multicast */ + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_assert(vdev->vpaths[i].is_open); + status = vxge_hw_vpath_mcast_enable( + vdev->vpaths[i].handle); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s:%d Enabling all multicasts failed", + __func__, __LINE__); + } + vdev->all_multi_flg = 1; + } + dev->flags |= IFF_ALLMULTI; + } + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); +} + +/** + * vxge_set_mac_addr + * @dev: pointer to the device structure + * + * Update entry "0" (default MAC addr) + */ +static int vxge_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct vxgedev *vdev; + struct __vxge_hw_device *hldev; + enum vxge_hw_status status = VXGE_HW_OK; + struct macInfo mac_info_new, mac_info_old; + int vpath_idx = 0; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + 
vdev = (struct vxgedev *)netdev_priv(dev); + hldev = vdev->devh; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memset(&mac_info_new, 0, sizeof(struct macInfo)); + memset(&mac_info_old, 0, sizeof(struct macInfo)); + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", + __func__, __LINE__); + + /* Get the old address */ + memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len); + + /* Copy the new address */ + memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len); + + /* First delete the old mac address from all the vpaths + as we can't specify the index while adding new mac address */ + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx]; + if (!vpath->is_open) { + /* This can happen when this interface is added/removed + to the bonding interface. Delete this station address + from the linked list */ + vxge_mac_list_del(vpath, &mac_info_old); + + /* Add this new address to the linked list + for later restoring */ + vxge_mac_list_add(vpath, &mac_info_new); + + continue; + } + /* Delete the station address */ + mac_info_old.vpath_no = vpath_idx; + status = vxge_del_mac_addr(vdev, &mac_info_old); + } + + if (unlikely(!is_vxge_card_up(vdev))) { + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return VXGE_HW_OK; + } + + /* Set this mac address to all the vpaths */ + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + mac_info_new.vpath_no = vpath_idx; + mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; + status = vxge_add_mac_addr(vdev, &mac_info_new); + if (status != VXGE_HW_OK) + return -EINVAL; + } + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return status; +} + +/* + * vxge_vpath_intr_enable + * @vdev: pointer to vdev + * @vp_id: vpath for which to enable the interrupts + * + * Enables the interrupts for the vpath +*/ +void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) +{ + struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; + int msix_id, alarm_msix_id; + int tim_msix_id[4] = {[0 ...3] = 0}; + + vxge_hw_vpath_intr_enable(vpath->handle); + + if (vdev->config.intr_type == INTA) + vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); + else { + msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; + alarm_msix_id = + VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; + + tim_msix_id[0] = msix_id; + tim_msix_id[1] = msix_id + 1; + vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, + alarm_msix_id); + + vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); + vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); + + /* enable the alarm vector */ + vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id); + } +} + +/* + * vxge_vpath_intr_disable + * @vdev: pointer to vdev + * @vp_id: vpath for which to disable the interrupts + * + * Disables the interrupts for the vpath +*/ +void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) +{ + struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; + int msix_id; + + vxge_hw_vpath_intr_disable(vpath->handle); + + if (vdev->config.intr_type == INTA) + vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); + else { + msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; + vxge_hw_vpath_msix_mask(vpath->handle, msix_id); + vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); + + /* disable the alarm vector */ + msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; + vxge_hw_vpath_msix_mask(vpath->handle, msix_id); + } +} + +/* + * vxge_reset_vpath + * @vdev: pointer to vdev + * @vp_id: vpath to reset + * + * Resets the vpath +*/ +static int 
vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	int ret = 0;
+
+	/* check if device is down already */
+	if (unlikely(!is_vxge_card_up(vdev)))
+		return 0;
+
+	/* is a device reset already scheduled? */
+	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
+		return 0;
+
+	if (vdev->vpaths[vp_id].handle) {
+		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
+				== VXGE_HW_OK) {
+			if (is_vxge_card_up(vdev) &&
+				vxge_hw_vpath_recover_from_reset(
+					vdev->vpaths[vp_id].handle)
+					!= VXGE_HW_OK) {
+				vxge_debug_init(VXGE_ERR,
+					"vxge_hw_vpath_recover_from_reset "
+					"failed for vpath:%d", vp_id);
+				return status;
+			}
+		} else {
+			vxge_debug_init(VXGE_ERR,
+				"vxge_hw_vpath_reset failed for "
+				"vpath:%d", vp_id);
+			return status;
+		}
+	} else
+		return VXGE_HW_FAIL;
+
+	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
+	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
+
+	/* Enable all broadcast */
+	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);
+
+	/* Enable the interrupts */
+	vxge_vpath_intr_enable(vdev, vp_id);
+
+	smp_wmb();
+
+	/* Enable the flow of traffic through the vpath */
+	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);
+
+	smp_wmb();
+	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
+	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;
+
+	/* Vpath reset done */
+	clear_bit(vp_id, &vdev->vp_reset);
+
+	/* Start the vpath queue */
+	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);
+
+	return ret;
+}
+
+static int do_vxge_reset(struct vxgedev *vdev, int event)
+{
+	enum vxge_hw_status status;
+	int ret = 0, vp_id, i;
+
+	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
+		/* check if device is down already */
+		if (unlikely(!is_vxge_card_up(vdev)))
+			return 0;
+
+		/* is a reset already scheduled? */
+		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
+			return 0;
+	}
+
+	if (event == VXGE_LL_FULL_RESET) {
+		/* wait for all the vpath reset to complete */
+		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+			while (test_bit(vp_id, &vdev->vp_reset))
+				msleep(50);
+		}
+
+		/* if execution mode is set to debug, don't reset the adapter */
+		if (unlikely(vdev->exec_mode)) {
+			vxge_debug_init(VXGE_ERR,
+				"%s: execution mode is debug, returning..",
+				vdev->ndev->name);
+			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+			vxge_stop_all_tx_queue(vdev);
+			return 0;
+		}
+	}
+
+	if (event == VXGE_LL_FULL_RESET) {
+		vxge_hw_device_intr_disable(vdev->devh);
+
+		switch (vdev->cric_err_event) {
+		case VXGE_HW_EVENT_UNKNOWN:
+			vxge_stop_all_tx_queue(vdev);
+			vxge_debug_init(VXGE_ERR,
+				"fatal: %s: Disabling device due to "
+				"unknown error",
+				vdev->ndev->name);
+			ret = -EPERM;
+			goto out;
+		case VXGE_HW_EVENT_RESET_START:
+			break;
+		case VXGE_HW_EVENT_RESET_COMPLETE:
+		case VXGE_HW_EVENT_LINK_DOWN:
+		case VXGE_HW_EVENT_LINK_UP:
+		case VXGE_HW_EVENT_ALARM_CLEARED:
+		case VXGE_HW_EVENT_ECCERR:
+		case VXGE_HW_EVENT_MRPCIM_ECCERR:
+			ret = -EPERM;
+			goto out;
+		case VXGE_HW_EVENT_FIFO_ERR:
+		case VXGE_HW_EVENT_VPATH_ERR:
+			break;
+		case VXGE_HW_EVENT_CRITICAL_ERR:
+			vxge_stop_all_tx_queue(vdev);
+			vxge_debug_init(VXGE_ERR,
+				"fatal: %s: Disabling device due to "
+				"serious error",
+				vdev->ndev->name);
+			/* SOP or device reset required */
+			/* This event is not currently used */
+			ret = -EPERM;
+			goto out;
+		case VXGE_HW_EVENT_SERR:
+			vxge_stop_all_tx_queue(vdev);
+			vxge_debug_init(VXGE_ERR,
+				"fatal: %s: Disabling device due to "
+				"serious error",
error", + vdev->ndev->name); + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_SRPCIM_SERR: + case VXGE_HW_EVENT_MRPCIM_SERR: + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_SLOT_FREEZE: + vxge_stop_all_tx_queue(vdev); + vxge_debug_init(VXGE_ERR, + "fatal: %s: Disabling device due to" + "slot freeze", + vdev->ndev->name); + ret = -EPERM; + goto out; + default: + break; + + } + } + + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) + vxge_stop_all_tx_queue(vdev); + + if (event == VXGE_LL_FULL_RESET) { + status = vxge_reset_all_vpaths(vdev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "fatal: %s: can not reset vpaths", + vdev->ndev->name); + ret = -EPERM; + goto out; + } + } + + if (event == VXGE_LL_COMPL_RESET) { + for (i = 0; i < vdev->no_of_vpath; i++) + if (vdev->vpaths[i].handle) { + if (vxge_hw_vpath_recover_from_reset( + vdev->vpaths[i].handle) + != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_recover_" + "from_reset failed for vpath: " + "%d", i); + ret = -EPERM; + goto out; + } + } else { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_reset failed for " + "vpath:%d", i); + ret = -EPERM; + goto out; + } + } + + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) { + /* Reprogram the DA table with populated mac addresses */ + for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { + vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); + vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); + } + + /* enable vpath interrupts */ + for (i = 0; i < vdev->no_of_vpath; i++) + vxge_vpath_intr_enable(vdev, i); + + vxge_hw_device_intr_enable(vdev->devh); + + smp_wmb(); + + /* Indicate card up */ + set_bit(__VXGE_STATE_CARD_UP, &vdev->state); + + /* Get the traffic to flow through the vpaths */ + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_hw_vpath_enable(vdev->vpaths[i].handle); + smp_wmb(); + vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); + } + + vxge_wake_all_tx_queue(vdev); + } + +out: + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); + + /* Indicate reset done */ + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) + clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); + return ret; +} + +/* + * vxge_reset + * @vdev: pointer to ll device + * + * driver may reset the chip on events of serr, eccerr, etc + */ +int vxge_reset(struct vxgedev *vdev) +{ + do_vxge_reset(vdev, VXGE_LL_FULL_RESET); + return 0; +} + +/** + * vxge_poll - Receive handler when Receive Polling is used. + * @dev: pointer to the device structure. + * @budget: Number of packets budgeted to be processed in this iteration. + * + * This function comes into picture only if Receive side is being handled + * through polling (called NAPI in linux). It mostly does what the normal + * Rx interrupt handler does in terms of descriptor and packet processing + * but not in an interrupt context. Also it will process a specified number + * of packets at most in one iteration. This value is passed down by the + * kernel as the function argument 'budget'. 
+ */ +static int vxge_poll_msix(struct napi_struct *napi, int budget) +{ + struct vxge_ring *ring = + container_of(napi, struct vxge_ring, napi); + int budget_org = budget; + ring->budget = budget; + + vxge_hw_vpath_poll_rx(ring->handle); + + if (ring->pkts_processed < budget_org) { + napi_complete(napi); + /* Re enable the Rx interrupts for the vpath */ + vxge_hw_channel_msix_unmask( + (struct __vxge_hw_channel *)ring->handle, + ring->rx_vector_no); + } + + return ring->pkts_processed; +} + +static int vxge_poll_inta(struct napi_struct *napi, int budget) +{ + struct vxgedev *vdev = container_of(napi, struct vxgedev, napi); + int pkts_processed = 0; + int i; + int budget_org = budget; + struct vxge_ring *ring; + + struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) + pci_get_drvdata(vdev->pdev); + + for (i = 0; i < vdev->no_of_vpath; i++) { + ring = &vdev->vpaths[i].ring; + ring->budget = budget; + vxge_hw_vpath_poll_rx(ring->handle); + pkts_processed += ring->pkts_processed; + budget -= ring->pkts_processed; + if (budget <= 0) + break; + } + + VXGE_COMPLETE_ALL_TX(vdev); + + if (pkts_processed < budget_org) { + napi_complete(napi); + /* Re enable the Rx interrupts for the ring */ + vxge_hw_device_unmask_all(hldev); + vxge_hw_device_flush_io(hldev); + } + + return pkts_processed; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * vxge_netpoll - netpoll event handler entry point + * @dev : pointer to the device structure. + * Description: + * This function will be called by upper layer to check for events on the + * interface in situations where interrupts are disabled. It is used for + * specific in-kernel networking tasks, such as remote consoles and kernel + * debugging over the network (example netdump in RedHat). + */ +static void vxge_netpoll(struct net_device *dev) +{ + struct __vxge_hw_device *hldev; + struct vxgedev *vdev; + + vdev = (struct vxgedev *)netdev_priv(dev); + hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + if (pci_channel_offline(vdev->pdev)) + return; + + disable_irq(dev->irq); + vxge_hw_device_clear_tx_rx(hldev); + + vxge_hw_device_clear_tx_rx(hldev); + VXGE_COMPLETE_ALL_RX(vdev); + VXGE_COMPLETE_ALL_TX(vdev); + + enable_irq(dev->irq); + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); + return; +} +#endif + +/* RTH configuration */ +static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_rth_hash_types hash_types; + u8 itable[256] = {0}; /* indirection table */ + u8 mtable[256] = {0}; /* CPU to vpath mapping */ + int index; + + /* + * Filling + * - itable with bucket numbers + * - mtable with bucket-to-vpath mapping + */ + for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) { + itable[index] = index; + mtable[index] = index % vdev->no_of_vpath; + } + + /* Fill RTH hash types */ + hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; + hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; + hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; + hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; + hash_types.hash_type_tcpipv6ex_en = + vdev->config.rth_hash_type_tcpipv6ex; + hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; + + /* set indirection table, bucket-to-vpath mapping */ + status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, + vdev->no_of_vpath, + mtable, itable, + 
vdev->config.rth_bkt_sz); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "RTH indirection table configuration failed " + "for vpath:%d", vdev->vpaths[0].device_id); + return status; + } + + /* + * Because the itable_set() method uses the active_table field + * for the target virtual path the RTH config should be updated + * for all VPATHs. The h/w only uses the lowest numbered VPATH + * when steering frames. + */ + for (index = 0; index < vdev->no_of_vpath; index++) { + status = vxge_hw_vpath_rts_rth_set( + vdev->vpaths[index].handle, + vdev->config.rth_algorithm, + &hash_types, + vdev->config.rth_bkt_sz); + + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "RTH configuration failed for vpath:%d", + vdev->vpaths[index].device_id); + return status; + } + } + + return status; +} + +int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) +{ + struct vxge_mac_addrs *new_mac_entry; + u8 *mac_address = NULL; + + if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) + return TRUE; + + new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); + if (!new_mac_entry) { + vxge_debug_mem(VXGE_ERR, + "%s: memory allocation failed", + VXGE_DRIVER_NAME); + return FALSE; + } + + list_add(&new_mac_entry->item, &vpath->mac_addr_list); + + /* Copy the new mac address to the list */ + mac_address = (u8 *)&new_mac_entry->macaddr; + memcpy(mac_address, mac->macaddr, ETH_ALEN); + + new_mac_entry->state = mac->state; + vpath->mac_addr_cnt++; + + /* Is this a multicast address */ + if (0x01 & mac->macaddr[0]) + vpath->mcast_addr_cnt++; + + return TRUE; +} + +/* Add a mac address to DA table */ +enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; + + if (0x01 & mac->macaddr[0]) /* multicast address */ + duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; + else + duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; + + vpath = &vdev->vpaths[mac->vpath_no]; + status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, + mac->macmask, duplicate_mode); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config add entry failed for vpath:%d", + vpath->device_id); + } else + if (FALSE == vxge_mac_list_add(vpath, mac)) + status = -EPERM; + + return status; +} + +int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) +{ + struct list_head *entry, *next; + u64 del_mac = 0; + u8 *mac_address = (u8 *) (&del_mac); + + /* Copy the mac address to delete from the list */ + memcpy(mac_address, mac->macaddr, ETH_ALEN); + + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { + list_del(entry); + kfree((struct vxge_mac_addrs *)entry); + vpath->mac_addr_cnt--; + + /* Is this a multicast address */ + if (0x01 & mac->macaddr[0]) + vpath->mcast_addr_cnt--; + return TRUE; + } + } + + return FALSE; +} +/* delete a mac address from DA table */ +enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + + vpath = &vdev->vpaths[mac->vpath_no]; + status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, + mac->macmask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config delete entry failed for vpath:%d", + vpath->device_id); + } else + vxge_mac_list_del(vpath, mac); + return status; +} + +/* list all mac 
addresses from the DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
+					struct macInfo *mac)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	unsigned char macmask[ETH_ALEN];
+	unsigned char macaddr[ETH_ALEN];
+
+	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+				macaddr, macmask);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"DA config list entry failed for vpath:%d",
+			vpath->device_id);
+		return status;
+	}
+
+	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+
+		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+				macaddr, macmask);
+		if (status != VXGE_HW_OK)
+			break;
+	}
+
+	return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxgedev *vdev = vpath->vdev;
+	u16 vid;
+
+	if (vdev->vlgrp && vpath->is_open) {
+
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(vdev->vlgrp, vid))
+				continue;
+			/* Add this vlan id to the vid table */
+			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+		}
+	}
+
+	return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct macInfo mac_info;
+	u8 *mac_address = NULL;
+	struct list_head *entry, *next;
+
+	memset(&mac_info, 0, sizeof(struct macInfo));
+
+	if (vpath->is_open) {
+
+		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+			mac_address =
+				(u8 *)&
+				((struct vxge_mac_addrs *)entry)->macaddr;
+			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+			((struct vxge_mac_addrs *)entry)->state =
+				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+			/* does this mac address already exist in the DA table?
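The add/delete helpers above keep a software copy of each vpath's DA table as a linked list so the entries can be replayed into hardware after a reset. The core list idiom, reduced to its essentials (my_mac_node is a stand-in for struct vxge_mac_addrs):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: shadow list of programmed MAC addresses. */
struct my_mac_node {
	struct list_head item;
	u64 macaddr;
};

static int my_mac_del(struct list_head *head, u64 mac)
{
	struct list_head *entry, *next;

	/* _safe variant because we delete while iterating */
	list_for_each_safe(entry, next, head) {
		struct my_mac_node *n = list_entry(entry,
						   struct my_mac_node, item);
		if (n->macaddr == mac) {
			list_del(entry);
			kfree(n);
			return 1;
		}
	}
	return 0;
}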
*/ + status = vxge_search_mac_addr_in_da_table(vpath, + &mac_info); + if (status != VXGE_HW_OK) { + /* Add this mac address to the DA table */ + status = vxge_hw_vpath_mac_addr_add( + vpath->handle, mac_info.macaddr, + mac_info.macmask, + VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA add entry failed for vpath:%d", + vpath->device_id); + ((struct vxge_mac_addrs *)entry)->state + = VXGE_LL_MAC_ADDR_IN_LIST; + } + } + } + } + + return status; +} + +/* reset vpaths */ +enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) +{ + int i; + enum vxge_hw_status status = VXGE_HW_OK; + + for (i = 0; i < vdev->no_of_vpath; i++) + if (vdev->vpaths[i].handle) { + if (vxge_hw_vpath_reset(vdev->vpaths[i].handle) + == VXGE_HW_OK) { + if (is_vxge_card_up(vdev) && + vxge_hw_vpath_recover_from_reset( + vdev->vpaths[i].handle) + != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_recover_" + "from_reset failed for vpath: " + "%d", i); + return status; + } + } else { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_reset failed for " + "vpath:%d", i); + return status; + } + } + return status; +} + +/* close vpaths */ +void vxge_close_vpaths(struct vxgedev *vdev, int index) +{ + int i; + for (i = index; i < vdev->no_of_vpath; i++) { + if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) { + vxge_hw_vpath_close(vdev->vpaths[i].handle); + vdev->stats.vpaths_open--; + } + vdev->vpaths[i].is_open = 0; + vdev->vpaths[i].handle = NULL; + } +} + +/* open vpaths */ +int vxge_open_vpaths(struct vxgedev *vdev) +{ + enum vxge_hw_status status; + int i; + u32 vp_id = 0; + struct vxge_hw_vpath_attr attr; + + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_assert(vdev->vpaths[i].is_configured); + attr.vp_id = vdev->vpaths[i].device_id; + attr.fifo_attr.callback = vxge_xmit_compl; + attr.fifo_attr.txdl_term = vxge_tx_term; + attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); + attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo; + + attr.ring_attr.callback = vxge_rx_1b_compl; + attr.ring_attr.rxd_init = vxge_rx_initial_replenish; + attr.ring_attr.rxd_term = vxge_rx_term; + attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); + attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring; + + vdev->vpaths[i].ring.ndev = vdev->ndev; + vdev->vpaths[i].ring.pdev = vdev->pdev; + status = vxge_hw_vpath_open(vdev->devh, &attr, + &(vdev->vpaths[i].handle)); + if (status == VXGE_HW_OK) { + vdev->vpaths[i].fifo.handle = + (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; + vdev->vpaths[i].ring.handle = + (struct __vxge_hw_ring *)attr.ring_attr.userdata; + vdev->vpaths[i].fifo.tx_steering_type = + vdev->config.tx_steering_type; + vdev->vpaths[i].fifo.ndev = vdev->ndev; + vdev->vpaths[i].fifo.pdev = vdev->pdev; + vdev->vpaths[i].fifo.indicate_max_pkts = + vdev->config.fifo_indicate_max_pkts; + vdev->vpaths[i].ring.rx_vector_no = 0; + vdev->vpaths[i].ring.rx_csum = vdev->rx_csum; + vdev->vpaths[i].is_open = 1; + vdev->vp_handles[i] = vdev->vpaths[i].handle; + vdev->vpaths[i].ring.gro_enable = + vdev->config.gro_enable; + vdev->vpaths[i].ring.vlan_tag_strip = + vdev->vlan_tag_strip; + vdev->stats.vpaths_open++; + } else { + vdev->stats.vpath_open_fail++; + vxge_debug_init(VXGE_ERR, + "%s: vpath: %d failed to open " + "with status: %d", + vdev->ndev->name, vdev->vpaths[i].device_id, + status); + vxge_close_vpaths(vdev, 0); + return -EPERM; + } + + vp_id = + ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)-> + vpath->vp_id; + 
vdev->vpaths_deployed |= vxge_mBIT(vp_id); + } + return VXGE_HW_OK; +} + +/* + * vxge_isr_napi + * @irq: the irq of the device. + * @dev_id: a void pointer to the hldev structure of the Titan device + * @ptregs: pointer to the registers pushed on the stack. + * + * This function is the ISR handler of the device when napi is enabled. It + * identifies the reason for the interrupt and calls the relevant service + * routines. + */ +static irqreturn_t vxge_isr_napi(int irq, void *dev_id) +{ + struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)dev_id; + struct vxgedev *vdev; + struct net_device *dev; + u64 reason; + enum vxge_hw_status status; + + vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + dev = hldev->ndev; + vdev = netdev_priv(dev); + + if (pci_channel_offline(vdev->pdev)) + return IRQ_NONE; + + if (unlikely(!is_vxge_card_up(vdev))) + return IRQ_NONE; + + status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, + &reason); + if (status == VXGE_HW_OK) { + vxge_hw_device_mask_all(hldev); + + if (reason & + VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT( + vdev->vpaths_deployed >> + (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) { + + vxge_hw_device_clear_tx_rx(hldev); + napi_schedule(&vdev->napi); + vxge_debug_intr(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); + return IRQ_HANDLED; + } else + vxge_hw_device_unmask_all(hldev); + } else if (unlikely((status == VXGE_HW_ERR_VPATH) || + (status == VXGE_HW_ERR_CRITICAL) || + (status == VXGE_HW_ERR_FIFO))) { + vxge_hw_device_mask_all(hldev); + vxge_hw_device_flush_io(hldev); + return IRQ_HANDLED; + } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE)) + return IRQ_HANDLED; + + vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); + return IRQ_NONE; +} + +#ifdef CONFIG_PCI_MSI + +static irqreturn_t +vxge_tx_msix_handle(int irq, void *dev_id) +{ + struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; + + VXGE_COMPLETE_VPATH_TX(fifo); + + return IRQ_HANDLED; +} + +static irqreturn_t +vxge_rx_msix_napi_handle(int irq, void *dev_id) +{ + struct vxge_ring *ring = (struct vxge_ring *)dev_id; + + /* MSIX_IDX for Rx is 1 */ + vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, + ring->rx_vector_no); + + napi_schedule(&ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t +vxge_alarm_msix_handle(int irq, void *dev_id) +{ + int i; + enum vxge_hw_status status; + struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; + struct vxgedev *vdev = vpath->vdev; + int alarm_msix_id = + VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; + + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, + alarm_msix_id); + + status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, + vdev->exec_mode); + if (status == VXGE_HW_OK) { + + vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, + alarm_msix_id); + continue; + } + vxge_debug_intr(VXGE_ERR, + "%s: vxge_hw_vpath_alarm_process failed %x ", + VXGE_DRIVER_NAME, status); + } + return IRQ_HANDLED; +} + +static int vxge_alloc_msix(struct vxgedev *vdev) +{ + int j, i, ret = 0; + int intr_cnt = 0; + int alarm_msix_id = 0, msix_intr_vect = 0; + vdev->intr_cnt = 0; + + /* Tx/Rx MSIX Vectors count */ + vdev->intr_cnt = vdev->no_of_vpath * 2; + + /* Alarm MSIX Vectors count */ + vdev->intr_cnt++; + + intr_cnt = (vdev->max_vpath_supported * 2) + 1; + vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry), + GFP_KERNEL); + if (!vdev->entries) { + vxge_debug_init(VXGE_ERR, + "%s: memory allocation failed", + VXGE_DRIVER_NAME); + 
return -ENOMEM;
+	}
+
+	vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
+							GFP_KERNEL);
+	if (!vdev->vxge_entries) {
+		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
+			VXGE_DRIVER_NAME);
+		kfree(vdev->entries);
+		return -ENOMEM;
+	}
+
+	/* Last vector in the list is used for alarm */
+	alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+	for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+
+		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
+
+		/* Initialize the fifo vector */
+		vdev->entries[j].entry = msix_intr_vect;
+		vdev->vxge_entries[j].entry = msix_intr_vect;
+		vdev->vxge_entries[j].in_use = 0;
+		j++;
+
+		/* Initialize the ring vector */
+		vdev->entries[j].entry = msix_intr_vect + 1;
+		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
+		vdev->vxge_entries[j].in_use = 0;
+		j++;
+	}
+
+	/* Initialize the alarm vector */
+	vdev->entries[j].entry = alarm_msix_id;
+	vdev->vxge_entries[j].entry = alarm_msix_id;
+	vdev->vxge_entries[j].in_use = 0;
+
+	ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
+	/* If the driver request exceeds the available IRQs, retry with a
+	 * smaller number.
+	 */
+	if (ret > 0) {
+		vxge_debug_init(VXGE_ERR,
+			"%s: MSI-X enable failed for %d vectors, available: %d",
+			VXGE_DRIVER_NAME, intr_cnt, ret);
+		vdev->max_vpath_supported = vdev->no_of_vpath;
+		intr_cnt = (vdev->max_vpath_supported * 2) + 1;
+
+		/* Reset the alarm vector setting */
+		vdev->entries[j].entry = 0;
+		vdev->vxge_entries[j].entry = 0;
+
+		/* Initialize the alarm vector with new setting */
+		vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
+		vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
+		vdev->vxge_entries[intr_cnt - 1].in_use = 0;
+
+		ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
+		if (!ret)
+			vxge_debug_init(VXGE_ERR,
+				"%s: MSI-X enabled for %d vectors",
+				VXGE_DRIVER_NAME, intr_cnt);
+	}
+
+	if (ret) {
+		vxge_debug_init(VXGE_ERR,
+			"%s: MSI-X enable failed for %d vectors, ret: %d",
+			VXGE_DRIVER_NAME, intr_cnt, ret);
+		kfree(vdev->entries);
+		kfree(vdev->vxge_entries);
+		vdev->entries = NULL;
+		vdev->vxge_entries = NULL;
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int vxge_enable_msix(struct vxgedev *vdev)
+{
+
+	int i, ret = 0;
+	enum vxge_hw_status status;
+	/* 0 - Tx, 1 - Rx */
+	int tim_msix_id[4];
+	int alarm_msix_id = 0, msix_intr_vect = 0;
+	vdev->intr_cnt = 0;
+
+	/* allocate msix vectors */
+	ret = vxge_alloc_msix(vdev);
+	if (!ret) {
+		/* Last vector in the list is used for alarm */
+		alarm_msix_id =
+			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+
+			/* If fifo or ring are not enabled, the MSIX vector
+			 * for that should be set to 0.
+			 * Hence initializing this array to all 0s.
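The fallback in vxge_alloc_msix() relies on pci_enable_msix() of this era returning a positive count when fewer vectors are available than requested, so the driver retries with a reduced table. The retry idiom on its own (my_enable_msix is a hypothetical wrapper; a real driver may need to loop or rebuild its entry table, as the code above does):

#include <linux/pci.h>

/* Illustrative only: request nvec MSI-X vectors, falling back once to
 * whatever the device/platform can actually grant. */
static int my_enable_msix(struct pci_dev *pdev,
			  struct msix_entry *entries, int nvec)
{
	int ret = pci_enable_msix(pdev, entries, nvec);

	if (ret > 0 && ret < nvec)	/* only 'ret' vectors available */
		ret = pci_enable_msix(pdev, entries, ret);

	return ret;	/* 0 on success, negative errno on failure */
}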
+ */ + memset(tim_msix_id, 0, sizeof(tim_msix_id)); + msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; + tim_msix_id[0] = msix_intr_vect; + + tim_msix_id[1] = msix_intr_vect + 1; + vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1]; + + status = vxge_hw_vpath_msix_set( + vdev->vpaths[i].handle, + tim_msix_id, alarm_msix_id); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_msix_set " + "failed with status : %x", status); + kfree(vdev->entries); + kfree(vdev->vxge_entries); + pci_disable_msix(vdev->pdev); + return -ENODEV; + } + } + } + + return ret; +} + +static void vxge_rem_msix_isr(struct vxgedev *vdev) +{ + int intr_cnt; + + for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1); + intr_cnt++) { + if (vdev->vxge_entries[intr_cnt].in_use) { + synchronize_irq(vdev->entries[intr_cnt].vector); + free_irq(vdev->entries[intr_cnt].vector, + vdev->vxge_entries[intr_cnt].arg); + vdev->vxge_entries[intr_cnt].in_use = 0; + } + } + + kfree(vdev->entries); + kfree(vdev->vxge_entries); + vdev->entries = NULL; + vdev->vxge_entries = NULL; + + if (vdev->config.intr_type == MSI_X) + pci_disable_msix(vdev->pdev); +} +#endif + +static void vxge_rem_isr(struct vxgedev *vdev) +{ + struct __vxge_hw_device *hldev; + hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); + +#ifdef CONFIG_PCI_MSI + if (vdev->config.intr_type == MSI_X) { + vxge_rem_msix_isr(vdev); + } else +#endif + if (vdev->config.intr_type == INTA) { + synchronize_irq(vdev->pdev->irq); + free_irq(vdev->pdev->irq, hldev); + } +} + +static int vxge_add_isr(struct vxgedev *vdev) +{ + int ret = 0; + struct __vxge_hw_device *hldev = + (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); +#ifdef CONFIG_PCI_MSI + int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; + u64 function_mode = vdev->config.device_hw_info.function_mode; + int pci_fun = PCI_FUNC(vdev->pdev->devfn); + + if (vdev->config.intr_type == MSI_X) + ret = vxge_enable_msix(vdev); + + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); + if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && + test_and_set_bit(__VXGE_STATE_CARD_UP, + &driver_config->inta_dev_open)) + return VXGE_HW_FAIL; + else { + vxge_debug_init(VXGE_ERR, + "%s: Defaulting to INTA", VXGE_DRIVER_NAME); + vdev->config.intr_type = INTA; + vxge_hw_device_set_intr_type(vdev->devh, + VXGE_HW_INTR_MODE_IRQLINE); + vxge_close_vpaths(vdev, 1); + vdev->no_of_vpath = 1; + vdev->stats.vpaths_open = 1; + } + } + + if (vdev->config.intr_type == MSI_X) { + for (intr_idx = 0; + intr_idx < (vdev->no_of_vpath * + VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { + + msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE; + irq_req = 0; + + switch (msix_idx) { + case 0: + snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, + "%s:vxge fn: %d vpath: %d Tx MSI-X: %d", + vdev->ndev->name, pci_fun, vp_idx, + vdev->entries[intr_cnt].entry); + ret = request_irq( + vdev->entries[intr_cnt].vector, + vxge_tx_msix_handle, 0, + vdev->desc[intr_cnt], + &vdev->vpaths[vp_idx].fifo); + vdev->vxge_entries[intr_cnt].arg = + &vdev->vpaths[vp_idx].fifo; + irq_req = 1; + break; + case 1: + snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, + "%s:vxge fn: %d vpath: %d Rx MSI-X: %d", + vdev->ndev->name, pci_fun, vp_idx, + vdev->entries[intr_cnt].entry); + ret = request_irq( + vdev->entries[intr_cnt].vector, + vxge_rx_msix_napi_handle, + 0, + vdev->desc[intr_cnt], + &vdev->vpaths[vp_idx].ring); + vdev->vxge_entries[intr_cnt].arg = + &vdev->vpaths[vp_idx].ring; + 
irq_req = 1;
+				break;
+			}
+
+			if (ret) {
+				vxge_debug_init(VXGE_ERR,
+					"%s: MSIX - %d Registration failed",
+					vdev->ndev->name, intr_cnt);
+				vxge_rem_msix_isr(vdev);
+				if ((function_mode ==
+					VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
+					test_and_set_bit(__VXGE_STATE_CARD_UP,
+						&driver_config->inta_dev_open))
+					return VXGE_HW_FAIL;
+				else {
+					vxge_hw_device_set_intr_type(
+						vdev->devh,
+						VXGE_HW_INTR_MODE_IRQLINE);
+					vdev->config.intr_type = INTA;
+					vxge_debug_init(VXGE_ERR,
+						"%s: Defaulting to INTA",
+						vdev->ndev->name);
+					vxge_close_vpaths(vdev, 1);
+					vdev->no_of_vpath = 1;
+					vdev->stats.vpaths_open = 1;
+					goto INTA_MODE;
+				}
+			}
+
+			if (irq_req) {
+				/* We requested this msix interrupt */
+				vdev->vxge_entries[intr_cnt].in_use = 1;
+				vxge_hw_vpath_msix_unmask(
+					vdev->vpaths[vp_idx].handle,
+					intr_idx);
+				intr_cnt++;
+			}
+
+			/* Point to the next vpath handler */
+			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
+				&& (vp_idx < (vdev->no_of_vpath - 1)))
+				vp_idx++;
+		}
+
+		intr_cnt = vdev->max_vpath_supported * 2;
+		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
+			"%s:vxge Alarm fn: %d MSI-X: %d",
+			vdev->ndev->name, pci_fun,
+			vdev->entries[intr_cnt].entry);
+		/* For Alarm interrupts */
+		ret = request_irq(vdev->entries[intr_cnt].vector,
+					vxge_alarm_msix_handle, 0,
+					vdev->desc[intr_cnt],
+					&vdev->vpaths[vp_idx]);
+		if (ret) {
+			vxge_debug_init(VXGE_ERR,
+				"%s: MSIX - %d Registration failed",
+				vdev->ndev->name, intr_cnt);
+			vxge_rem_msix_isr(vdev);
+			if ((function_mode ==
+				VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
+				test_and_set_bit(__VXGE_STATE_CARD_UP,
+						&driver_config->inta_dev_open))
+				return VXGE_HW_FAIL;
+			else {
+				vxge_hw_device_set_intr_type(vdev->devh,
+						VXGE_HW_INTR_MODE_IRQLINE);
+				vdev->config.intr_type = INTA;
+				vxge_debug_init(VXGE_ERR,
+					"%s: Defaulting to INTA",
+					vdev->ndev->name);
+				vxge_close_vpaths(vdev, 1);
+				vdev->no_of_vpath = 1;
+				vdev->stats.vpaths_open = 1;
+				goto INTA_MODE;
+			}
+		}
+
+		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
+					intr_idx - 2);
+		vdev->vxge_entries[intr_cnt].in_use = 1;
+		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+	}
+INTA_MODE:
+#endif
+	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
+
+	if (vdev->config.intr_type == INTA) {
+		ret = request_irq((int) vdev->pdev->irq,
+			vxge_isr_napi,
+			IRQF_SHARED, vdev->desc[0], hldev);
+		if (ret) {
+			vxge_debug_init(VXGE_ERR,
+				"%s %s-%d: ISR registration failed",
+				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
+			return -ENODEV;
+		}
+		vxge_debug_init(VXGE_TRACE,
+			"new %s-%d line allocated",
+			"IRQ", vdev->pdev->irq);
+	}
+
+	return VXGE_HW_OK;
+}
+
+static void vxge_poll_vp_reset(unsigned long data)
+{
+	struct vxgedev *vdev = (struct vxgedev *)data;
+	int i, j = 0;
+
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		if (test_bit(i, &vdev->vp_reset)) {
+			vxge_reset_vpath(vdev, i);
+			j++;
+		}
+	}
+	if (j && (vdev->config.intr_type != MSI_X)) {
+		vxge_hw_device_unmask_all(vdev->devh);
+		vxge_hw_device_flush_io(vdev->devh);
+	}
+
+	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
+}
+
+static void vxge_poll_vp_lockup(unsigned long data)
+{
+	struct vxgedev *vdev = (struct vxgedev *)data;
+	int i;
+	struct vxge_ring *ring;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		ring = &vdev->vpaths[i].ring;
+		/* Did this vpath receive any packets? */
+		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
+			status = vxge_hw_vpath_check_leak(ring->handle);
+
+			/* Did it receive any packets last time? */
((VXGE_HW_FAIL == status) &&
+				(VXGE_HW_FAIL == ring->last_status)) {
+
+				/* schedule vpath reset */
+				if (!test_and_set_bit(i, &vdev->vp_reset)) {
+
+					/* disable interrupts for this vpath */
+					vxge_vpath_intr_disable(vdev, i);
+
+					/* stop the queue for this vpath */
+					vxge_stop_tx_queue(&vdev->vpaths[i].
+							fifo);
+					continue;
+				}
+			}
+		}
+		ring->stats.prev_rx_frms = ring->stats.rx_frms;
+		ring->last_status = status;
+	}
+
+	/* Check every 1 millisecond */
+	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
+}
+
+/**
+ * vxge_open
+ * @dev: pointer to the device structure.
+ *
+ * This function is the open entry point of the driver. It mainly calls a
+ * function to allocate Rx buffers and inserts them into the buffer
+ * descriptors and then enables the Rx part of the NIC.
+ * Return value: '0' on success and an appropriate negative errno value
+ * as defined in errno.h on failure.
+ */
+int
+vxge_open(struct net_device *dev)
+{
+	enum vxge_hw_status status;
+	struct vxgedev *vdev;
+	struct __vxge_hw_device *hldev;
+	int ret = 0;
+	int i;
+	u64 val64, function_mode;
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s: %s:%d", dev->name, __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	function_mode = vdev->config.device_hw_info.function_mode;
+
+	/* make sure the link is off by default every time the NIC is
+	 * initialized */
+	netif_carrier_off(dev);
+
+	/* Check for another device already open with INTA */
+	if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
+		test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
+		ret = -EPERM;
+		goto out0;
+	}
+
+	/* Open VPATHs */
+	status = vxge_open_vpaths(vdev);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"%s: fatal: Vpath open failed", vdev->ndev->name);
+		ret = -EPERM;
+		goto out0;
+	}
+
+	vdev->mtu = dev->mtu;
+
+	status = vxge_add_isr(vdev);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"%s: fatal: ISR add failed", dev->name);
+		ret = -EPERM;
+		goto out1;
+	}
+
+	if (vdev->config.intr_type != MSI_X) {
+		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
+			vdev->config.napi_weight);
+		napi_enable(&vdev->napi);
+	} else {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
+				vxge_poll_msix, vdev->config.napi_weight);
+			napi_enable(&vdev->vpaths[i].ring.napi);
+		}
+	}
+
+	/* configure RTH */
+	if (vdev->config.rth_steering) {
+		status = vxge_rth_configure(vdev);
+		if (status != VXGE_HW_OK) {
+			vxge_debug_init(VXGE_ERR,
+				"%s: fatal: RTH configuration failed",
+				dev->name);
+			ret = -EPERM;
+			goto out2;
+		}
+	}
+
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		/* set initial mtu before enabling the device */
+		status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
+						vdev->mtu);
+		if (status != VXGE_HW_OK) {
+			vxge_debug_init(VXGE_ERR,
+				"%s: fatal: can not set new MTU", dev->name);
+			ret = -EPERM;
+			goto out2;
+		}
+	}
+
+	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
+	vxge_debug_init(vdev->level_trace,
+		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
+	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
+
+	/* Reprogram the DA table with populated mac addresses */
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
+		vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
+	}
+
+	/* Enable vpath to sniff all unicast/multicast traffic that is not
+	 * addressed to them. We allow promiscuous mode for the PF only.
+	 */
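Writing rxmac_authorize_all_addr/vid this way sets one bit per virtual path in a single 64-bit management register value. A self-contained sketch of the mask construction is below; the shift direction is a stand-in, since the real bit position comes from the VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP() macro.

#include <stdio.h>
#include <stdint.h>

#define MAX_VIRTUAL_PATHS 17	/* mirrors VXGE_HW_MAX_VIRTUAL_PATHS */

/* illustrative stand-in for the hardware bit-position macro */
static uint64_t authorize_all_addr_vp(int vp)
{
	return (uint64_t)1 << vp;
}

int main(void)
{
	uint64_t val64 = 0;
	int i;

	for (i = 0; i < MAX_VIRTUAL_PATHS; i++)
		val64 |= authorize_all_addr_vp(i);

	printf("authorize-all mask: 0x%016llx\n", (unsigned long long)val64);
	return 0;
}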
+
+	val64 = 0;
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
+
+	vxge_hw_mgmt_reg_write(vdev->devh,
+		vxge_hw_mgmt_reg_type_mrpcim,
+		0,
+		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
+			rxmac_authorize_all_addr),
+		val64);
+
+	vxge_hw_mgmt_reg_write(vdev->devh,
+		vxge_hw_mgmt_reg_type_mrpcim,
+		0,
+		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
+			rxmac_authorize_all_vid),
+		val64);
+
+	vxge_set_multicast(dev);
+
+	/* Enabling Bcast and mcast for all vpaths */
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
+		if (status != VXGE_HW_OK)
+			vxge_debug_init(VXGE_ERR,
+				"%s : Can not enable bcast for vpath "
+				"id %d", dev->name, i);
+		if (vdev->config.addr_learn_en) {
+			status =
+			vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR,
+					"%s : Can not enable mcast for vpath "
+					"id %d", dev->name, i);
+		}
+	}
+
+	vxge_hw_device_setpause_data(vdev->devh, 0,
+		vdev->config.tx_pause_enable,
+		vdev->config.rx_pause_enable);
+
+	if (vdev->vp_reset_timer.function == NULL)
+		vxge_os_timer(vdev->vp_reset_timer,
+			vxge_poll_vp_reset, vdev, (HZ/2));
+
+	if (vdev->vp_lockup_timer.function == NULL)
+		vxge_os_timer(vdev->vp_lockup_timer,
+			vxge_poll_vp_lockup, vdev, (HZ/2));
+
+	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
+	smp_wmb();
+
+	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
+		netif_carrier_on(vdev->ndev);
+		printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+		vdev->stats.link_up++;
+	}
+
+	vxge_hw_device_intr_enable(vdev->devh);
+
+	smp_wmb();
+
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		vxge_hw_vpath_enable(vdev->vpaths[i].handle);
+		smp_wmb();
+		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
+	}
+
+	vxge_start_all_tx_queue(vdev);
+	goto out0;
+
+out2:
+	vxge_rem_isr(vdev);
+
+	/* Disable napi */
+	if (vdev->config.intr_type != MSI_X)
+		napi_disable(&vdev->napi);
+	else {
+		for (i = 0; i < vdev->no_of_vpath; i++)
+			napi_disable(&vdev->vpaths[i].ring.napi);
+	}
+
+out1:
+	vxge_close_vpaths(vdev, 0);
+out0:
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s: %s:%d Exiting...",
+		dev->name, __func__, __LINE__);
+	return ret;
+}
+
+/* Loop through the mac address list and delete all the entries */
+void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+{
+	struct list_head *entry, *next;
+	if (list_empty(&vpath->mac_addr_list))
+		return;
+
+	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+		list_del(entry);
+		kfree((struct vxge_mac_addrs *)entry);
+	}
+}
+
+static void vxge_napi_del_all(struct vxgedev *vdev)
+{
+	int i;
+	if (vdev->config.intr_type != MSI_X)
+		netif_napi_del(&vdev->napi);
+	else {
+		for (i = 0; i < vdev->no_of_vpath; i++)
+			netif_napi_del(&vdev->vpaths[i].ring.napi);
+	}
+	return;
+}
+
+int do_vxge_close(struct net_device *dev, int do_io)
+{
+	enum vxge_hw_status status;
+	struct vxgedev *vdev;
+	struct __vxge_hw_device *hldev;
+	int i;
+	u64 val64, vpath_vector;
+	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+		dev->name, __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
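vxge_open() above uses the kernel's usual label-per-resource unwind: every acquisition that succeeds moves the failure target one label later, so teardown always runs in reverse order of setup and each resource is released exactly once. A compact standalone sketch of the same shape, with invented resource names:

#include <stdio.h>

static int acquire(const char *what, int ok)
{
	printf("acquire %s: %s\n", what, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static int open_device(int fail_at)
{
	int ret = 0;

	if (acquire("vpaths", fail_at != 1)) { ret = -1; goto out0; }
	if (acquire("isr", fail_at != 2))    { ret = -1; goto out1; }
	if (acquire("rth", fail_at != 3))    { ret = -1; goto out2; }
	return 0;			/* fully open */

out2:
	printf("release isr\n");	/* undo step 2 */
out1:
	printf("release vpaths\n");	/* undo step 1 */
out0:
	return ret;
}

int main(void)
{
	return open_device(3) ? 1 : 0;	/* fail at RTH, watch the unwind */
}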
+
+	/* If the vxge_handle_crit_err task is executing,
+	 * wait till it completes. */
+	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
+		msleep(50);
+
+	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+	if (do_io) {
+		/* Put the vpath back in normal mode */
+		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
+		status = vxge_hw_mgmt_reg_read(vdev->devh,
+				vxge_hw_mgmt_reg_type_mrpcim,
+				0,
+				(ulong)offsetof(
+					struct vxge_hw_mrpcim_reg,
+					rts_mgr_cbasin_cfg),
+				&val64);
+
+		if (status == VXGE_HW_OK) {
+			val64 &= ~vpath_vector;
+			status = vxge_hw_mgmt_reg_write(vdev->devh,
+					vxge_hw_mgmt_reg_type_mrpcim,
+					0,
+					(ulong)offsetof(
+						struct vxge_hw_mrpcim_reg,
+						rts_mgr_cbasin_cfg),
+					val64);
+		}
+
+		/* Remove function 0 from promiscuous mode */
+		vxge_hw_mgmt_reg_write(vdev->devh,
+			vxge_hw_mgmt_reg_type_mrpcim,
+			0,
+			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
+				rxmac_authorize_all_addr),
+			0);
+
+		vxge_hw_mgmt_reg_write(vdev->devh,
+			vxge_hw_mgmt_reg_type_mrpcim,
+			0,
+			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
+				rxmac_authorize_all_vid),
+			0);
+
+		smp_wmb();
+	}
+	del_timer_sync(&vdev->vp_lockup_timer);
+
+	del_timer_sync(&vdev->vp_reset_timer);
+
+	/* Disable napi */
+	if (vdev->config.intr_type != MSI_X)
+		napi_disable(&vdev->napi);
+	else {
+		for (i = 0; i < vdev->no_of_vpath; i++)
+			napi_disable(&vdev->vpaths[i].ring.napi);
+	}
+
+	netif_carrier_off(vdev->ndev);
+	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
+	vxge_stop_all_tx_queue(vdev);
+
+	/* Note that at this point xmit() is stopped by upper layer */
+	if (do_io)
+		vxge_hw_device_intr_disable(vdev->devh);
+
+	mdelay(1000);
+
+	vxge_rem_isr(vdev);
+
+	vxge_napi_del_all(vdev);
+
+	if (do_io)
+		vxge_reset_all_vpaths(vdev);
+
+	vxge_close_vpaths(vdev, 0);
+
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
+
+	clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
+	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
+
+	return 0;
+}
+
+/**
+ * vxge_close
+ * @dev: device pointer.
+ *
+ * This is the stop entry point of the driver. It needs to undo exactly
+ * whatever was done by the open entry point, thus it's usually referred to
+ * as the close function. Among other things this function mainly stops the
+ * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
+ * Return value: '0' on success and an appropriate negative errno value
+ * as defined in errno.h on failure.
+ */
+int
+vxge_close(struct net_device *dev)
+{
+	do_vxge_close(dev, 1);
+	return 0;
+}
+
+/**
+ * vxge_change_mtu
+ * @dev: net device pointer.
+ * @new_mtu: the new MTU size for the device.
+ *
+ * A driver entry point to change MTU size for the device. Before changing
+ * the MTU the device must be stopped.
+ */ +static int vxge_change_mtu(struct net_device *dev, int new_mtu) +{ + struct vxgedev *vdev = netdev_priv(dev); + + vxge_debug_entryexit(vdev->level_trace, + "%s:%d", __func__, __LINE__); + if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) { + vxge_debug_init(vdev->level_err, + "%s: mtu size is invalid", dev->name); + return -EPERM; + } + + /* check if device is down already */ + if (unlikely(!is_vxge_card_up(vdev))) { + /* just store new value, will use later on open() */ + dev->mtu = new_mtu; + vxge_debug_init(vdev->level_err, + "%s", "device is down on MTU change"); + return 0; + } + + vxge_debug_init(vdev->level_trace, + "trying to apply new MTU %d", new_mtu); + + if (vxge_close(dev)) + return -EIO; + + dev->mtu = new_mtu; + vdev->mtu = new_mtu; + + if (vxge_open(dev)) + return -EIO; + + vxge_debug_init(vdev->level_trace, + "%s: MTU changed to %d", vdev->ndev->name, new_mtu); + + vxge_debug_entryexit(vdev->level_trace, + "%s:%d Exiting...", __func__, __LINE__); + + return 0; +} + +/** + * vxge_get_stats + * @dev: pointer to the device structure + * + * Updates the device statistics structure. This function updates the device + * statistics structure in the net_device structure and returns a pointer + * to the same. + */ +static struct net_device_stats * +vxge_get_stats(struct net_device *dev) +{ + struct vxgedev *vdev; + struct net_device_stats *net_stats; + int k; + + vdev = netdev_priv(dev); + + net_stats = &vdev->stats.net_stats; + + memset(net_stats, 0, sizeof(struct net_device_stats)); + + for (k = 0; k < vdev->no_of_vpath; k++) { + net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; + net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; + net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; + net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; + net_stats->rx_dropped += + vdev->vpaths[k].ring.stats.rx_dropped; + + net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; + net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; + net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; + } + + return net_stats; +} + +/** + * vxge_ioctl + * @dev: Device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd: This is used to distinguish between the different commands that + * can be passed to the IOCTL functions. + * + * Entry point for the Ioctl. + */ +static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + return -EOPNOTSUPP; +} + +/** + * vxge_tx_watchdog + * @dev: pointer to net device structure + * + * Watchdog for transmit side. + * This function is triggered if the Tx Queue is stopped + * for a pre-defined amount of time when the Interface is still up. + */ +static void +vxge_tx_watchdog(struct net_device *dev) +{ + struct vxgedev *vdev; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + vdev = (struct vxgedev *)netdev_priv(dev); + + vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; + + vxge_reset(vdev); + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); +} + +/** + * vxge_vlan_rx_register + * @dev: net device pointer. 
+ * @grp: vlan group
+ *
+ * VLAN group registration
+ */
+static void
+vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct vxgedev *vdev;
+	struct vxge_vpath *vpath;
+	int vp;
+	u64 vid;
+	enum vxge_hw_status status;
+	int i;
+
+	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+
+	vpath = &vdev->vpaths[0];
+	if ((NULL == grp) && (vpath->is_open)) {
+		/* Get the first vlan */
+		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
+
+		while (status == VXGE_HW_OK) {
+
+			/* Delete this vlan from the vid table */
+			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
+				vpath = &vdev->vpaths[vp];
+				if (!vpath->is_open)
+					continue;
+
+				vxge_hw_vpath_vid_delete(vpath->handle, vid);
+			}
+
+			/* Get the next vlan to be deleted */
+			vpath = &vdev->vpaths[0];
+			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
+		}
+	}
+
+	vdev->vlgrp = grp;
+
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		if (vdev->vpaths[i].is_configured)
+			vdev->vpaths[i].ring.vlgrp = grp;
+	}
+
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s:%d Exiting...", __func__, __LINE__);
+}
+
+/**
+ * vxge_vlan_rx_add_vid
+ * @dev: net device pointer.
+ * @vid: vid
+ *
+ * Add the vlan id to the device's vlan id table
+ */
+static void
+vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct vxgedev *vdev;
+	struct vxge_vpath *vpath;
+	int vp_id;
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+
+	/* Add this vlan to the vid table */
+	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+		vpath = &vdev->vpaths[vp_id];
+		if (!vpath->is_open)
+			continue;
+		vxge_hw_vpath_vid_add(vpath->handle, vid);
+	}
+}
+
+/**
+ * vxge_vlan_rx_kill_vid
+ * @dev: net device pointer.
+ * @vid: vid
+ *
+ * Remove the vlan id from the device's vlan id table
+ */
+static void
+vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct vxgedev *vdev;
+	struct vxge_vpath *vpath;
+	int vp_id;
+
+	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+	vdev = (struct vxgedev *)netdev_priv(dev);
+
+	vlan_group_set_device(vdev->vlgrp, vid, NULL);
+
+	/* Delete this vlan from the vid table */
+	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+		vpath = &vdev->vpaths[vp_id];
+		if (!vpath->is_open)
+			continue;
+		vxge_hw_vpath_vid_delete(vpath->handle, vid);
+	}
+	vxge_debug_entryexit(VXGE_TRACE,
+		"%s:%d Exiting...", __func__, __LINE__);
+}
+
+static const struct net_device_ops vxge_netdev_ops = {
+	.ndo_open               = vxge_open,
+	.ndo_stop               = vxge_close,
+	.ndo_get_stats          = vxge_get_stats,
+	.ndo_start_xmit         = vxge_xmit,
+	.ndo_validate_addr      = eth_validate_addr,
+	.ndo_set_multicast_list = vxge_set_multicast,
+
+	.ndo_do_ioctl           = vxge_ioctl,
+
+	.ndo_set_mac_address    = vxge_set_mac_addr,
+	.ndo_change_mtu         = vxge_change_mtu,
+	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
+	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
+	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
+
+	.ndo_tx_timeout         = vxge_tx_watchdog,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller    = vxge_netpoll,
+#endif
+};
+
+int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+				   struct vxge_config *config,
+				   int high_dma, int no_of_vpath,
+				   struct vxgedev **vdev_out)
+{
+	struct net_device *ndev;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxgedev *vdev;
+	int i, ret = 0, no_of_queue = 1;
+	u64 stat;
+
+	*vdev_out = NULL;
+	if (config->tx_steering_type == TX_MULTIQ_STEERING)
+		no_of_queue = no_of_vpath;
+
+	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
no_of_queue); + if (ndev == NULL) { + vxge_debug_init( + vxge_hw_device_trace_level_get(hldev), + "%s : device allocation failed", __func__); + ret = -ENODEV; + goto _out0; + } + + vxge_debug_entryexit( + vxge_hw_device_trace_level_get(hldev), + "%s: %s:%d Entering...", + ndev->name, __func__, __LINE__); + + vdev = netdev_priv(ndev); + memset(vdev, 0, sizeof(struct vxgedev)); + + vdev->ndev = ndev; + vdev->devh = hldev; + vdev->pdev = hldev->pdev; + memcpy(&vdev->config, config, sizeof(struct vxge_config)); + vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ + + SET_NETDEV_DEV(ndev, &vdev->pdev->dev); + + ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + /* Driver entry points */ + ndev->irq = vdev->pdev->irq; + ndev->base_addr = (unsigned long) hldev->bar0; + + ndev->netdev_ops = &vxge_netdev_ops; + + ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; + + initialize_ethtool_ops(ndev); + + /* Allocate memory for vpath */ + vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * + no_of_vpath, GFP_KERNEL); + if (!vdev->vpaths) { + vxge_debug_init(VXGE_ERR, + "%s: vpath memory allocation failed", + vdev->ndev->name); + ret = -ENODEV; + goto _out1; + } + + ndev->features |= NETIF_F_SG; + + ndev->features |= NETIF_F_HW_CSUM; + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s : checksuming enabled", __func__); + + if (high_dma) { + ndev->features |= NETIF_F_HIGHDMA; + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s : using High DMA", __func__); + } + + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; + + if (vdev->config.gro_enable) + ndev->features |= NETIF_F_GRO; + + if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) + ndev->real_num_tx_queues = no_of_vpath; + +#ifdef NETIF_F_LLTX + ndev->features |= NETIF_F_LLTX; +#endif + + for (i = 0; i < no_of_vpath; i++) + spin_lock_init(&vdev->vpaths[i].fifo.tx_lock); + + if (register_netdev(ndev)) { + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s: %s : device registration failed!", + ndev->name, __func__); + ret = -ENODEV; + goto _out2; + } + + /* Set the factory defined MAC address initially */ + ndev->addr_len = ETH_ALEN; + + /* Make Link state as off at this point, when the Link change + * interrupt comes the state will be automatically changed to + * the right state. 
+	 */
+	netif_carrier_off(ndev);
+
+	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
+		"%s: Ethernet device registered",
+		ndev->name);
+
+	*vdev_out = vdev;
+
+	/* Resetting the Device stats */
+	status = vxge_hw_mrpcim_stats_access(
+		hldev,
+		VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
+		0,
+		0,
+		&stat);
+
+	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
+		vxge_debug_init(
+			vxge_hw_device_trace_level_get(hldev),
+			"%s: device stats clear returns "
+			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
+
+	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
+		"%s: %s:%d Exiting...",
+		ndev->name, __func__, __LINE__);
+
+	return ret;
+_out2:
+	kfree(vdev->vpaths);
+_out1:
+	free_netdev(ndev);
+_out0:
+	return ret;
+}
+
+/*
+ * vxge_device_unregister
+ *
+ * This function will unregister and free the network device
+ */
+void
+vxge_device_unregister(struct __vxge_hw_device *hldev)
+{
+	struct vxgedev *vdev;
+	struct net_device *dev;
+	char buf[IFNAMSIZ];
+#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
+	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
+	u32 level_trace;
+#endif
+
+	dev = hldev->ndev;
+	vdev = netdev_priv(dev);
+#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
+	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
+	level_trace = vdev->level_trace;
+#endif
+	vxge_debug_entryexit(level_trace,
+		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
+
+	memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+
+	/* in 2.6 will call stop() if device is up */
+	unregister_netdev(dev);
+
+	flush_scheduled_work();
+
+	vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
+	vxge_debug_entryexit(level_trace,
+		"%s: %s:%d Exiting...", buf, __func__, __LINE__);
+}
+
+/*
+ * vxge_callback_crit_err
+ *
+ * This function is called by the alarm handler in interrupt context.
+ * Driver must analyze it based on the event type.
+ */ +static void +vxge_callback_crit_err(struct __vxge_hw_device *hldev, + enum vxge_hw_event type, u64 vp_id) +{ + struct net_device *dev = hldev->ndev; + struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); + int vpath_idx; + + vxge_debug_entryexit(vdev->level_trace, + "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); + + /* Note: This event type should be used for device wide + * indications only - Serious errors, Slot freeze and critical errors + */ + vdev->cric_err_event = type; + + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) + if (vdev->vpaths[vpath_idx].device_id == vp_id) + break; + + if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { + if (type == VXGE_HW_EVENT_SLOT_FREEZE) { + vxge_debug_init(VXGE_ERR, + "%s: Slot is frozen", vdev->ndev->name); + } else if (type == VXGE_HW_EVENT_SERR) { + vxge_debug_init(VXGE_ERR, + "%s: Encountered Serious Error", + vdev->ndev->name); + } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) + vxge_debug_init(VXGE_ERR, + "%s: Encountered Critical Error", + vdev->ndev->name); + } + + if ((type == VXGE_HW_EVENT_SERR) || + (type == VXGE_HW_EVENT_SLOT_FREEZE)) { + if (unlikely(vdev->exec_mode)) + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) { + vxge_hw_device_mask_all(hldev); + if (unlikely(vdev->exec_mode)) + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + } else if ((type == VXGE_HW_EVENT_FIFO_ERR) || + (type == VXGE_HW_EVENT_VPATH_ERR)) { + + if (unlikely(vdev->exec_mode)) + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + else { + /* check if this vpath is already set for reset */ + if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) { + + /* disable interrupts for this vpath */ + vxge_vpath_intr_disable(vdev, vpath_idx); + + /* stop the queue for this vpath */ + vxge_stop_tx_queue(&vdev->vpaths[vpath_idx]. + fifo); + } + } + } + + vxge_debug_entryexit(vdev->level_trace, + "%s: %s:%d Exiting...", + vdev->ndev->name, __func__, __LINE__); +} + +static void verify_bandwidth(void) +{ + int i, band_width, total = 0, equal_priority = 0; + + /* 1. If user enters 0 for some fifo, give equal priority to all */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (bw_percentage[i] == 0) { + equal_priority = 1; + break; + } + } + + if (!equal_priority) { + /* 2. If sum exceeds 100, give equal priority to all */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (bw_percentage[i] == 0xFF) + break; + + total += bw_percentage[i]; + if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) { + equal_priority = 1; + break; + } + } + } + + if (!equal_priority) { + /* Is all the bandwidth consumed? 
*/ + if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) { + if (i < VXGE_HW_MAX_VIRTUAL_PATHS) { + /* Split rest of bw equally among next VPs*/ + band_width = + (VXGE_HW_VPATH_BANDWIDTH_MAX - total) / + (VXGE_HW_MAX_VIRTUAL_PATHS - i); + if (band_width < 2) /* min of 2% */ + equal_priority = 1; + else { + for (; i < VXGE_HW_MAX_VIRTUAL_PATHS; + i++) + bw_percentage[i] = + band_width; + } + } + } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS) + equal_priority = 1; + } + + if (equal_priority) { + vxge_debug_init(VXGE_ERR, + "%s: Assigning equal bandwidth to all the vpaths", + VXGE_DRIVER_NAME); + bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX / + VXGE_HW_MAX_VIRTUAL_PATHS; + for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + bw_percentage[i] = bw_percentage[0]; + } + + return; +} + +/* + * Vpath configuration + */ +static int __devinit vxge_config_vpaths( + struct vxge_hw_device_config *device_config, + u64 vpath_mask, struct vxge_config *config_param) +{ + int i, no_of_vpaths = 0, default_no_vpath = 0, temp; + u32 txdl_size, txdl_per_memblock; + + temp = driver_config->vpath_per_dev; + if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) && + (max_config_dev == VXGE_MAX_CONFIG_DEV)) { + /* No more CPU. Return vpath number as zero.*/ + if (driver_config->g_no_cpus == -1) + return 0; + + if (!driver_config->g_no_cpus) + driver_config->g_no_cpus = num_online_cpus(); + + driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; + if (!driver_config->vpath_per_dev) + driver_config->vpath_per_dev = 1; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + if (!vxge_bVALn(vpath_mask, i, 1)) + continue; + else + default_no_vpath++; + if (default_no_vpath < driver_config->vpath_per_dev) + driver_config->vpath_per_dev = default_no_vpath; + + driver_config->g_no_cpus = driver_config->g_no_cpus - + (driver_config->vpath_per_dev * 2); + if (driver_config->g_no_cpus <= 0) + driver_config->g_no_cpus = -1; + } + + if (driver_config->vpath_per_dev == 1) { + vxge_debug_ll_config(VXGE_TRACE, + "%s: Disable tx and rx steering, " + "as single vpath is configured", VXGE_DRIVER_NAME); + config_param->rth_steering = NO_STEERING; + config_param->tx_steering_type = NO_STEERING; + device_config->rth_en = 0; + } + + /* configure bandwidth */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + device_config->vp_config[i].min_bandwidth = bw_percentage[i]; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + device_config->vp_config[i].vp_id = i; + device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU; + if (no_of_vpaths < driver_config->vpath_per_dev) { + if (!vxge_bVALn(vpath_mask, i, 1)) { + vxge_debug_ll_config(VXGE_TRACE, + "%s: vpath: %d is not available", + VXGE_DRIVER_NAME, i); + continue; + } else { + vxge_debug_ll_config(VXGE_TRACE, + "%s: vpath: %d available", + VXGE_DRIVER_NAME, i); + no_of_vpaths++; + } + } else { + vxge_debug_ll_config(VXGE_TRACE, + "%s: vpath: %d is not configured, " + "max_config_vpath exceeded", + VXGE_DRIVER_NAME, i); + break; + } + + /* Configure Tx fifo's */ + device_config->vp_config[i].fifo.enable = + VXGE_HW_FIFO_ENABLE; + device_config->vp_config[i].fifo.max_frags = + MAX_SKB_FRAGS; + device_config->vp_config[i].fifo.memblock_size = + VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; + + txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd); + txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; + + device_config->vp_config[i].fifo.fifo_blocks = + ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1; + + device_config->vp_config[i].fifo.intr = + VXGE_HW_FIFO_QUEUE_INTR_DISABLE; + + /* 
Configure tti properties */ + device_config->vp_config[i].tti.intr_enable = + VXGE_HW_TIM_INTR_ENABLE; + + device_config->vp_config[i].tti.btimer_val = + (VXGE_TTI_BTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].tti.timer_ac_en = + VXGE_HW_TIM_TIMER_AC_ENABLE; + + /* For msi-x with napi (each vector + has a handler of its own) - + Set CI to OFF for all vpaths */ + device_config->vp_config[i].tti.timer_ci_en = + VXGE_HW_TIM_TIMER_CI_DISABLE; + + device_config->vp_config[i].tti.timer_ri_en = + VXGE_HW_TIM_TIMER_RI_DISABLE; + + device_config->vp_config[i].tti.util_sel = + VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL; + + device_config->vp_config[i].tti.ltimer_val = + (VXGE_TTI_LTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].tti.rtimer_val = + (VXGE_TTI_RTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A; + device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B; + device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C; + device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A; + device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B; + device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C; + device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D; + + /* Configure Rx rings */ + device_config->vp_config[i].ring.enable = + VXGE_HW_RING_ENABLE; + + device_config->vp_config[i].ring.ring_blocks = + VXGE_HW_DEF_RING_BLOCKS; + device_config->vp_config[i].ring.buffer_mode = + VXGE_HW_RING_RXD_BUFFER_MODE_1; + device_config->vp_config[i].ring.rxds_limit = + VXGE_HW_DEF_RING_RXDS_LIMIT; + device_config->vp_config[i].ring.scatter_mode = + VXGE_HW_RING_SCATTER_MODE_A; + + /* Configure rti properties */ + device_config->vp_config[i].rti.intr_enable = + VXGE_HW_TIM_INTR_ENABLE; + + device_config->vp_config[i].rti.btimer_val = + (VXGE_RTI_BTIMER_VAL * 1000)/272; + + device_config->vp_config[i].rti.timer_ac_en = + VXGE_HW_TIM_TIMER_AC_ENABLE; + + device_config->vp_config[i].rti.timer_ci_en = + VXGE_HW_TIM_TIMER_CI_DISABLE; + + device_config->vp_config[i].rti.timer_ri_en = + VXGE_HW_TIM_TIMER_RI_DISABLE; + + device_config->vp_config[i].rti.util_sel = + VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL; + + device_config->vp_config[i].rti.urange_a = + RTI_RX_URANGE_A; + device_config->vp_config[i].rti.urange_b = + RTI_RX_URANGE_B; + device_config->vp_config[i].rti.urange_c = + RTI_RX_URANGE_C; + device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A; + device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B; + device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C; + device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D; + + device_config->vp_config[i].rti.rtimer_val = + (VXGE_RTI_RTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].rti.ltimer_val = + (VXGE_RTI_LTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].rpa_strip_vlan_tag = + vlan_tag_strip; + } + + driver_config->vpath_per_dev = temp; + return no_of_vpaths; +} + +/* initialize device configuratrions */ +static void __devinit vxge_device_config_init( + struct vxge_hw_device_config *device_config, + int *intr_type) +{ + /* Used for CQRQ/SRQ. */ + device_config->dma_blockpool_initial = + VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; + + device_config->dma_blockpool_max = + VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; + + if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) + max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; + +#ifndef CONFIG_PCI_MSI + vxge_debug_init(VXGE_ERR, + "%s: This Kernel does not support " + "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); + *intr_type = INTA; +#endif + + /* Configure whether MSI-X or IRQL. 
*/ + switch (*intr_type) { + case INTA: + device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE; + break; + + case MSI_X: + device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; + break; + } + /* Timer period between device poll */ + device_config->device_poll_millis = VXGE_TIMER_DELAY; + + /* Configure mac based steering. */ + device_config->rts_mac_en = addr_learn_en; + + /* Configure Vpaths */ + device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT; + + vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", + __func__); + vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d", + device_config->dma_blockpool_initial); + vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d", + device_config->dma_blockpool_max); + vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", + device_config->intr_mode); + vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", + device_config->device_poll_millis); + vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d", + device_config->rts_mac_en); + vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", + device_config->rth_en); + vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", + device_config->rth_it_type); +} + +static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) +{ + int i; + + vxge_debug_init(VXGE_TRACE, + "%s: %d Vpath(s) opened", + vdev->ndev->name, vdev->no_of_vpath); + + switch (vdev->config.intr_type) { + case INTA: + vxge_debug_init(VXGE_TRACE, + "%s: Interrupt type INTA", vdev->ndev->name); + break; + + case MSI_X: + vxge_debug_init(VXGE_TRACE, + "%s: Interrupt type MSI-X", vdev->ndev->name); + break; + } + + if (vdev->config.rth_steering) { + vxge_debug_init(VXGE_TRACE, + "%s: RTH steering enabled for TCP_IPV4", + vdev->ndev->name); + } else { + vxge_debug_init(VXGE_TRACE, + "%s: RTH steering disabled", vdev->ndev->name); + } + + switch (vdev->config.tx_steering_type) { + case NO_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + break; + case TX_PRIORITY_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Unsupported tx steering option", + vdev->ndev->name); + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + vdev->config.tx_steering_type = 0; + break; + case TX_VLAN_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Unsupported tx steering option", + vdev->ndev->name); + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + vdev->config.tx_steering_type = 0; + break; + case TX_MULTIQ_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Tx multiqueue steering enabled", + vdev->ndev->name); + break; + case TX_PORT_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Tx port steering enabled", + vdev->ndev->name); + break; + default: + vxge_debug_init(VXGE_ERR, + "%s: Unsupported tx steering type", + vdev->ndev->name); + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + vdev->config.tx_steering_type = 0; + } + + if (vdev->config.gro_enable) { + vxge_debug_init(VXGE_ERR, + "%s: Generic receive offload enabled", + vdev->ndev->name); + } else + vxge_debug_init(VXGE_TRACE, + "%s: Generic receive offload disabled", + vdev->ndev->name); + + if (vdev->config.addr_learn_en) + vxge_debug_init(VXGE_TRACE, + "%s: MAC Address learning enabled", vdev->ndev->name); + + vxge_debug_init(VXGE_TRACE, + "%s: Rx doorbell mode enabled", vdev->ndev->name); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!vxge_bVALn(vpath_mask, i, 1)) + continue; + vxge_debug_ll_config(VXGE_TRACE, + "%s: MTU size - %d", vdev->ndev->name, + 
((struct __vxge_hw_device *)(vdev->devh))-> + config.vp_config[i].mtu); + vxge_debug_init(VXGE_TRACE, + "%s: VLAN tag stripping %s", vdev->ndev->name, + ((struct __vxge_hw_device *)(vdev->devh))-> + config.vp_config[i].rpa_strip_vlan_tag + ? "Enabled" : "Disabled"); + vxge_debug_init(VXGE_TRACE, + "%s: Ring blocks : %d", vdev->ndev->name, + ((struct __vxge_hw_device *)(vdev->devh))-> + config.vp_config[i].ring.ring_blocks); + vxge_debug_init(VXGE_TRACE, + "%s: Fifo blocks : %d", vdev->ndev->name, + ((struct __vxge_hw_device *)(vdev->devh))-> + config.vp_config[i].fifo.fifo_blocks); + vxge_debug_ll_config(VXGE_TRACE, + "%s: Max frags : %d", vdev->ndev->name, + ((struct __vxge_hw_device *)(vdev->devh))-> + config.vp_config[i].fifo.max_frags); + break; + } +} + +#ifdef CONFIG_PM +/** + * vxge_pm_suspend - vxge power management suspend entry point + * + */ +static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + return -ENOSYS; +} +/** + * vxge_pm_resume - vxge power management resume entry point + * + */ +static int vxge_pm_resume(struct pci_dev *pdev) +{ + return -ENOSYS; +} + +#endif + +/** + * vxge_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct __vxge_hw_device *hldev = + (struct __vxge_hw_device *) pci_get_drvdata(pdev); + struct net_device *netdev = hldev->ndev; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + /* Bring down the card, while avoiding PCI I/O */ + do_vxge_close(netdev, 0); + } + + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * vxge_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) +{ + struct __vxge_hw_device *hldev = + (struct __vxge_hw_device *) pci_get_drvdata(pdev); + struct net_device *netdev = hldev->ndev; + + struct vxgedev *vdev = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + printk(KERN_ERR "%s: " + "Cannot re-enable device after reset\n", + VXGE_DRIVER_NAME); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + vxge_reset(vdev); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * vxge_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. + */ +static void vxge_io_resume(struct pci_dev *pdev) +{ + struct __vxge_hw_device *hldev = + (struct __vxge_hw_device *) pci_get_drvdata(pdev); + struct net_device *netdev = hldev->ndev; + + if (netif_running(netdev)) { + if (vxge_open(netdev)) { + printk(KERN_ERR "%s: " + "Can't bring device back up after reset\n", + VXGE_DRIVER_NAME); + return; + } + } + + netif_device_attach(netdev); +} + +/** + * vxge_probe + * @pdev : structure containing the PCI related information of the device. + * @pre: List of PCI devices supported by the driver listed in vxge_id_table. 
+ * Description: + * This function is called when a new PCI device gets detected and initializes + * it. + * Return value: + * returns 0 on success and negative on failure. + * + */ +static int __devinit +vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) +{ + struct __vxge_hw_device *hldev; + enum vxge_hw_status status; + int ret; + int high_dma = 0; + u64 vpath_mask = 0; + struct vxgedev *vdev; + struct vxge_config ll_config; + struct vxge_hw_device_config *device_config = NULL; + struct vxge_hw_device_attr attr; + int i, j, no_of_vpath = 0, max_vpath_supported = 0; + u8 *macaddr; + struct vxge_mac_addrs *entry; + static int bus = -1, device = -1; + u8 new_device = 0; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + attr.pdev = pdev; + + if (bus != pdev->bus->number) + new_device = 1; + if (device != PCI_SLOT(pdev->devfn)) + new_device = 1; + + bus = pdev->bus->number; + device = PCI_SLOT(pdev->devfn); + + if (new_device) { + if (driver_config->config_dev_cnt && + (driver_config->config_dev_cnt != + driver_config->total_dev_cnt)) + vxge_debug_init(VXGE_ERR, + "%s: Configured %d of %d devices", + VXGE_DRIVER_NAME, + driver_config->config_dev_cnt, + driver_config->total_dev_cnt); + driver_config->config_dev_cnt = 0; + driver_config->total_dev_cnt = 0; + driver_config->g_no_cpus = 0; + driver_config->vpath_per_dev = max_config_vpath; + } + + driver_config->total_dev_cnt++; + if (++driver_config->config_dev_cnt > max_config_dev) { + ret = 0; + goto _exit0; + } + + device_config = kzalloc(sizeof(struct vxge_hw_device_config), + GFP_KERNEL); + if (!device_config) { + ret = -ENOMEM; + vxge_debug_init(VXGE_ERR, + "device_config : malloc failed %s %d", + __FILE__, __LINE__); + goto _exit0; + } + + memset(&ll_config, 0, sizeof(struct vxge_config)); + ll_config.tx_steering_type = TX_MULTIQ_STEERING; + ll_config.intr_type = MSI_X; + ll_config.napi_weight = NEW_NAPI_WEIGHT; + ll_config.rth_steering = RTH_STEERING; + + /* get the default configuration parameters */ + vxge_hw_device_config_default_get(device_config); + + /* initialize configuration parameters */ + vxge_device_config_init(device_config, &ll_config.intr_type); + + ret = pci_enable_device(pdev); + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s : can not enable PCI device", __func__); + goto _exit0; + } + + if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { + vxge_debug_ll_config(VXGE_TRACE, + "%s : using 64bit DMA", __func__); + + high_dma = 1; + + if (pci_set_consistent_dma_mask(pdev, + 0xffffffffffffffffULL)) { + vxge_debug_init(VXGE_ERR, + "%s : unable to obtain 64bit DMA for " + "consistent allocations", __func__); + ret = -ENOMEM; + goto _exit1; + } + } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) { + vxge_debug_ll_config(VXGE_TRACE, + "%s : using 32bit DMA", __func__); + } else { + ret = -ENOMEM; + goto _exit1; + } + + if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { + vxge_debug_init(VXGE_ERR, + "%s : request regions failed", __func__); + ret = -ENODEV; + goto _exit1; + } + + pci_set_master(pdev); + + attr.bar0 = pci_ioremap_bar(pdev, 0); + if (!attr.bar0) { + vxge_debug_init(VXGE_ERR, + "%s : cannot remap io memory bar0", __func__); + ret = -ENODEV; + goto _exit2; + } + vxge_debug_ll_config(VXGE_TRACE, + "pci ioremap bar0: %p:0x%llx", + attr.bar0, + (unsigned long long)pci_resource_start(pdev, 0)); + + attr.bar1 = pci_ioremap_bar(pdev, 2); + if (!attr.bar1) { + vxge_debug_init(VXGE_ERR, + "%s : cannot remap io memory bar2", __func__); + ret = -ENODEV; + goto _exit3; + } + 
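The probe path above negotiates DMA addressing by trying the widest mask first: if the 64-bit mask is accepted, high_dma is noted so NETIF_F_HIGHDMA can be set later; otherwise it falls back to a 32-bit mask or aborts. A rough standalone model of that decision, with set_dma_mask() as an invented stub for the PCI call:

#include <stdio.h>
#include <stdint.h>

/* stub: succeed when the device can address every bit the mask asks for */
static int set_dma_mask(uint64_t mask, uint64_t device_supports)
{
	return (mask & ~device_supports) ? -1 : 0;	/* 0 on success */
}

int main(void)
{
	uint64_t supports = 0xffffffffffffffffULL;	/* pretend 64-bit device */
	int high_dma = 0;

	if (!set_dma_mask(0xffffffffffffffffULL, supports))
		high_dma = 1;				/* high memory usable */
	else if (set_dma_mask(0xffffffffULL, supports))
		return 1;				/* no usable mask */

	printf("high_dma = %d\n", high_dma);
	return 0;
}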
vxge_debug_ll_config(VXGE_TRACE, + "pci ioremap bar1: %p:0x%llx", + attr.bar1, + (unsigned long long)pci_resource_start(pdev, 2)); + + status = vxge_hw_device_hw_info_get(attr.bar0, + &ll_config.device_hw_info); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: Reading of hardware info failed." + "Please try upgrading the firmware.", VXGE_DRIVER_NAME); + ret = -EINVAL; + goto _exit4; + } + + if (ll_config.device_hw_info.fw_version.major != + VXGE_DRIVER_VERSION_MAJOR) { + vxge_debug_init(VXGE_ERR, + "FW Ver.(maj): %d not driver's expected version: %d", + ll_config.device_hw_info.fw_version.major, + VXGE_DRIVER_VERSION_MAJOR); + ret = -EINVAL; + goto _exit4; + } + + vpath_mask = ll_config.device_hw_info.vpath_mask; + if (vpath_mask == 0) { + vxge_debug_ll_config(VXGE_TRACE, + "%s: No vpaths available in device", VXGE_DRIVER_NAME); + ret = -EINVAL; + goto _exit4; + } + + vxge_debug_ll_config(VXGE_TRACE, + "%s:%d Vpath mask = %llx", __func__, __LINE__, + (unsigned long long)vpath_mask); + + /* Check how many vpaths are available */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!((vpath_mask) & vxge_mBIT(i))) + continue; + max_vpath_supported++; + } + + /* + * Configure vpaths and get driver configured number of vpaths + * which is less than or equal to the maximum vpaths per function. + */ + no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config); + if (!no_of_vpath) { + vxge_debug_ll_config(VXGE_ERR, + "%s: No more vpaths to configure", VXGE_DRIVER_NAME); + ret = 0; + goto _exit4; + } + + /* Setting driver callbacks */ + attr.uld_callbacks.link_up = vxge_callback_link_up; + attr.uld_callbacks.link_down = vxge_callback_link_down; + attr.uld_callbacks.crit_err = vxge_callback_crit_err; + + status = vxge_hw_device_initialize(&hldev, &attr, device_config); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "Failed to initialize device (%d)", status); + ret = -EINVAL; + goto _exit4; + } + + vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); + + /* set private device info */ + pci_set_drvdata(pdev, hldev); + + ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; + ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; + ll_config.addr_learn_en = addr_learn_en; + ll_config.rth_algorithm = RTH_ALG_JENKINS; + ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; + ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; + ll_config.rth_bkt_sz = RTH_BUCKET_SIZE; + ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; + ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; + + if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath, + &vdev)) { + ret = -EINVAL; + goto _exit5; + } + + vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); + VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), + vxge_hw_device_trace_level_get(hldev)); + + /* set private HW device info */ + hldev->ndev = vdev->ndev; + vdev->mtu = VXGE_HW_DEFAULT_MTU; + vdev->bar0 = attr.bar0; + vdev->bar1 = attr.bar1; + vdev->max_vpath_supported = max_vpath_supported; + vdev->no_of_vpath = no_of_vpath; + + /* Virtual Path count */ + for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!vxge_bVALn(vpath_mask, i, 1)) + continue; + if (j >= vdev->no_of_vpath) + 
break;
+
+		vdev->vpaths[j].is_configured = 1;
+		vdev->vpaths[j].device_id = i;
+		vdev->vpaths[j].fifo.driver_id = j;
+		vdev->vpaths[j].ring.driver_id = j;
+		vdev->vpaths[j].vdev = vdev;
+		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
+		memcpy((u8 *)vdev->vpaths[j].macaddr,
+				(u8 *)ll_config.device_hw_info.mac_addrs[i],
+				ETH_ALEN);
+
+		/* Initialize the mac address list header */
+		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
+
+		vdev->vpaths[j].mac_addr_cnt = 0;
+		vdev->vpaths[j].mcast_addr_cnt = 0;
+		j++;
+	}
+	vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
+	vdev->max_config_port = max_config_port;
+
+	vdev->vlan_tag_strip = vlan_tag_strip;
+
+	/* map the hashing selector table to the configured vpaths */
+	for (i = 0; i < vdev->no_of_vpath; i++)
+		vdev->vpath_selector[i] = vpath_selector[i];
+
+	macaddr = (u8 *)vdev->vpaths[0].macaddr;
+
+	ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
+	ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
+	ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
+
+	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
+		vdev->ndev->name, ll_config.device_hw_info.serial_number);
+
+	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
+		vdev->ndev->name, ll_config.device_hw_info.part_number);
+
+	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
+		vdev->ndev->name, ll_config.device_hw_info.product_desc);
+
+	vxge_debug_init(VXGE_TRACE,
+		"%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
+		vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
+		macaddr[3], macaddr[4], macaddr[5]);
+
+	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
+		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
+
+	vxge_debug_init(VXGE_TRACE,
+		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
+		ll_config.device_hw_info.fw_version.version,
+		ll_config.device_hw_info.fw_date.date);
+
+	vxge_print_parm(vdev, vpath_mask);
+
+	/* Store the fw version for ethtool option */
+	strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
+	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
+	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
+
+	/* Copy the station mac address to the list */
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		entry = (struct vxge_mac_addrs *)
+				kzalloc(sizeof(struct vxge_mac_addrs),
+					GFP_KERNEL);
+		if (NULL == entry) {
+			vxge_debug_init(VXGE_ERR,
+				"%s: mac_addr_list : memory allocation failed",
+				vdev->ndev->name);
+			ret = -EPERM;
+			goto _exit6;
+		}
+		macaddr = (u8 *)&entry->macaddr;
+		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
+		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
+		vdev->vpaths[i].mac_addr_cnt = 1;
+	}
+
+	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
+		vdev->ndev->name, __func__, __LINE__);
+
+	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
+	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
+		vxge_hw_device_trace_level_get(hldev));
+
+	return 0;
+
+_exit6:
+	for (i = 0; i < vdev->no_of_vpath; i++)
+		vxge_free_mac_add_list(&vdev->vpaths[i]);
+
+	vxge_device_unregister(hldev);
+_exit5:
+	vxge_hw_device_terminate(hldev);
+_exit4:
+	iounmap(attr.bar1);
+_exit3:
+	iounmap(attr.bar0);
+_exit2:
+	pci_release_regions(pdev);
+_exit1:
+	pci_disable_device(pdev);
+_exit0:
+	kfree(device_config);
+	driver_config->config_dev_cnt--;
+	pci_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+/**
+ * vxge_remove - Free the PCI device
+ * @pdev: structure containing the PCI related information
of the device. + * Description: This function is called by the Pci subsystem to release a + * PCI device and free up all resource held up by the device. + */ +static void __devexit +vxge_remove(struct pci_dev *pdev) +{ + struct __vxge_hw_device *hldev; + struct vxgedev *vdev = NULL; + struct net_device *dev; + int i = 0; +#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ + (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) + u32 level_trace; +#endif + + hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); + + if (hldev == NULL) + return; + dev = hldev->ndev; + vdev = netdev_priv(dev); + +#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ + (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) + level_trace = vdev->level_trace; +#endif + vxge_debug_entryexit(level_trace, + "%s:%d", __func__, __LINE__); + + vxge_debug_init(level_trace, + "%s : removing PCI device...", __func__); + vxge_device_unregister(hldev); + + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_free_mac_add_list(&vdev->vpaths[i]); + vdev->vpaths[i].mcast_addr_cnt = 0; + vdev->vpaths[i].mac_addr_cnt = 0; + } + + kfree(vdev->vpaths); + + iounmap(vdev->bar0); + iounmap(vdev->bar1); + + /* we are safe to free it now */ + free_netdev(dev); + + vxge_debug_init(level_trace, + "%s:%d Device unregistered", __func__, __LINE__); + + vxge_hw_device_terminate(hldev); + + pci_disable_device(pdev); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + vxge_debug_entryexit(level_trace, + "%s:%d Exiting...", __func__, __LINE__); +} + +static struct pci_error_handlers vxge_err_handler = { + .error_detected = vxge_io_error_detected, + .slot_reset = vxge_io_slot_reset, + .resume = vxge_io_resume, +}; + +static struct pci_driver vxge_driver = { + .name = VXGE_DRIVER_NAME, + .id_table = vxge_id_table, + .probe = vxge_probe, + .remove = __devexit_p(vxge_remove), +#ifdef CONFIG_PM + .suspend = vxge_pm_suspend, + .resume = vxge_pm_resume, +#endif + .err_handler = &vxge_err_handler, +}; + +static int __init +vxge_starter(void) +{ + int ret = 0; + char version[32]; + snprintf(version, 32, "%s", DRV_VERSION); + + printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n", + VXGE_DRIVER_NAME); + printk(KERN_CRIT "%s: Driver version: %s\n", + VXGE_DRIVER_NAME, version); + + verify_bandwidth(); + + driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL); + if (!driver_config) + return -ENOMEM; + + ret = pci_register_driver(&vxge_driver); + + if (driver_config->config_dev_cnt && + (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) + vxge_debug_init(VXGE_ERR, + "%s: Configured %d of %d devices", + VXGE_DRIVER_NAME, driver_config->config_dev_cnt, + driver_config->total_dev_cnt); + + if (ret) + kfree(driver_config); + + return ret; +} + +static void __exit +vxge_closer(void) +{ + pci_unregister_driver(&vxge_driver); + kfree(driver_config); +} +module_init(vxge_starter); +module_exit(vxge_closer); diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h new file mode 100644 index 00000000000..9704b2bd432 --- /dev/null +++ b/drivers/net/vxge/vxge-main.h @@ -0,0 +1,557 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. 
This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-main.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2009 Neterion Inc. + ******************************************************************************/ +#ifndef VXGE_MAIN_H +#define VXGE_MAIN_H + +#include "vxge-traffic.h" +#include "vxge-config.h" +#include "vxge-version.h" +#include <linux/list.h> + +#define VXGE_DRIVER_NAME "vxge" +#define VXGE_DRIVER_VENDOR "Neterion, Inc" +#define VXGE_DRIVER_VERSION_MAJOR 0 + +#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\ + VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\ + VXGE_VERSION_FOR + +#define PCI_DEVICE_ID_TITAN_WIN 0x5733 +#define PCI_DEVICE_ID_TITAN_UNI 0x5833 +#define VXGE_USE_DEFAULT 0xffffffff +#define VXGE_HW_VPATH_MSIX_ACTIVE 4 +#define VXGE_HW_RXSYNC_FREQ_CNT 4 +#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ) +#define VXGE_LL_RX_COPY_THRESHOLD 256 +#define VXGE_DEF_FIFO_LENGTH 84 + +#define NO_STEERING 0 +#define PORT_STEERING 0x1 +#define RTH_STEERING 0x2 +#define RX_TOS_STEERING 0x3 +#define RX_VLAN_STEERING 0x4 +#define RTH_BUCKET_SIZE 4 + +#define TX_PRIORITY_STEERING 1 +#define TX_VLAN_STEERING 2 +#define TX_PORT_STEERING 3 +#define TX_MULTIQ_STEERING 4 + +#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE + +#define VXGE_TTI_BTIMER_VAL 250000 + +#define VXGE_TTI_LTIMER_VAL 1000 +#define VXGE_TTI_RTIMER_VAL 0 +#define VXGE_RTI_BTIMER_VAL 250 +#define VXGE_RTI_LTIMER_VAL 100 +#define VXGE_RTI_RTIMER_VAL 0 +#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH +#define VXGE_ISR_POLLING_CNT 8 +#define VXGE_MAX_CONFIG_DEV 0xFF +#define VXGE_EXEC_MODE_DISABLE 0 +#define VXGE_EXEC_MODE_ENABLE 1 +#define VXGE_MAX_CONFIG_PORT 1 +#define VXGE_ALL_VID_DISABLE 0 +#define VXGE_ALL_VID_ENABLE 1 +#define VXGE_PAUSE_CTRL_DISABLE 0 +#define VXGE_PAUSE_CTRL_ENABLE 1 + +#define TTI_TX_URANGE_A 5 +#define TTI_TX_URANGE_B 15 +#define TTI_TX_URANGE_C 40 +#define TTI_TX_UFC_A 5 +#define TTI_TX_UFC_B 40 +#define TTI_TX_UFC_C 60 +#define TTI_TX_UFC_D 100 + +#define RTI_RX_URANGE_A 5 +#define RTI_RX_URANGE_B 15 +#define RTI_RX_URANGE_C 40 +#define RTI_RX_UFC_A 1 +#define RTI_RX_UFC_B 5 +#define RTI_RX_UFC_C 10 +#define RTI_RX_UFC_D 15 + +/* Milli secs timer period */ +#define VXGE_TIMER_DELAY 10000 + +#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE) + +enum vxge_reset_event { + /* reset events */ + VXGE_LL_VPATH_RESET = 0, + VXGE_LL_DEVICE_RESET = 1, + VXGE_LL_FULL_RESET = 2, + VXGE_LL_START_RESET = 3, + VXGE_LL_COMPL_RESET = 4 +}; +/* These flags represent the devices temporary state */ +enum vxge_device_state_t { +__VXGE_STATE_RESET_CARD = 0, +__VXGE_STATE_CARD_UP +}; + +enum vxge_mac_addr_state { + /* mac address states */ + VXGE_LL_MAC_ADDR_IN_LIST = 0, + VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1 +}; + +struct vxge_drv_config { + int config_dev_cnt; + int total_dev_cnt; + unsigned long inta_dev_open; + int g_no_cpus; + unsigned int vpath_per_dev; +}; + +struct macInfo { + unsigned char macaddr[ETH_ALEN]; + unsigned char macmask[ETH_ALEN]; + unsigned int vpath_no; + enum vxge_mac_addr_state state; +}; + +struct vxge_config { + int tx_pause_enable; + int rx_pause_enable; + +#define NEW_NAPI_WEIGHT 64 + int napi_weight; +#define VXGE_GRO_DONOT_AGGREGATE 0 +#define VXGE_GRO_ALWAYS_AGGREGATE 1 + int gro_enable; + int intr_type; +#define INTA 0 
+#define MSI 1 +#define MSI_X 2 + + int addr_learn_en; + + int rth_steering; + int rth_algorithm; + int rth_hash_type_tcpipv4; + int rth_hash_type_ipv4; + int rth_hash_type_tcpipv6; + int rth_hash_type_ipv6; + int rth_hash_type_tcpipv6ex; + int rth_hash_type_ipv6ex; + int rth_bkt_sz; + int rth_jhash_golden_ratio; + int tx_steering_type; + int fifo_indicate_max_pkts; + struct vxge_hw_device_hw_info device_hw_info; +}; + +struct vxge_msix_entry { + /* Mimicing the msix_entry struct of Kernel. */ + u16 vector; + u16 entry; + u16 in_use; + void *arg; +}; + +/* Software Statistics */ + +struct vxge_sw_stats { + /* Network Stats (interface stats) */ + struct net_device_stats net_stats; + + /* Tx */ + u64 tx_frms; + u64 tx_errors; + u64 tx_bytes; + u64 txd_not_free; + u64 txd_out_of_desc; + + /* Virtual Path */ + u64 vpaths_open; + u64 vpath_open_fail; + + /* Rx */ + u64 rx_frms; + u64 rx_errors; + u64 rx_bytes; + u64 rx_mcast; + + /* Misc. */ + u64 link_up; + u64 link_down; + u64 pci_map_fail; + u64 skb_alloc_fail; +}; + +struct vxge_mac_addrs { + struct list_head item; + u64 macaddr; + u64 macmask; + enum vxge_mac_addr_state state; +}; + +struct vxgedev; + +struct vxge_fifo_stats { + u64 tx_frms; + u64 tx_errors; + u64 tx_bytes; + u64 txd_not_free; + u64 txd_out_of_desc; + u64 pci_map_fail; +}; + +struct vxge_fifo { + struct net_device *ndev; + struct pci_dev *pdev; + struct __vxge_hw_fifo *handle; + + /* The vpath id maintained in the driver - + * 0 to 'maximum_vpaths_in_function - 1' + */ + int driver_id; + int tx_steering_type; + int indicate_max_pkts; + spinlock_t tx_lock; + /* flag used to maintain queue state when MULTIQ is not enabled */ +#define VPATH_QUEUE_START 0 +#define VPATH_QUEUE_STOP 1 + int queue_state; + + /* Tx stats */ + struct vxge_fifo_stats stats; +} ____cacheline_aligned; + +struct vxge_ring_stats { + u64 prev_rx_frms; + u64 rx_frms; + u64 rx_errors; + u64 rx_dropped; + u64 rx_bytes; + u64 rx_mcast; + u64 pci_map_fail; + u64 skb_alloc_fail; +}; + +struct vxge_ring { + struct net_device *ndev; + struct pci_dev *pdev; + struct __vxge_hw_ring *handle; + /* The vpath id maintained in the driver - + * 0 to 'maximum_vpaths_in_function - 1' + */ + int driver_id; + + /* copy of the flag indicating whether rx_csum is to be used */ + u32 rx_csum; + + int pkts_processed; + int budget; + int gro_enable; + + struct napi_struct napi; + +#define VXGE_MAX_MAC_ADDR_COUNT 30 + + int vlan_tag_strip; + struct vlan_group *vlgrp; + int rx_vector_no; + enum vxge_hw_status last_status; + + /* Rx stats */ + struct vxge_ring_stats stats; +} ____cacheline_aligned; + +struct vxge_vpath { + + struct vxge_fifo fifo; + struct vxge_ring ring; + + struct __vxge_hw_vpath_handle *handle; + + /* Actual vpath id for this vpath in the device - 0 to 16 */ + int device_id; + int max_mac_addr_cnt; + int is_configured; + int is_open; + struct vxgedev *vdev; + u8 (macaddr)[ETH_ALEN]; + u8 (macmask)[ETH_ALEN]; + +#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 + /* mac addresses currently programmed into NIC */ + u16 mac_addr_cnt; + u16 mcast_addr_cnt; + struct list_head mac_addr_list; + + u32 level_err; + u32 level_trace; +}; +#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \ + for (i = 0; i < vdev->no_of_vpath; i++) { \ + vdev->vpaths[i].level_err = err; \ + vdev->vpaths[i].level_trace = trace; \ + } \ + vdev->level_err = err; \ + vdev->level_trace = trace; \ +} + +struct vxgedev { + struct net_device *ndev; + struct pci_dev *pdev; + struct __vxge_hw_device *devh; + struct vlan_group *vlgrp; + int vlan_tag_strip; 
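struct vxge_msix_entry above shadows the kernel's msix_entry and adds in_use plus the request_irq() cookie (arg), because free_irq() must be handed back exactly that cookie and only vectors that were actually requested may be freed. A userspace sketch of the paired-array teardown; all types here are local stand-ins:

#include <stdio.h>

struct msix_slot { int vector; };		/* stand-in for msix_entry */
struct book_slot { int in_use; void *arg; };	/* stand-in for vxge_msix_entry */

static void free_vector(int vector, void *arg)
{
	printf("free vector %d (arg %p)\n", vector, arg);
}

int main(void)
{
	static int rings[3];
	struct msix_slot entries[3] = { {32}, {33}, {34} };
	struct book_slot book[3] = {
		{1, &rings[0]}, {0, NULL}, {1, &rings[2]},
	};
	int i;

	for (i = 0; i < 3; i++)		/* mirrors vxge_rem_msix_isr() */
		if (book[i].in_use) {
			free_vector(entries[i].vector, book[i].arg);
			book[i].in_use = 0;
		}
	return 0;
}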
+	struct vxge_config config;
+	unsigned long state;
+
+	/* Indicates which vpath to reset */
+	unsigned long vp_reset;
+
+	/* Timer used for polling vpath resets */
+	struct timer_list vp_reset_timer;
+
+	/* Timer used for polling vpath lockup */
+	struct timer_list vp_lockup_timer;
+
+	/*
+	 * Flags to track whether device is in All Multicast
+	 * or in promiscuous mode.
+	 */
+	u16 all_multi_flg;
+
+	/* A flag indicating whether rx_csum is to be used or not. */
+	u32 rx_csum;
+
+	struct vxge_msix_entry *vxge_entries;
+	struct msix_entry *entries;
+	/*
+	 * 4 entries for each vpath * 17 vpaths = 68 total
+	 */
+#define VXGE_MAX_REQUESTED_MSIX	68
+#define VXGE_INTR_STRLEN	80
+	char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
+
+	enum vxge_hw_event cric_err_event;
+
+	int max_vpath_supported;
+	int no_of_vpath;
+
+	struct napi_struct napi;
+	/* A debug option: when enabled, on an error condition the driver
+	 * will:
+	 * - mask all interrupts
+	 * - not clear the source of the alarm
+	 * - gracefully stop all I/O
+	 * A diagnostic dump of registers and stats at this point
+	 * reveals very useful information.
+	 */
+	int exec_mode;
+	int max_config_port;
+	struct vxge_vpath *vpaths;
+
+	struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
+	void __iomem *bar0;
+	void __iomem *bar1;
+	struct vxge_sw_stats stats;
+	int mtu;
+	/* The variables below are used to select the vpath on which to
+	 * transmit a packet */
+	u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
+	u64 vpaths_deployed;
+
+	u32 intr_cnt;
+	u32 level_err;
+	u32 level_trace;
+	char fw_version[VXGE_HW_FW_STRLEN];
+};
+
+struct vxge_rx_priv {
+	struct sk_buff *skb;
+	dma_addr_t data_dma;
+	dma_addr_t data_size;
+};
+
+struct vxge_tx_priv {
+	struct sk_buff *skb;
+	dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
+};
+
+#define VXGE_MODULE_PARAM_INT(p, val) \
+	static int p = val; \
+	module_param(p, int, 0)
+
+#define vxge_os_bug(fmt...)		{ printk(fmt); BUG(); }
+
+#define vxge_os_timer(timer, handle, arg, exp) do { \
+		init_timer(&timer); \
+		timer.function = handle; \
+		timer.data = (unsigned long) arg; \
+		mod_timer(&timer, (jiffies + exp)); \
+	} while (0)
+
+int __devinit vxge_device_register(struct __vxge_hw_device *devh,
+				   struct vxge_config *config,
+				   int high_dma, int no_of_vpath,
+				   struct vxgedev **vdev);
+
+void vxge_device_unregister(struct __vxge_hw_device *devh);
+
+void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
+
+void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
+
+void vxge_callback_link_up(struct __vxge_hw_device *devh);
+
+void vxge_callback_link_down(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+	struct macInfo *mac);
+
+int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+
+int vxge_reset(struct vxgedev *vdev);
+
+enum vxge_hw_status
+vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
+	u8 t_code, void *userdata);
+
+enum vxge_hw_status
+vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
+	enum vxge_hw_fifo_tcode t_code, void *userdata, void **skb_ptr);
+
+int vxge_close(struct net_device *dev);
+
+int vxge_open(struct net_device *dev);
+
+void vxge_close_vpaths(struct vxgedev *vdev, int index);
+
+int vxge_open_vpaths(struct vxgedev *vdev);
+
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
+void vxge_stop_all_tx_queue(struct vxgedev *vdev);
+
+void vxge_stop_tx_queue(struct vxge_fifo *fifo);
+
+void vxge_start_all_tx_queue(struct vxgedev *vdev);
+
+void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb);
+
+enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+	struct macInfo *mac);
+
+int vxge_mac_list_add(struct vxge_vpath *vpath,
+	struct macInfo *mac);
+
+void vxge_free_mac_add_list(struct vxge_vpath *vpath);
+
+enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+
+enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+
+int do_vxge_close(struct net_device *dev, int do_io);
+extern void initialize_ethtool_ops(struct net_device *ndev);
+/**
+ * #define VXGE_DEBUG_INIT: debug for initialization functions
+ * #define VXGE_DEBUG_TX : debug transmit related functions
+ * #define VXGE_DEBUG_RX : debug receive related functions
+ * #define VXGE_DEBUG_MEM : debug memory module
+ * #define VXGE_DEBUG_LOCK: debug locks
+ * #define VXGE_DEBUG_SEM : debug semaphore
+ * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
+ */
+#define VXGE_DEBUG_INIT		0x00000001
+#define VXGE_DEBUG_TX		0x00000002
+#define VXGE_DEBUG_RX		0x00000004
+#define VXGE_DEBUG_MEM		0x00000008
+#define VXGE_DEBUG_LOCK		0x00000010
+#define VXGE_DEBUG_SEM		0x00000020
+#define VXGE_DEBUG_ENTRYEXIT	0x00000040
+#define VXGE_DEBUG_INTR		0x00000080
+#define VXGE_DEBUG_LL_CONFIG	0x00000100
+
+/* Debug tracing for VXGE driver */
+#ifndef VXGE_DEBUG_MASK
+#define VXGE_DEBUG_MASK	0x0
+#endif
+
+#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
+#define vxge_debug_ll_config(level, fmt, ...) \
+	vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_ll_config(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
+#define vxge_debug_init(level, fmt, ...) \
+	vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_init(level, fmt, ...)
+#endif + +#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) +#define vxge_debug_tx(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) +#else +#define vxge_debug_tx(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) +#define vxge_debug_rx(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) +#else +#define vxge_debug_rx(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) +#define vxge_debug_mem(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) +#else +#define vxge_debug_mem(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) +#define vxge_debug_entryexit(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) +#else +#define vxge_debug_entryexit(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) +#define vxge_debug_intr(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) +#else +#define vxge_debug_intr(level, fmt, ...) +#endif + +#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\ + vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \ + level, mask);\ + VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \ + vxge_hw_device_error_level_get((struct __vxge_hw_device *) \ + vdev->devh), \ + vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \ + vdev->devh));\ +} + +#ifdef NETIF_F_GSO +#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size) +#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size) +#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type) +#endif + +#endif diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h new file mode 100644 index 00000000000..10f4da32929 --- /dev/null +++ b/drivers/net/vxge/vxge-reg.h @@ -0,0 +1,4608 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-reg.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O Virtualized + * Server Adapter. + * Copyright(c) 2002-2009 Neterion Inc. 
+ ******************************************************************************/ +#ifndef VXGE_REG_H +#define VXGE_REG_H + +/* + * vxge_mBIT(loc) - set bit at offset + */ +#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc)) + +/* + * vxge_vBIT(val, loc, sz) - set bits at offset + */ +#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz))) +#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz))) + +/* + * vxge_bVALn(bits, loc, n) - Get the value of n bits at location + */ +#define vxge_bVALn(bits, loc, n) \ + ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1)) + +#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \ + vxge_bVALn(bits, 0, 16) +#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \ + vxge_bVALn(bits, 48, 8) +#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \ + vxge_bVALn(bits, 56, 8) + +#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \ + vxge_bVALn(bits, 3, 5) +#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \ + vxge_bVALn(bits, 5, 3) +#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5 + +#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17 +#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17 +#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 +#define VXGE_HW_TITAN_VPATH_REG_SPACES 17 + +#define VXGE_HW_ASIC_MODE_RESERVED 0 +#define VXGE_HW_ASIC_MODE_NO_IOV 1 +#define VXGE_HW_ASIC_MODE_SR_IOV 2 +#define VXGE_HW_ASIC_MODE_MR_IOV 3 + +#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3) +#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19) +#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23) +#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31) + +#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1) + +#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \ + vxge_bVALn(bits, 0, 32) + +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \ + vxge_bVALn(bits, 50, 14) + +#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \ + vxge_bVALn(bits, 0, 17) + +#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \ + vxge_bVALn(bits, 3, 5) + +#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \ + vxge_bVALn(bits, 17, 15) + +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0 +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1 +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2 + +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0 +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1 + +#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \ + (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7)) +#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \ + vxge_bVALn(val, 61, 3) +#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \ + (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7)) +#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \ + vxge_bVALn(val, 61, 3) + +#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits +#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits + +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \ + vxge_bVALn(bits, 1, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \ + vxge_bVALn(bits, 17, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \ + vxge_bVALn(bits, 33, 15) + +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \ + 
vxge_vBIT(val, 49, 15) + +#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0 +#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1 +#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2 + +#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0 +#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2 +#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1 + +#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0 +#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1 + +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13 + +#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ + vxge_bVALn(bits, 0, 48) +#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48) + +#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \ + vxge_bVALn(bits, 0, 48) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \ + vxge_mBIT(54) +#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \ + vxge_bVALn(bits, 55, 5) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \ + vxge_vBIT(val, 55, 5) +#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \ + vxge_bVALn(bits, 62, 2) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2) + +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172 + +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 +#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 +#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ + vxge_bVALn(bits, 0, 48) 
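
/*
 * Editorial illustration (not part of this patch): the steering-table
 * accessors above are thin wrappers around the vxge_mBIT()/vxge_vBIT()/
 * vxge_bVALn() primitives defined at the top of vxge-reg.h, which number
 * bits from the MSB of the 64-bit register word (bit 0 = MSB, bit 63 =
 * LSB), matching the Titan hardware's big-endian convention. A minimal
 * standalone host-side sketch of the pack/extract round trip; the macro
 * bodies below are copied verbatim from this header:
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

#define vxge_mBIT(loc)		(0x8000000000000000ULL >> (loc))
#define vxge_vBIT(val, loc, sz)	(((u64)(val)) << (64-(loc)-(sz)))
#define vxge_bVALn(bits, loc, n) \
	((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))

#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
						vxge_bVALn(bits, 0, 48)
#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)

int main(void)
{
	/* A 48-bit MAC address is packed into the top 48 bits of data0
	 * (MSB offset 0, field width 48), then extracted back out. */
	u64 mac = 0x001122334455ULL;
	u64 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(mac);

	assert(VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0) == mac);

	/* MSB-first numbering: vxge_mBIT(0) is the MSB, vxge_mBIT(63)
	 * the LSB. */
	assert(vxge_mBIT(0) == 0x8000000000000000ULL);
	assert(vxge_mBIT(63) == 1ULL);
	return 0;
}
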
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \ + vxge_bVALn(bits, 7, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \ + vxge_bVALn(bits, 8, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \ + vxge_bVALn(bits, 4, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \ + vxge_vBIT(val, 4, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \ + vxge_bVALn(bits, 10, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \ + vxge_vBIT(val, 10, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \ + vxge_bVALn(bits, 15, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \ + vxge_bVALn(bits, 19, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \ + vxge_bVALn(bits, 23, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \ + vxge_bVALn(bits, 27, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \ + vxge_bVALn(bits, 31, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \ + vxge_bVALn(bits, 35, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \ + vxge_bVALn(bits, 39, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \ + vxge_bVALn(bits, 43, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 0, 8) +#define 
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \ + vxge_bVALn(bits, 8, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \ + vxge_vBIT(val, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \ + vxge_bVALn(bits, 24, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \ + vxge_vBIT(val, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \ + vxge_bVALn(bits, 8, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \ + vxge_vBIT(val, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \ + vxge_bVALn(bits, 24, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \ + vxge_vBIT(val, 25, 7) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \ + vxge_vBIT(val, 0, 32) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \ + vxge_vBIT(val, 32, 32) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \ + vxge_bVALn(bits, 0, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \ + vxge_vBIT(val, 0, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \ + vxge_bVALn(bits, 16, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \ + vxge_bVALn(bits, 32, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \ + vxge_vBIT(val, 32, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \ + vxge_bVALn(bits, 36, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \ + vxge_vBIT(val, 36, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \ + vxge_bVALn(bits, 40, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \ + vxge_vBIT(val, 40, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \ + 
+							vxge_bVALn(bits, 42, 2)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
+							vxge_vBIT(val, 42, 2)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
+							vxge_bVALn(bits, 0, 64)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY(val) vxge_vBIT(val, 0, 64)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
+							vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN	vxge_mBIT(3)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
+							vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN	vxge_mBIT(3)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
+							vxge_bVALn(bits, 0, 48)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
+							vxge_vBIT(val, 0, 48)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
+							vxge_vBIT(val, 62, 2)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
+							vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
+							vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
+							vxge_bVALn(bits, 8, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN	vxge_mBIT(8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
+							vxge_bVALn(bits, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
+							vxge_vBIT(val, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
+							vxge_bVALn(bits, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
+							vxge_vBIT(val, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
+							vxge_bVALn(bits, 24, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN	vxge_mBIT(24)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
+							vxge_bVALn(bits, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
+							vxge_vBIT(val, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
+							vxge_bVALn(bits, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
+							vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
+							vxge_bVALn(bits, 40, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN	vxge_mBIT(40)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
+							vxge_bVALn(bits, 41, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
+							vxge_vBIT(val, 41, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
+							vxge_bVALn(bits, 48, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
+							vxge_vBIT(val, 48, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
+							vxge_bVALn(bits, 56, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN	vxge_mBIT(56)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
+							vxge_bVALn(bits, 57, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
+							vxge_vBIT(val, 57, 7)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER		0
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER		1
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION		2
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE		3
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0			4
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1			5
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2			6
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3			7
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON			1
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF			0
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
+							vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
+							vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
+							vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
+							vxge_vBIT(val, 16, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
+							vxge_bVALn(bits, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
+							vxge_bVALn(bits, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR(val) vxge_vBIT(val, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
+							vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD(val) vxge_vBIT(val, 48, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
+							vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
+							vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
+							vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
+							vxge_vBIT(val, 16, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
+							vxge_bVALn(bits, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR(val) \
+							vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
+							vxge_bVALn(bits, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR(val) \
+							vxge_vBIT(val, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
+							vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD(val) \
+							vxge_vBIT(val, 48, 16)
+
+#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
+							vxge_bVALn(bits, 0, 18)
+
+#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
+							vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits)	vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits)	(bits)
+#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits)	(bits)
+#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\
+) vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
+							vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
+							vxge_bVALn(bits, 32, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits)	vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
+							vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
+							vxge_bVALn(bits, 32, 16)
+
+#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\
+) vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\
+) vxge_bVALn(bits, 32, 32)
+#define \
+VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
+							vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
+							vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
+							vxge_bVALn(bits, 32, 32)
+
+#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits)	vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits)	vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits)	vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits)	vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits)	vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits)	vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits)	vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits)	vxge_bVALn(bits, 32, 16)
+#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits)	vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits)	vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits)	vxge_bVALn(bits, 32, 16)
+
+#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
+							vxge_bVALn(bits, 32, 32)
+
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
+							vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
+							vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
+							vxge_bVALn(bits, 16, 8)
+
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
+							vxge_bVALn(bits, 0, 8)
+#define
VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 8, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 16, 8) + +#define VXGE_HW_CONFIG_PRIV_H + +#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL +#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL +#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL +#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL + +#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL + +#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL + +#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL + +#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL + +/* + * The registers are memory mapped and are native big-endian byte order. The + * little-endian hosts are handled by enabling hardware byte-swapping for + * register and dma operations. + */ +struct vxge_hw_legacy_reg { + + u8 unused00010[0x00010]; + +/*0x00010*/ u64 toc_swapper_fb; +#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00018*/ u64 pifm_rd_swap_en; +#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00020*/ u64 pifm_rd_flip_en; +#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00028*/ u64 pifm_wr_swap_en; +#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00030*/ u64 pifm_wr_flip_en; +#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00038*/ u64 toc_first_pointer; +#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00040*/ u64 host_access_en; +#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64) + +} __packed; + +struct vxge_hw_toc_reg { + + u8 unused00050[0x00050]; + +/*0x00050*/ u64 toc_common_pointer; +#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00058*/ u64 toc_memrepair_pointer; +#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17]; +#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused001e0[0x001e0-0x000e8]; + +/*0x001e0*/ u64 toc_mrpcim_pointer; +#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x001e8*/ u64 toc_srpcim_pointer[17]; +#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused00278[0x00278-0x00270]; + +/*0x00278*/ u64 toc_vpmgmt_pointer[17]; +#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused00390[0x00390-0x00300]; + +/*0x00390*/ u64 toc_vpath_pointer[17]; +#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused004a0[0x004a0-0x00418]; + +/*0x004a0*/ u64 toc_kdfc; +#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61) +#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3) +/*0x004a8*/ u64 toc_usdc; +#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61) +#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3) +/*0x004b0*/ u64 toc_kdfc_vpath_stride; +#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \ + vxge_vBIT(val, 0, 64) +/*0x004b8*/ 
u64 toc_kdfc_fifo_stride; +#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \ + vxge_vBIT(val, 0, 64) + +} __packed; + +struct vxge_hw_common_reg { + + u8 unused00a00[0x00a00]; + +/*0x00a00*/ u64 prc_status1; +#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n) +/*0x00a08*/ u64 rxdcm_reset_in_progress; +#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n) +/*0x00a10*/ u64 replicq_flush_in_progress; +#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) +/*0x00a18*/ u64 rxpe_cmds_reset_in_progress; +#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) +/*0x00a20*/ u64 mxp_cmds_reset_in_progress; +#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) +/*0x00a28*/ u64 noffload_reset_in_progress; +#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n) +/*0x00a30*/ u64 rd_req_in_progress; +#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n) +/*0x00a38*/ u64 rd_req_outstanding; +#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n) +/*0x00a40*/ u64 kdfc_reset_in_progress; +#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) + u8 unused00b00[0x00b00-0x00a48]; + +/*0x00b00*/ u64 one_cfg_vp; +#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n) +/*0x00b08*/ u64 one_common; +#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n) + u8 unused00b80[0x00b80-0x00b10]; + +/*0x00b80*/ u64 tim_int_en; +#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n) +/*0x00b88*/ u64 tim_set_int_en; +#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n) +/*0x00b90*/ u64 tim_clr_int_en; +#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n) +/*0x00b98*/ u64 tim_mask_int_during_reset; +#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n) +/*0x00ba0*/ u64 tim_reset_in_progress; +#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n) +/*0x00ba8*/ u64 tim_outstanding_bmap; +#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n) + u8 unused00c00[0x00c00-0x00bb0]; + +/*0x00c00*/ u64 msg_reset_in_progress; +#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17) +/*0x00c08*/ u64 msg_mxp_mr_ready; +#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n) +/*0x00c10*/ u64 msg_uxp_mr_ready; +#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n) +/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch; +#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n) +/*0x00c20*/ u64 msg_umq_rtl_bwr; +#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n) + u8 unused00d00[0x00d00-0x00c28]; + +/*0x00d00*/ u64 cmn_rsthdlr_cfg0; +#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17) +/*0x00d08*/ u64 cmn_rsthdlr_cfg1; +#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17) +/*0x00d10*/ u64 cmn_rsthdlr_cfg2; +#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17) +/*0x00d18*/ u64 cmn_rsthdlr_cfg3; +#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17) +/*0x00d20*/ u64 cmn_rsthdlr_cfg4; +#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17) + u8 unused00d40[0x00d40-0x00d28]; + +/*0x00d40*/ u64 cmn_rsthdlr_cfg8; +#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17) +/*0x00d48*/ u64 stats_cfg0; +#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17) + u8 unused00da8[0x00da8-0x00d50]; + +/*0x00da8*/ u64 clear_msix_mask_vect[4]; +#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \ + vxge_vBIT(val, 0, 17) +/*0x00dc8*/ u64 
set_msix_mask_vect[4]; +#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17) +/*0x00de8*/ u64 clear_msix_mask_all_vect; +#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \ + vxge_vBIT(val, 0, 17) +/*0x00df0*/ u64 set_msix_mask_all_vect; +#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \ + vxge_vBIT(val, 0, 17) +/*0x00df8*/ u64 mask_vector[4]; +#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17) +/*0x00e18*/ u64 msix_pending_vector[4]; +#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \ + vxge_vBIT(val, 0, 17) +/*0x00e38*/ u64 clr_msix_one_shot_vec[4]; +#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \ + vxge_vBIT(val, 0, 17) +/*0x00e58*/ u64 titan_asic_id; +#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8) +#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8) +/*0x00e60*/ u64 titan_general_int_status; +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0) +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1) +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2) +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \ + vxge_vBIT(val, 3, 17) + u8 unused00e70[0x00e70-0x00e68]; + +/*0x00e70*/ u64 titan_mask_all_int; +#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7) +#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15) + u8 unused00e80[0x00e80-0x00e78]; + +/*0x00e80*/ u64 tim_int_status0; +#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64) +/*0x00e88*/ u64 tim_int_mask0; +#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64) +/*0x00e90*/ u64 tim_int_status1; +#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4) +/*0x00e98*/ u64 tim_int_mask1; +#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4) +/*0x00ea0*/ u64 rti_int_status; +#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17) +/*0x00ea8*/ u64 rti_int_mask; +#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17) +/*0x00eb0*/ u64 adapter_status; +#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0) +#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1) +#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2) +#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3) +#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4) +#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5) +#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6) +#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7) +#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8) +#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9) +#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10) +#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11) +#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12) +#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8) +/*0x00eb8*/ u64 gen_ctrl; +#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0) +#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1) +#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2) +#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3) +#define 
VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4) +#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5) +#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4) + u8 unused00ed0[0x00ed0-0x00ec0]; + +/*0x00ed0*/ u64 adapter_ready; +#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63) +/*0x00ed8*/ u64 outstanding_read; +#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17) +/*0x00ee0*/ u64 vpath_rst_in_prog; +#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17) +/*0x00ee8*/ u64 vpath_reg_modified; +#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17) + u8 unused00fc0[0x00fc0-0x00ef0]; + +/*0x00fc0*/ u64 cp_reset_in_progress; +#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n) + u8 unused01080[0x01080-0x00fc8]; + +/*0x01080*/ u64 xgmac_ready; +#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17) + u8 unused010c0[0x010c0-0x01088]; + +/*0x010c0*/ u64 fbif_ready; +#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17) + u8 unused01100[0x01100-0x010c8]; + +/*0x01100*/ u64 vplane_assignments; +#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5) +/*0x01108*/ u64 vpath_assignments; +#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17) +/*0x01110*/ u64 resource_assignments; +#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \ + vxge_vBIT(val, 0, 17) +/*0x01118*/ u64 host_type_assignments; +#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \ + vxge_vBIT(val, 5, 3) + u8 unused01128[0x01128-0x01120]; + +/*0x01128*/ u64 max_resource_assignments; +#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \ + vxge_vBIT(val, 11, 5) +/*0x01130*/ u64 pf_vpath_assignments; +#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \ + vxge_vBIT(val, 0, 17) + u8 unused01200[0x01200-0x01138]; + +/*0x01200*/ u64 rts_access_icmp; +#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17) +/*0x01208*/ u64 rts_access_tcpsyn; +#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17) +/*0x01210*/ u64 rts_access_zl4pyld; +#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17) +/*0x01218*/ u64 rts_access_l4prtcl_tcp; +#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17) +/*0x01220*/ u64 rts_access_l4prtcl_udp; +#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17) +/*0x01228*/ u64 rts_access_l4prtcl_flex; +#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17) +/*0x01230*/ u64 rts_access_ipfrag; +#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17) + +} __packed; + +struct vxge_hw_memrepair_reg { + u64 unused1; + u64 unused2; +} __packed; + +struct vxge_hw_pcicfgmgmt_reg { + +/*0x00000*/ u64 resource_no; +#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3) +/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask; +#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \ + vxge_vBIT(val, 2, 6) +/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask; +#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \ + vxge_vBIT(val, 2, 6) +/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask; +#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \ + vxge_vBIT(val, 2, 6) +/*0x00020*/ u64 msixgrp_no; +#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11) + +} __packed; + +struct vxge_hw_mrpcim_reg { +/*0x00000*/ u64 g3fbct_int_status; 
+#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x00008*/ u64 g3fbct_int_mask; +/*0x00010*/ u64 g3fbct_err_reg; +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31) +/*0x00018*/ u64 g3fbct_err_mask; +/*0x00020*/ u64 g3fbct_err_alarm; + + u8 unused00a00[0x00a00-0x00028]; + +/*0x00a00*/ u64 wrdma_int_status; +#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0) +#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1) +#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2) +#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3) +#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6) +#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8) +#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9) +#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12) +#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13) +#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14) +#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15) +#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16) +#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17) +/*0x00a08*/ u64 wrdma_int_mask; +/*0x00a10*/ u64 rc_alarm_reg; +#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1) +#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2) +#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3) +#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4) +#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5) +#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6) +#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7) +#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8) +#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9) +#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10) +#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12) +/*0x00a18*/ u64 rc_alarm_mask; +/*0x00a20*/ u64 rc_alarm_alarm; +/*0x00a28*/ u64 rxdrm_sm_err_reg; +#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) +/*0x00a30*/ u64 rxdrm_sm_err_mask; +/*0x00a38*/ u64 rxdrm_sm_err_alarm; +/*0x00a40*/ u64 rxdcm_sm_err_reg; +#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) +/*0x00a48*/ u64 rxdcm_sm_err_mask; +/*0x00a50*/ u64 rxdcm_sm_err_alarm; +/*0x00a58*/ u64 rxdwm_sm_err_reg; +#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) +/*0x00a60*/ u64 rxdwm_sm_err_mask; +/*0x00a68*/ u64 rxdwm_sm_err_alarm; +/*0x00a70*/ u64 rda_err_reg; +#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0) +#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1) +#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2) +#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3) +#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4) +#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5) +#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6) +#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7) +/*0x00a78*/ u64 rda_err_mask; +/*0x00a80*/ u64 rda_err_alarm; +/*0x00a88*/ u64 rda_ecc_db_reg; 
+#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n) +/*0x00a90*/ u64 rda_ecc_db_mask; +/*0x00a98*/ u64 rda_ecc_db_alarm; +/*0x00aa0*/ u64 rda_ecc_sg_reg; +#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n) +/*0x00aa8*/ u64 rda_ecc_sg_mask; +/*0x00ab0*/ u64 rda_ecc_sg_alarm; +/*0x00ab8*/ u64 rqa_err_reg; +#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0) +/*0x00ac0*/ u64 rqa_err_mask; +/*0x00ac8*/ u64 rqa_err_alarm; +/*0x00ad0*/ u64 frf_alarm_reg; +#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n) +/*0x00ad8*/ u64 frf_alarm_mask; +/*0x00ae0*/ u64 frf_alarm_alarm; +/*0x00ae8*/ u64 rocrc_alarm_reg; +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4) +#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5) +#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18) +#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19) +#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20) +#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22) +/*0x00af0*/ u64 rocrc_alarm_mask; +/*0x00af8*/ u64 rocrc_alarm_alarm; +/*0x00b00*/ u64 wde0_alarm_reg; +#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4) +/*0x00b08*/ u64 wde0_alarm_mask; +/*0x00b10*/ u64 wde0_alarm_alarm; +/*0x00b18*/ u64 wde1_alarm_reg; +#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4) +/*0x00b20*/ u64 wde1_alarm_mask; +/*0x00b28*/ u64 wde1_alarm_alarm; +/*0x00b30*/ u64 wde2_alarm_reg; +#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4) +/*0x00b38*/ u64 wde2_alarm_mask; +/*0x00b40*/ u64 wde2_alarm_alarm; +/*0x00b48*/ u64 wde3_alarm_reg; +#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4) +/*0x00b50*/ u64 wde3_alarm_mask; 
+/*0x00b58*/ u64 wde3_alarm_alarm; + + u8 unused00be8[0x00be8-0x00b60]; + +/*0x00be8*/ u64 rx_w_round_robin_0; +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5) +/*0x00bf0*/ u64 rx_w_round_robin_1; +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \ + vxge_vBIT(val, 59, 5) +/*0x00bf8*/ u64 rx_w_round_robin_2; +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c00*/ u64 rx_w_round_robin_3; +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c08*/ u64 rx_w_round_robin_4; +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \ + vxge_vBIT(val, 43, 5) +#define 
VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c10*/ u64 rx_w_round_robin_5; +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c18*/ u64 rx_w_round_robin_6; +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c20*/ u64 rx_w_round_robin_7; +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c28*/ u64 rx_w_round_robin_8; +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c30*/ u64 rx_w_round_robin_9; +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \ + vxge_vBIT(val, 35, 5) +#define 
VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c38*/ u64 rx_w_round_robin_10; +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c40*/ u64 rx_w_round_robin_11; +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c48*/ u64 rx_w_round_robin_12; +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c50*/ u64 rx_w_round_robin_13; +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c58*/ u64 rx_w_round_robin_14; +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \ + vxge_vBIT(val, 19, 5) +#define 
VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c60*/ u64 rx_w_round_robin_15; +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c68*/ u64 rx_w_round_robin_16; +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c70*/ u64 rx_w_round_robin_17; +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c78*/ u64 rx_w_round_robin_18; +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c80*/ u64 rx_w_round_robin_19; +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \ + vxge_vBIT(val, 3, 
5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c88*/ u64 rx_w_round_robin_20; +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c90*/ u64 rx_w_round_robin_21; +#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \ + vxge_vBIT(val, 19, 5) + +#define VXGE_HW_WRR_RING_SERVICE_STATES 171 +#define VXGE_HW_WRR_RING_COUNT 22 + +/*0x00c98*/ u64 rx_queue_priority_0; +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5) +/*0x00ca0*/ u64 rx_queue_priority_1; +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5) +/*0x00ca8*/ u64 rx_queue_priority_2; +#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5) + u8 unused00cc8[0x00cc8-0x00cb0]; + +/*0x00cc8*/ u64 replication_queue_priority; +#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \ + vxge_vBIT(val, 59, 5) +/*0x00cd0*/ u64 rx_queue_select; +#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n) +#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15) +#define 
VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23) +/*0x00cd8*/ u64 rqa_vpbp_ctrl; +#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15) +#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23) +#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31) +/*0x00ce0*/ u64 rx_multi_cast_ctrl; +#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0) +#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1) +#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \ + vxge_vBIT(val, 2, 30) +#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32) +/*0x00ce8*/ u64 wde_prm_ctrl; +#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10) +#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14) +#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32) +#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33) +#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2) +/*0x00cf0*/ u64 noa_ctrl; +#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4) +/*0x00cf8*/ u64 phase_cfg; +#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0) +#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3) +#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7) +#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11) +#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15) +#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19) +#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23) +#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27) +#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31) +#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35) +#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39) +#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43) +/*0x00d00*/ u64 rcq_bypq_cfg; +#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22) +#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9) +#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9) + u8 unused00e00[0x00e00-0x00d08]; + +/*0x00e00*/ u64 doorbell_int_status; +#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7) +#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15) +/*0x00e08*/ u64 doorbell_int_mask; +/*0x00e10*/ u64 kdfc_err_reg; +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39) +/*0x00e18*/ u64 kdfc_err_mask; +/*0x00e20*/ u64 kdfc_err_reg_alarm; +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39) + u8 unused00e40[0x00e40-0x00e28]; 
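[Editor's note] The vxge_mBIT()/vxge_vBIT() helpers used throughout these definitions follow the Titan/X3100 convention of numbering bits from the most-significant end of each 64-bit register, so position 0 is the top bit. A minimal sketch of plausible definitions, assuming the helpers are declared earlier in this header (that part of the file is not shown in this hunk):

    /* Sketch, assuming MSB-first numbering: position 0 is the top bit. */
    #define vxge_mBIT(loc)          (0x8000000000000000ULL >> (loc))
    /* Place an 'sz'-bit value so its first bit lands at position 'loc'. */
    #define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64 - (loc) - (sz)))
    /* Extract an 'n'-bit field starting at position 'loc' (assumed helper). */
    #define vxge_bVALn(bits, loc, n) \
            ((((u64)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))

Under this reading, e.g. VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR above selects bit 39 counted from the MSB, and vxge_vBIT(val, 49, 15) places a 15-bit length field ending at the LSB end of the word.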
+/*0x00e40*/ u64 kdfc_vp_partition_0; +#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0) +#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15) +/*0x00e48*/ u64 kdfc_vp_partition_1; +#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15) +/*0x00e50*/ u64 kdfc_vp_partition_2; +#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15) +/*0x00e58*/ u64 kdfc_vp_partition_3; +#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15) +/*0x00e60*/ u64 kdfc_vp_partition_4; +#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15) +/*0x00e68*/ u64 kdfc_vp_partition_5; +#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15) +/*0x00e70*/ u64 kdfc_vp_partition_6; +#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15) +/*0x00e78*/ u64 kdfc_vp_partition_7; +#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15) +/*0x00e80*/ u64 kdfc_vp_partition_8; +#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15) +/*0x00e88*/ u64 kdfc_w_round_robin_0; +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5) + + u8 unused0f28[0x0f28-0x0e90]; + +/*0x00f28*/ u64 kdfc_w_round_robin_20; +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5) + 
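[Editor's note] Each kdfc_w_round_robin_* register above packs eight 5-bit service-state slots, so programming a weighted round-robin schedule amounts to OR-ing the per-slot macros into one value and doing a single 64-bit MMIO write. A hedged sketch (reg is assumed to point at this ioremapped register block; writeq() is the standard kernel MMIO accessor; the FIFO numbers chosen are purely illustrative):

    /* Illustrative schedule: FIFO 0 gets every other service slot. */
    u64 val = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(0) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(1) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(0) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(2) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(0) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(3) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(0) |
              VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(4);
    writeq(val, &reg->kdfc_w_round_robin_0);

The per-slot weighting falls out of how often a FIFO number recurs across the slot sequence, which is why the header only needs identical 5-bit field macros rather than explicit weight registers.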
+#define VXGE_HW_WRR_FIFO_COUNT 20 + + u8 unused0fc8[0x0fc8-0x0f30]; + +/*0x00fc8*/ u64 kdfc_w_round_robin_40; +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5) + + u8 unused1068[0x01068-0x0fd0]; + +/*0x01068*/ u64 kdfc_entry_type_sel_0; +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2) +/*0x01070*/ u64 kdfc_entry_type_sel_1; +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2) +/*0x01078*/ u64 kdfc_fifo_0_ctrl; +#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176 +#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153 + + u8 unused1100[0x01100-0x1080]; + +/*0x01100*/ u64 kdfc_fifo_17_ctrl; +#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5) + + u8 unused1600[0x01600-0x1108]; + +/*0x01600*/ u64 rxmac_int_status; +#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3) +#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7) +#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \ + vxge_mBIT(11) +/*0x01608*/ u64 rxmac_int_mask; + u8 unused01618[0x01618-0x01610]; + +/*0x01618*/ u64 rxmac_gen_err_reg; +/*0x01620*/ u64 rxmac_gen_err_mask; +/*0x01628*/ u64 rxmac_gen_err_alarm; +/*0x01630*/ u64 rxmac_ecc_err_reg; +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \ + vxge_vBIT(val, 0, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \ + vxge_vBIT(val, 4, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \ + vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \ + vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \ + vxge_vBIT(val, 24, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \ + vxge_vBIT(val, 26, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \ + vxge_vBIT(val, 28, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \ + vxge_vBIT(val, 30, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR 
vxge_mBIT(34) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \ + vxge_vBIT(val, 40, 7) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \ + vxge_vBIT(val, 47, 7) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \ + vxge_vBIT(val, 54, 3) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \ + vxge_vBIT(val, 57, 3) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \ + vxge_mBIT(60) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \ + vxge_mBIT(61) +/*0x01638*/ u64 rxmac_ecc_err_mask; +/*0x01640*/ u64 rxmac_ecc_err_alarm; +/*0x01648*/ u64 rxmac_various_err_reg; +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0) +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1) +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2) +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3) +/*0x01650*/ u64 rxmac_various_err_mask; +/*0x01658*/ u64 rxmac_various_err_alarm; +/*0x01660*/ u64 rxmac_gen_cfg; +#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11) +/*0x01668*/ u64 rxmac_authorize_all_addr; +#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n) +/*0x01670*/ u64 rxmac_authorize_all_vid; +#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n) + u8 unused016c0[0x016c0-0x01678]; + +/*0x016c0*/ u64 rxmac_red_rate_repl_queue; +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35) + u8 unused016e0[0x016e0-0x016c8]; + +/*0x016e0*/ u64 rxmac_cfg0_port[3]; +#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7) +#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27) +#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14) + u8 unused01710[0x01710-0x016f8]; + +/*0x01710*/ u64 rxmac_cfg2_port[3]; +#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3) +/*0x01728*/ u64 rxmac_pause_cfg_port[3]; +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15) +#define 
VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59) + u8 unused01758[0x01758-0x01740]; + +/*0x01758*/ u64 rxmac_red_cfg0_port[3]; +#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n) +/*0x01770*/ u64 rxmac_red_cfg1_port[3]; +#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11) +/*0x01788*/ u64 rxmac_red_cfg2_port[3]; +#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n) +/*0x017a0*/ u64 rxmac_link_util_port[3]; +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \ + vxge_vBIT(val, 1, 7) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \ + vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23) + u8 unused017d0[0x017d0-0x017b8]; + +/*0x017d0*/ u64 rxmac_status_port[3]; +#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3) + u8 unused01800[0x01800-0x017e8]; + +/*0x01800*/ u64 rxmac_rx_pa_cfg0; +#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19) +#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23) +#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59) +#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63) +/*0x01808*/ u64 rxmac_rx_pa_cfg1; +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23) + u8 unused01828[0x01828-0x01810]; + +/*0x01828*/ u64 rts_mgr_cfg0; +#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3) +#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35) +#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39) +#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43) +#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47) +#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51) +#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55) +#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59) 
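[Editor's note] The *_TRASH bits in rts_mgr_cfg0 above are plain single-bit enables that steer the matching traffic class to the trash queue, so configuring the receive-traffic steering manager is an ordinary read-modify-write. A minimal sketch under the same assumptions as before (reg points at this register block; which classes to trash is illustrative only):

    u64 cfg = readq(&reg->rts_mgr_cfg0);
    cfg |= VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH |       /* discard ICMP */
           VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH;      /* discard IP fragments */
    writeq(cfg, &reg->rts_mgr_cfg0);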
+/*0x01830*/ u64 rts_mgr_cfg1; +#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3) +#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7) +/*0x01838*/ u64 rts_mgr_criteria_priority; +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3) +/*0x01840*/ u64 rts_mgr_da_pause_cfg; +#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17) +/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg; +#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \ + vxge_vBIT(val, 0, 17) + u8 unused01890[0x01890-0x01850]; +/*0x01890*/ u64 rts_mgr_cbasin_cfg; + u8 unused01968[0x01968-0x01898]; + +/*0x01968*/ u64 dbg_stat_rx_any_frms; +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \ + vxge_vBIT(val, 16, 8) + u8 unused01a00[0x01a00-0x01970]; + +/*0x01a00*/ u64 rxmac_red_rate_vp[17]; +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4) + u8 unused01e00[0x01e00-0x01a88]; + +/*0x01e00*/ u64 xgmac_int_status; +#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3) +#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \ + vxge_mBIT(7) +#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \ + vxge_mBIT(11) +#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15) +#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19) +#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23) +/*0x01e08*/ u64 xgmac_int_mask; +/*0x01e10*/ u64 xmac_gen_err_reg; +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \ + vxge_mBIT(7) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \ + vxge_mBIT(11) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \ + vxge_mBIT(19) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \ + vxge_mBIT(23) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27) +#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \ + vxge_vBIT(val, 40, 2) +#define 
VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \ + vxge_vBIT(val, 42, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \ + vxge_vBIT(val, 44, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \ + vxge_vBIT(val, 46, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \ + vxge_vBIT(val, 48, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \ + vxge_vBIT(val, 50, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \ + vxge_vBIT(val, 52, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \ + vxge_vBIT(val, 54, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \ + vxge_vBIT(val, 56, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \ + vxge_vBIT(val, 58, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63) +/*0x01e18*/ u64 xmac_gen_err_mask; +/*0x01e20*/ u64 xmac_gen_err_alarm; +/*0x01e28*/ u64 xmac_link_err_port_reg[2]; +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \ + vxge_mBIT(19) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \ + vxge_mBIT(47) +/*0x01e30*/ u64 xmac_link_err_port_mask[2]; +/*0x01e38*/ u64 xmac_link_err_port_alarm[2]; +/*0x01e58*/ u64 xgxs_gen_err_reg; +#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63) +/*0x01e60*/ u64 xgxs_gen_err_mask; +/*0x01e68*/ u64 xgxs_gen_err_alarm; +/*0x01e70*/ u64 asic_ntwk_err_reg; +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23) +/*0x01e78*/ u64 asic_ntwk_err_mask; +/*0x01e80*/ u64 asic_ntwk_err_alarm; +/*0x01e88*/ u64 asic_gpio_err_reg; +#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n) +/*0x01e90*/ u64 asic_gpio_err_mask; +/*0x01e98*/ u64 asic_gpio_err_alarm; +/*0x01ea0*/ u64 xgmac_gen_status; +#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3) +#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11) +/*0x01ea8*/ u64 xgmac_gen_fw_memo_status; +#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \ + vxge_vBIT(val, 0, 17) +/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask; +#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64) +/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status; +#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \ + vxge_vBIT(val, 0, 17) +/*0x01ec0*/ u64 xgmac_main_cfg_port[2]; +#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3) + u8 unused01f40[0x01f40-0x01ed0]; + +/*0x01f40*/ 
u64 xmac_gen_cfg; +#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2) +#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7) +#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27) +#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4) +#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4) +/*0x01f48*/ u64 xmac_timestamp; +#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3) +#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19) +#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16) +/*0x01f50*/ u64 xmac_stats_gen_cfg; +#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15) +/*0x01f58*/ u64 xmac_stats_sys_cmd; +#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15) +#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8) +/*0x01f60*/ u64 xmac_stats_sys_data; +#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64) + u8 unused01f80[0x01f80-0x01f68]; + +/*0x01f80*/ u64 asic_ntwk_ctrl; +#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3) +#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11) +#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15) +/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info; +#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n) +/*0x01f90*/ u64 asic_ntwk_cfg_port_num; +#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n) +/*0x01f98*/ u64 xmac_cfg_port[3]; +#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3) +#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7) +#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11) +#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15) +/*0x01fb0*/ u64 xmac_station_addr_port[2]; +#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48) + u8 unused02020[0x02020-0x01fc0]; + +/*0x02020*/ u64 lag_cfg; +#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3) +#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11) +#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15) +#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19) +/*0x02028*/ u64 lag_status; +#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3) +#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \ + vxge_vBIT(val, 8, 8) +/*0x02030*/ u64 lag_active_passive_cfg; +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \ + vxge_vBIT(val, 32, 16) + u8 unused02040[0x02040-0x02038]; + +/*0x02040*/ u64 lag_lacp_cfg; +#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3) +#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7) +#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11) +#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15) 
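[Editor's note] lag_cfg above pairs a global enable bit with a 2-bit mode field, and LACP has its own enable plus a begin/restart bit in lag_lacp_cfg, so bringing up link aggregation is two writes. A hedged sketch; the encoding of the mode field is not spelled out in this hunk, so the value 1 below is purely illustrative:

    u64 lag = VXGE_HW_LAG_CFG_EN |          /* enable link aggregation */
              VXGE_HW_LAG_CFG_MODE(1);      /* illustrative mode value */
    writeq(lag, &reg->lag_cfg);

    /* LACP is enabled separately; LACP_BEGIN (re)starts the protocol. */
    writeq(VXGE_HW_LAG_LACP_CFG_EN | VXGE_HW_LAG_LACP_CFG_LACP_BEGIN,
           &reg->lag_lacp_cfg);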
+/*0x02048*/ u64 lag_timer_cfg_1; +#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16) +/*0x02050*/ u64 lag_timer_cfg_2; +#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16) +/*0x02058*/ u64 lag_sys_id; +#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48) +#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51) +#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55) +/*0x02060*/ u64 lag_sys_cfg; +#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16) + u8 unused02070[0x02070-0x02068]; + +/*0x02070*/ u64 lag_aggr_addr_cfg[2]; +#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48) +#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51) +#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55) +/*0x02080*/ u64 lag_aggr_id_cfg[2]; +#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16) +/*0x02090*/ u64 lag_aggr_admin_key[2]; +#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16) +/*0x020a0*/ u64 lag_aggr_alt_admin_key; +#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19) +/*0x020a8*/ u64 lag_aggr_oper_key[2]; +#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16) +/*0x020b8*/ u64 lag_aggr_partner_sys_id[2]; +#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48) +/*0x020c8*/ u64 lag_aggr_partner_info[2]; +#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \ + vxge_vBIT(val, 16, 16) +/*0x020d8*/ u64 lag_aggr_state[2]; +#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3) +#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7) +#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11) +#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15) + u8 unused020f0[0x020f0-0x020e8]; + +/*0x020f0*/ u64 lag_port_cfg[2]; +#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15) +/*0x02100*/ u64 lag_port_actor_admin_cfg[2]; +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16) +/*0x02110*/ u64 lag_port_actor_admin_state[2]; +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED 
vxge_mBIT(31) +/*0x02120*/ u64 lag_port_partner_admin_sys_id[2]; +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48) +/*0x02130*/ u64 lag_port_partner_admin_cfg[2]; +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \ + vxge_vBIT(val, 48, 16) +/*0x02140*/ u64 lag_port_partner_admin_state[2]; +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31) +/*0x02150*/ u64 lag_port_to_aggr[2]; +#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19) +/*0x02160*/ u64 lag_port_actor_oper_key[2]; +#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16) +/*0x02170*/ u64 lag_port_actor_oper_state[2]; +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31) +/*0x02180*/ u64 lag_port_partner_oper_sys_id[2]; +#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \ + vxge_vBIT(val, 0, 48) +/*0x02190*/ u64 lag_port_partner_oper_info[2]; +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \ + vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \ + vxge_vBIT(val, 48, 16) +/*0x021a0*/ u64 lag_port_partner_oper_state[2]; +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \ + vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31) +/*0x021b0*/ u64 lag_port_state_vars[2]; +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11) +#define 
VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \ + vxge_mBIT(32) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \ + vxge_mBIT(33) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \ + vxge_vBIT(val, 41, 3) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \ + vxge_vBIT(val, 56, 4) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \ + vxge_vBIT(val, 60, 4) +/*0x021c0*/ u64 lag_port_timer_cntr[2]; +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \ + vxge_vBIT(val, 8, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \ + vxge_vBIT(val, 32, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \ + vxge_vBIT(val, 40, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \ + vxge_vBIT(val, 48, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \ + vxge_vBIT(val, 56, 8) + u8 unused02208[0x02700-0x021d0]; + +/*0x02700*/ u64 rtdma_int_status; +#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1) +#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2) +#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4) +#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5) +/*0x02708*/ u64 rtdma_int_mask; +/*0x02710*/ u64 pda_alarm_reg; +#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0) +#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1) +/*0x02718*/ u64 pda_alarm_mask; +/*0x02720*/ u64 pda_alarm_alarm; +/*0x02728*/ u64 pcc_error_reg; +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n) +/*0x02730*/ u64 pcc_error_mask; +/*0x02738*/ u64 pcc_error_alarm; +/*0x02740*/ u64 lso_error_reg; +#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n) +#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n) +/*0x02748*/ u64 lso_error_mask; +/*0x02750*/ u64 lso_error_alarm; +/*0x02758*/ u64 sm_error_reg; +#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15) +/*0x02760*/ u64 sm_error_mask; +/*0x02768*/ u64 sm_error_alarm; + + u8 unused027a8[0x027a8-0x02770]; 
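[Editor's note] Like the other error blocks in this file, the RTDMA sources above follow a reg/mask/alarm triple: the *_reg register latches raw events, *_mask gates which of them propagate to the alarm, and the latched bits are cleared by writing them back. A minimal service sketch under those assumptions (the write-1-to-clear behaviour is inferred from the naming convention, not stated in this hunk), using pda_alarm_* as the example:

    u64 events = readq(&reg->pda_alarm_reg);
    u64 masked = events & ~readq(&reg->pda_alarm_mask);   /* unmasked only */

    if (masked & VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR)
            pr_warn("vxge: PDA state machine error\n");   /* illustrative */

    writeq(events, &reg->pda_alarm_reg);    /* write-1-to-clear, assumed */

The same pattern applies to pcc_error_*, lso_error_* and sm_error_* in this block, which is why each source needs exactly three consecutive u64 slots in the map.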
+ +/*0x027a8*/ u64 txd_ownership_ctrl; +#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7) +/*0x027b0*/ u64 pcc_cfg; +#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n) +/*0x027b8*/ u64 pcc_control; +#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15) +#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31) +/*0x027c0*/ u64 pda_status1; +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4) +/*0x027c8*/ u64 rtdma_bw_timer; +#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4) + + u8 unused02900[0x02900-0x027d0]; +/*0x02900*/ u64 g3cmct_int_status; +#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x02908*/ u64 g3cmct_int_mask; +/*0x02910*/ u64 g3cmct_err_reg; +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31) +/*0x02918*/ u64 g3cmct_err_mask; +/*0x02920*/ u64 g3cmct_err_alarm; + u8 unused03000[0x03000-0x02928]; + +/*0x03000*/ u64 mc_int_status; +#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3) +#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7) +#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11) +#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15) +/*0x03008*/ u64 mc_int_mask; +/*0x03010*/ u64 mc_err_reg; +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3) +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4) +#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7) +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10) +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11) +#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14) +#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15) +/*0x03018*/ u64 mc_err_mask; +/*0x03020*/ u64 mc_err_alarm; +/*0x03028*/ u64 grocrc_alarm_reg; +#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3) +#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7) +/*0x03030*/ u64 grocrc_alarm_mask; +/*0x03038*/ u64 grocrc_alarm_alarm; + u8 unused03100[0x03100-0x03040]; + +/*0x03100*/ u64 rx_thresh_cfg_repl; +#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8) 
+#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62) +#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63) + u8 unused033b8[0x033b8-0x03108]; + +/*0x033b8*/ u64 fbmc_ecc_cfg; +#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5) + u8 unused03400[0x03400-0x033c0]; + +/*0x03400*/ u64 pcipif_int_status; +#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3) +#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7) +#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11) +#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15) +#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \ + vxge_mBIT(19) +/*0x03408*/ u64 pcipif_int_mask; +/*0x03410*/ u64 dbecc_err_reg; +#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3) +#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7) +#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11) +#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15) +#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19) +#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23) +/*0x03418*/ u64 dbecc_err_mask; +/*0x03420*/ u64 dbecc_err_alarm; +/*0x03428*/ u64 sbecc_err_reg; +#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3) +#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7) +#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11) +#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15) +#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19) +#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23) +/*0x03430*/ u64 sbecc_err_mask; +/*0x03438*/ u64 sbecc_err_alarm; +/*0x03440*/ u64 general_err_reg; +#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3) +#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7) +#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15) +#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19) +#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23) +#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27) +/*0x03448*/ u64 general_err_mask; +/*0x03450*/ u64 general_err_alarm; +/*0x03458*/ u64 srpcim_msg_reg; +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \ + vxge_mBIT(0) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \ + vxge_mBIT(1) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \ + vxge_mBIT(2) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \ + vxge_mBIT(3) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \ + vxge_mBIT(4) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \ + vxge_mBIT(5) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \ + vxge_mBIT(6) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \ + vxge_mBIT(7) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \ + vxge_mBIT(8) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \ + vxge_mBIT(9) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \ + vxge_mBIT(10) +#define 
VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \ + vxge_mBIT(11) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \ + vxge_mBIT(12) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \ + vxge_mBIT(13) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \ + vxge_mBIT(14) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \ + vxge_mBIT(15) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \ + vxge_mBIT(16) +/*0x03460*/ u64 srpcim_msg_mask; +/*0x03468*/ u64 srpcim_msg_alarm; + u8 unused03600[0x03600-0x03470]; + +/*0x03600*/ u64 gcmg1_int_status; +#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6) +#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7) +#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8) +/*0x03608*/ u64 gcmg1_int_mask; + u8 unused03a00[0x03a00-0x03610]; + +/*0x03a00*/ u64 pcmg1_int_status; +#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0) +#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1) +#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2) +#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3) +/*0x03a08*/ u64 pcmg1_int_mask; + u8 unused04000[0x04000-0x03a10]; + +/*0x04000*/ u64 one_int_status; +#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7) +#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \ + vxge_mBIT(13) +#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \ + vxge_mBIT(14) +#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15) +#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23) +#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31) +#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39) +#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47) +#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55) +/*0x04008*/ u64 one_int_mask; + u8 unused04818[0x04818-0x04010]; + +/*0x04818*/ u64 noa_wct_ctrl; +#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0) +/*0x04820*/ u64 rc_cfg2; +#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16) +/*0x04828*/ u64 rc_cfg3; +#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16) +/*0x04830*/ u64 rx_multi_cast_ctrl1; +#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7) +#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5) +/*0x04838*/ u64 rxdm_dbg_rd; +#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12) +#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31) +/*0x04840*/ u64 rxdm_dbg_rd_data; +#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64) +/*0x04848*/ u64 rqa_top_prty_for_vh[17]; +#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \ + vxge_vBIT(val, 59, 5) + u8 unused04900[0x04900-0x048d0]; + +/*0x04900*/ u64 
tim_status; +#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0) +/*0x04908*/ u64 tim_ecc_enable; +#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7) +#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15) +#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23) +/*0x04910*/ u64 tim_bp_ctrl; +#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7) +#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15) +#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23) +/*0x04918*/ u64 tim_resource_assignment_vh[17]; +#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32) +/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17]; +#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5) + u8 unused04b00[0x04b00-0x04a28]; + +/*0x04b00*/ u64 gcmg2_int_status; +#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7) +#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15) +#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23) +/*0x04b08*/ u64 gcmg2_int_mask; +/*0x04b10*/ u64 gxtmc_err_reg; +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \ + vxge_mBIT(21) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \ + vxge_mBIT(22) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \ + vxge_mBIT(24) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \ + vxge_mBIT(25) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35) +/*0x04b18*/ u64 gxtmc_err_mask; +/*0x04b20*/ u64 gxtmc_err_alarm; +/*0x04b28*/ u64 cmc_err_reg; +#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0) +/*0x04b30*/ u64 cmc_err_mask; +/*0x04b38*/ u64 cmc_err_alarm; +/*0x04b40*/ u64 gcp_err_reg; +#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0) +#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1) +#define 
VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2) +#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3) +/*0x04b48*/ u64 gcp_err_mask; +/*0x04b50*/ u64 gcp_err_alarm; + u8 unused04f00[0x04f00-0x04b58]; + +/*0x04f00*/ u64 pcmg2_int_status; +#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7) +#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15) +#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23) +/*0x04f08*/ u64 pcmg2_int_mask; +/*0x04f10*/ u64 pxtmc_err_reg; +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45) +#define 
VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57) +/*0x04f18*/ u64 pxtmc_err_mask; +/*0x04f20*/ u64 pxtmc_err_alarm; +/*0x04f28*/ u64 cp_err_reg; +#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2) +#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10) +#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11) +#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12) +#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13) +#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14) +#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15) +#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2) +#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2) +#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34) +#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35) +#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36) +#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37) +#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38) +#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39) +#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2) +#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48) +#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49) +#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50) +#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51) +#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52) +#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53) +#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54) +#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55) +#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56) +#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57) +#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60) +#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61) +#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62) +#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63) +/*0x04f30*/ u64 cp_err_mask; +/*0x04f38*/ u64 cp_err_alarm; + u8 unused04fe8[0x04f50-0x04f40]; + +/*0x04f50*/ u64 cp_exc_reg; +#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47) +#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55) +#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63) +/*0x04f58*/ u64 cp_exc_mask; +/*0x04f60*/ u64 cp_exc_alarm; +/*0x04f68*/ u64 cp_exc_cause; +#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32) + u8 unused05200[0x05200-0x04f70]; + +/*0x05200*/ u64 msg_int_status; +#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7) +#define 
VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60) +#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61) +#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62) +#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63) +/*0x05208*/ u64 msg_int_mask; +/*0x05210*/ u64 tim_err_reg; +#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7) +#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19) +#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20) +#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22) +#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23) +#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n) +/*0x05218*/ u64 tim_err_mask; +/*0x05220*/ u64 tim_err_alarm; +/*0x05228*/ u64 msg_err_reg; +#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \ + vxge_mBIT(2) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \ + vxge_mBIT(3) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31) +#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33) +#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \ + vxge_mBIT(36) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45) +#define 
VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63) +/*0x05230*/ u64 msg_err_mask; +/*0x05238*/ u64 msg_err_alarm; + u8 unused05340[0x05340-0x05240]; + +/*0x05340*/ u64 msg_exc_reg; +#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50) +#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51) +#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54) +#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55) +#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62) +#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63) +/*0x05348*/ u64 msg_exc_mask; +/*0x05350*/ u64 msg_exc_alarm; +/*0x05358*/ u64 msg_exc_cause; +#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32) + u8 unused05368[0x05380-0x05360]; + +/*0x05380*/ u64 msg_err2_reg; +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(0) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(1) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(2) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \ + vxge_mBIT(3) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \ + vxge_mBIT(5) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \ + vxge_mBIT(12) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \ + vxge_mBIT(13) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \ + vxge_mBIT(14) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \ + vxge_mBIT(15) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \ + vxge_mBIT(16) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \ + vxge_mBIT(17) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \ + vxge_mBIT(18) +#define 
VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \ + vxge_mBIT(19) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \ + vxge_mBIT(20) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \ + vxge_mBIT(21) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \ + vxge_mBIT(22) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \ + vxge_mBIT(23) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \ + vxge_mBIT(24) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \ + vxge_mBIT(25) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \ + vxge_mBIT(26) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \ + vxge_mBIT(27) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \ + vxge_mBIT(28) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(30) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(31) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(32) +#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33) +#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62) +#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63) +/*0x05388*/ u64 msg_err2_mask; +/*0x05390*/ u64 msg_err2_alarm; +/*0x05398*/ u64 msg_err3_reg; +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40) +#define 
VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57) +/*0x053a0*/ u64 msg_err3_mask; +/*0x053a8*/ u64 msg_err3_alarm; + u8 unused05600[0x05600-0x053b0]; + +/*0x05600*/ u64 fau_gen_err_reg; +#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3) +#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7) +#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11) +#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15) +/*0x05608*/ u64 fau_gen_err_mask; +/*0x05610*/ u64 fau_gen_err_alarm; +/*0x05618*/ u64 fau_ecc_err_reg; +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \ + vxge_vBIT(val, 2, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \ + vxge_vBIT(val, 4, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \ + vxge_vBIT(val, 8, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \ + vxge_vBIT(val, 10, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \ + vxge_vBIT(val, 14, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \ + vxge_vBIT(val, 16, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \ + vxge_vBIT(val, 18, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \ + vxge_vBIT(val, 20, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31) +/*0x05620*/ u64 fau_ecc_err_mask; +/*0x05628*/ u64 fau_ecc_err_alarm; + u8 unused05658[0x05658-0x05630]; +/*0x05658*/ u64 fau_pa_cfg; +#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3) +#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7) +#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11) + u8 unused05668[0x05668-0x05660]; + +/*0x05668*/ u64 dbg_stats_fau_rx_path; +#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \ + vxge_vBIT(val, 32, 32) + u8 unused056c0[0x056c0-0x05670]; + +/*0x056c0*/ u64 fau_lag_cfg; +#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2) +#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7) + u8 unused05800[0x05800-0x056c8]; + +/*0x05800*/ u64 tpa_int_status; +#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15) +#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23) +#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31) +/*0x05808*/ u64 tpa_int_mask; +/*0x05810*/ u64 orp_err_reg; +#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3) +#define 
VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7) +#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11) +#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15) +#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43) +#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47) +/*0x05818*/ u64 orp_err_mask; +/*0x05820*/ u64 orp_err_alarm; +/*0x05828*/ u64 ptm_alarm_reg; +#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3) +#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7) +#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11) +#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15) +#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2) +#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2) +/*0x05830*/ u64 ptm_alarm_mask; +/*0x05838*/ u64 ptm_alarm_alarm; +/*0x05840*/ u64 tpa_error_reg; +#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3) +#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7) +#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11) +/*0x05848*/ u64 tpa_error_mask; +/*0x05850*/ u64 tpa_error_alarm; +/*0x05858*/ u64 tpa_global_cfg; +#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7) +#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35) + u8 unused05868[0x05870-0x05860]; + +/*0x05870*/ u64 ptm_ecc_cfg; +#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3) +/*0x05878*/ u64 ptm_phase_cfg; +#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3) +#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7) + u8 unused05898[0x05898-0x05880]; + +/*0x05898*/ u64 dbg_stats_tpa_tx_path; +#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \ + vxge_vBIT(val, 32, 32) + u8 unused05900[0x05900-0x058a0]; + +/*0x05900*/ u64 tmac_int_status; +#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3) +#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7) +/*0x05908*/ u64 tmac_int_mask; +/*0x05910*/ u64 txmac_gen_err_reg; +#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3) +#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7) +/*0x05918*/ u64 txmac_gen_err_mask; +/*0x05920*/ u64 txmac_gen_err_alarm; +/*0x05928*/ u64 txmac_ecc_err_reg; +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39) +/*0x05930*/ u64 txmac_ecc_err_mask; +/*0x05938*/ u64 txmac_ecc_err_alarm; + u8 
unused05978[0x05978-0x05940]; + +/*0x05978*/ u64 dbg_stat_tx_any_frms; +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \ + vxge_vBIT(val, 16, 8) + u8 unused059a0[0x059a0-0x05980]; + +/*0x059a0*/ u64 txmac_link_util_port[3]; +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \ + vxge_vBIT(val, 1, 7) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \ + vxge_vBIT(val, 12, 4) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23) +/*0x059b8*/ u64 txmac_cfg0_port[3]; +#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3) +#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7) +#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8) +/*0x059d0*/ u64 txmac_cfg1_port[3]; +#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8) +/*0x059e8*/ u64 txmac_status_port[3]; +#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3) + u8 unused05a20[0x05a20-0x05a00]; + +/*0x05a20*/ u64 lag_distrib_dest; +#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n) +/*0x05a28*/ u64 lag_marker_cfg; +#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3) +#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7) +#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51) +/*0x05a30*/ u64 lag_tx_cfg; +#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3) +#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11) +#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16) +/*0x05a38*/ u64 lag_tx_status; +#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \ + vxge_vBIT(val, 8, 8) +#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \ + vxge_vBIT(val, 16, 8) + u8 unused05d48[0x05d48-0x05a40]; + +/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17]; +#define \ +VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\ + vxge_vBIT(val, 0, 64) + u8 unused06420[0x06420-0x05dd0]; + +/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17]; +#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17]; + +/*0x06530*/ u64 debug_stats0; +#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32) +/*0x06538*/ u64 debug_stats1; +#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32) +/*0x06540*/ u64 debug_stats2; +#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32) +/*0x06548*/ u64 debug_stats3_vplane[17]; +#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16) +/*0x065d0*/ u64 
debug_stats4_vplane[17]; +#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16) + + u8 unused07000[0x07000-0x06658]; + +/*0x07000*/ u64 mrpcim_general_int_status; +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22) +/*0x07008*/ u64 mrpcim_general_int_mask; +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22) +/*0x07010*/ u64 mrpcim_ppif_int_status; +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT 
vxge_mBIT(7) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\ + vxge_mBIT(31) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\ + vxge_mBIT(32) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\ + vxge_mBIT(33) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\ + vxge_mBIT(34) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\ + vxge_mBIT(35) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\ + vxge_mBIT(36) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\ + vxge_mBIT(37) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\ + vxge_mBIT(38) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\ + vxge_mBIT(39) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\ + vxge_mBIT(40) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \ + vxge_mBIT(41) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \ + vxge_mBIT(42) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \ + vxge_mBIT(43) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \ + vxge_mBIT(44) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \ + vxge_mBIT(45) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \ + vxge_mBIT(46) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \ + vxge_mBIT(47) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \ + vxge_mBIT(55) +/*0x07018*/ u64 mrpcim_ppif_int_mask; + u8 unused07028[0x07028-0x07020]; + +/*0x07028*/ u64 ini_errors_reg; +#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3) +#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12) +#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15) +#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19) +#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23) +#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27) +#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31) +#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35) +#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39) +#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43) +#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47) +#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51) +#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55) +#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59) +#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63) +/*0x07030*/ u64 ini_errors_mask; +/*0x07038*/ u64 ini_errors_alarm; +/*0x07040*/ u64 dma_errors_reg; +#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3) +#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7) +#define 
VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29) +#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32) +#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33) +#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34) +/*0x07048*/ u64 dma_errors_mask; +/*0x07050*/ u64 dma_errors_alarm; +/*0x07058*/ u64 tgt_errors_reg; +#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0) +#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1) +#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2) +#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3) +#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4) +#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5) +#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6) +#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7) +#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8) +#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14) +#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15) +#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16) +#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17) +#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18) +#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19) +#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20) +#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21) +/*0x07060*/ u64 tgt_errors_mask; +/*0x07068*/ u64 tgt_errors_alarm; +/*0x07070*/ u64 config_errors_reg; +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15) 
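Two conventions repeat through this whole register map: each error source is a reg/mask/alarm triplet at consecutive 8-byte offsets, and gaps between documented offsets are absorbed by u8 unusedYYYY[0xYYYY - 0xXXXX] padding members, so that each field's offsetof() equals its /*0xNNNN*/ annotation. A made-up fragment (the struct name is hypothetical; the offsets are taken from the crdt_errors group just below) shows the arithmetic:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct demo_reg_map {                          /* base = 0x07090 */
    /*0x07090*/ uint64_t crdt_errors_reg;   /* latched error bits */
    /*0x07098*/ uint64_t crdt_errors_mask;  /* assumed 1 = masked */
    /*0x070a0*/ uint64_t crdt_errors_alarm; /* bits that fired    */
                uint8_t  unused070b0[0x070b0 - 0x070a8];
    /*0x070b0*/ uint64_t mrpcim_general_errors_reg;
    };

    /* The padding only holds if nothing above it drifts; a
     * compile-time check pins the relative offset against the
     * documented one. */
    static_assert(offsetof(struct demo_reg_map, mrpcim_general_errors_reg) ==
                  0x070b0 - 0x07090, "register offsets drifted");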
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31) +#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35) +#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39) +#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59) +#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63) +/*0x07078*/ u64 config_errors_mask; +/*0x07080*/ u64 config_errors_alarm; + u8 unused07090[0x07090-0x07088]; + +/*0x07090*/ u64 crdt_errors_reg; +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \ + vxge_mBIT(15) +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19) +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \ + vxge_mBIT(23) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \ + vxge_mBIT(47) +/*0x07098*/ u64 crdt_errors_mask; +/*0x070a0*/ u64 crdt_errors_alarm; + u8 unused070b0[0x070b0-0x070a8]; + +/*0x070b0*/ u64 mrpcim_general_errors_reg; +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \ + vxge_mBIT(47) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51) +/*0x070b8*/ u64 mrpcim_general_errors_mask; +/*0x070c0*/ u64 mrpcim_general_errors_alarm; + u8 unused070d0[0x070d0-0x070c8]; + +/*0x070d0*/ u64 pll_errors_reg; +#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3) +#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7) +#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11) +/*0x070d8*/ u64 pll_errors_mask; +/*0x070e0*/ u64 pll_errors_alarm; +/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg; +#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \ + vxge_vBIT(val, 0, 17) +/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask; +/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm; +/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg; +#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \ + vxge_vBIT(val, 0, 17) +/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask; +/*0x07110*/ u64 
vpath_to_mrpcim_alarm_alarm; + u8 unused07128[0x07128-0x07118]; + +/*0x07128*/ u64 crdt_errors_vplane_reg[17]; +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \ + vxge_mBIT(3) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \ + vxge_mBIT(7) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \ + vxge_mBIT(11) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \ + vxge_mBIT(15) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \ + vxge_mBIT(19) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \ + vxge_mBIT(23) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \ + vxge_mBIT(27) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \ + vxge_mBIT(31) +/*0x07130*/ u64 crdt_errors_vplane_mask[17]; +/*0x07138*/ u64 crdt_errors_vplane_alarm[17]; + u8 unused072f0[0x072f0-0x072c0]; + +/*0x072f0*/ u64 mrpcim_rst_in_prog; +#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7) +/*0x072f8*/ u64 mrpcim_reg_modified; +#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7) + + u8 unused07378[0x07378-0x07300]; + +/*0x07378*/ u64 write_arb_pending; +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19) +/*0x07380*/ u64 read_arb_pending; +#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3) +#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7) +#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11) +/*0x07388*/ u64 dmaif_dmadbl_pending; +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \ + vxge_vBIT(val, 13, 51) +/*0x07390*/ u64 wrcrdtarb_status0_vplane[17]; +#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \ + vxge_vBIT(val, 0, 8) +/*0x07418*/ u64 wrcrdtarb_status1_vplane[17]; +#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \ + vxge_vBIT(val, 4, 12) + u8 unused07500[0x07500-0x074a0]; + +/*0x07500*/ u64 mrpcim_general_cfg1; +#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7) +/*0x07508*/ u64 mrpcim_general_cfg2; +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \ + vxge_vBIT(val, 47, 5) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55) +#define 
VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63) +/*0x07510*/ u64 mrpcim_general_cfg3; +#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \ + vxge_vBIT(val, 36, 16) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63) +/*0x07518*/ u64 mrpcim_stats_start_host_addr; +#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\ + vxge_vBIT(val, 0, 57) + + u8 unused07950[0x07950-0x07520]; + +/*0x07950*/ u64 rdcrdtarb_cfg0; +#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \ + vxge_vBIT(val, 18, 6) +#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \ + vxge_vBIT(val, 26, 6) +#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \ + vxge_vBIT(val, 34, 6) +#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4) +#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6) +#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63) + u8 unused07be8[0x07be8-0x07958]; + +/*0x07be8*/ u64 bf_sw_reset; +#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8) +/*0x07bf0*/ u64 sw_reset_status; +#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7) +#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15) + u8 unused07d30[0x07d30-0x07bf8]; + +/*0x07d30*/ u64 mrpcim_debug_stats0; +#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32) +/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17]; +#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \ + vxge_vBIT(val, 32, 32) +/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17]; +#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \ + vxge_vBIT(val, 32, 32) +/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17]; +#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \ + vxge_vBIT(val, 32, 32) +/*0x07ed0*/ u64 mrpcim_debug_stats4; +#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \ + vxge_vBIT(val, 32, 32) +/*0x07ed8*/ u64 genstats_count01; +#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32) +/*0x07ee0*/ u64 genstats_count23; +#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32) +/*0x07ee8*/ u64 genstats_count4; +#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32) +/*0x07ef0*/ u64 genstats_count5; +#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32) + + u8 unused07f08[0x07f08-0x07ef8]; + +/*0x07f08*/ u64 
genstats_cfg[6]; +#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3) +#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17) +/*0x07f38*/ u64 genstat_64bit_cfg; +#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3) +#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7) + u8 unused08000[0x08000-0x07f40]; +/*0x08000*/ u64 gcmg3_int_status; +#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0) +#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1) +#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2) +#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3) +#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4) +#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5) +#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6) +/*0x08008*/ u64 gcmg3_int_mask; + u8 unused09000[0x09000-0x8010]; + +/*0x09000*/ u64 g3ifcmd_fb_int_status; +#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x09008*/ u64 g3ifcmd_fb_int_mask; +/*0x09010*/ u64 g3ifcmd_fb_err_reg; +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6) +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7) +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55) +/*0x09018*/ u64 g3ifcmd_fb_err_mask; +/*0x09020*/ u64 g3ifcmd_fb_err_alarm; + + u8 unused09400[0x09400-0x09028]; + +/*0x09400*/ u64 g3ifcmd_cmu_int_status; +#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x09408*/ u64 g3ifcmd_cmu_int_mask; +/*0x09410*/ u64 g3ifcmd_cmu_err_reg; +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6) +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7) +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55) +/*0x09418*/ u64 g3ifcmd_cmu_err_mask; +/*0x09420*/ u64 g3ifcmd_cmu_err_alarm; + + u8 unused09800[0x09800-0x09428]; + +/*0x09800*/ u64 g3ifcmd_cml_int_status; +#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x09808*/ u64 g3ifcmd_cml_int_mask; +/*0x09810*/ u64 g3ifcmd_cml_err_reg; +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6) +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7) +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55) +/*0x09818*/ u64 g3ifcmd_cml_err_mask; +/*0x09820*/ u64 g3ifcmd_cml_err_alarm; + u8 unused09b00[0x09b00-0x09828]; + +/*0x09b00*/ u64 vpath_to_vplane_map[17]; +#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \ + vxge_vBIT(val, 3, 5) + u8 unused09c30[0x09c30-0x09b88]; + +/*0x09c30*/ u64 xgxs_cfg_port[2]; +#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27) +#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4) +#define 
VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4) +/*0x09c40*/ u64 xgxs_rxber_cfg_port[2]; +#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \ + vxge_vBIT(val, 16, 48) +/*0x09c50*/ u64 xgxs_rxber_status_port[2]; +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \ + vxge_vBIT(val, 0, 16) +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \ + vxge_vBIT(val, 48, 16) +/*0x09c60*/ u64 xgxs_status_port[2]; +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR BIT(11) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \ + vxge_vBIT(val, 12, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \ + vxge_vBIT(val, 32, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \ + vxge_vBIT(val, 36, 4) +/*0x09c70*/ u64 xgxs_pma_reset_port[2]; +#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8) + u8 unused09c90[0x09c90-0x09c80]; + +/*0x09c90*/ u64 xgxs_static_cfg_port[2]; +#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3) + u8 unused09d40[0x09d40-0x09ca0]; + +/*0x09d40*/ u64 xgxs_info_port[2]; +#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32) +/*0x09d50*/ u64 ratemgmt_cfg_port[2]; +#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2) +#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11) +#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15) +#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19) +/*0x09d60*/ u64 ratemgmt_status_port[2]; +#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3) +#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11) + u8 unused09d80[0x09d80-0x09d70]; + +/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2]; +#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7) +/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2]; +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \ + vxge_vBIT(val, 24, 4) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35) +/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2]; +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \ + vxge_mBIT(11) +#define 
VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \ + vxge_mBIT(15) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35) +/*0x09db0*/ u64 anbe_cfg_port[2]; +#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2) +#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2) +/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2]; +#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3) +#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7) +#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9) +#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32) + u8 unused09de0[0x09de0-0x09dd0]; + +/*0x09de0*/ u64 anbe_fw_mstr_port[2]; +#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3) +#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7) +/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2]; +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \ + vxge_mBIT(3) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \ + vxge_mBIT(7) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \ + vxge_mBIT(11) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \ + vxge_mBIT(15) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \ + vxge_vBIT(val, 18, 6) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \ + vxge_mBIT(27) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \ + vxge_mBIT(35) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \ + vxge_mBIT(39) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \ + vxge_mBIT(43) +#define \ +VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \ + vxge_mBIT(47) +#define \ +VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \ +vxge_mBIT(51) +#define \ +VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \ + vxge_mBIT(55) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \ + vxge_vBIT(val, 56, 4) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \ + vxge_vBIT(val, 60, 4) +/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2]; +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \ + vxge_mBIT(32) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \ + vxge_mBIT(33) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \ + vxge_mBIT(40) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \ + vxge_mBIT(41) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \ + vxge_mBIT(42) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \ + vxge_mBIT(50) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51) +#define 
VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \ + vxge_vBIT(val, 54, 5) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \ + vxge_vBIT(val, 59, 5) +/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2]; +#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \ + vxge_vBIT(val, 32, 32) + u8 unused09e30[0x09e30-0x09e20]; + +/*0x09e30*/ u64 antp_gen_cfg_port[2]; +/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2]; +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \ + vxge_vBIT(val, 10, 6) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \ + vxge_mBIT(23) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \ + vxge_mBIT(27) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \ + vxge_mBIT(35) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \ + vxge_mBIT(43) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \ + vxge_mBIT(51) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \ + vxge_mBIT(59) +/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2]; +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \ + vxge_vBIT(val, 4, 7) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \ + vxge_vBIT(val, 11, 5) +/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2]; +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \ + vxge_vBIT(val, 5, 11) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \ + vxge_vBIT(val, 32, 16) +/*0x09e70*/ u64 mdio_mgr_access_port[2]; +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5) +#define 
VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63) + u8 unused0a200[0x0a200-0x09e80]; +/*0x0a200*/ u64 xmac_vsport_choices_vh[17]; +#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17) + u8 unused0a400[0x0a400-0x0a288]; + +/*0x0a400*/ u64 rx_thresh_cfg_vp[17]; +#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8) + u8 unused0ac90[0x0ac90-0x0a488]; +} __packed; + +/*VXGE_HW_SRPCIM_REGS_H*/ +struct vxge_hw_srpcim_reg { + +/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh; +#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \ + vxge_vBIT(val, 0, 32) + u8 unused00100[0x00100-0x00008]; + +/*0x00100*/ u64 srpcim_pcipif_int_status; +#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3) +#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7) +#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \ + BIT(11) +/*0x00108*/ u64 srpcim_pcipif_int_mask; +/*0x00110*/ u64 mrpcim_msg_reg; +#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3) +/*0x00118*/ u64 mrpcim_msg_mask; +/*0x00120*/ u64 mrpcim_msg_alarm; +/*0x00128*/ u64 vpath_msg_reg; +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16) +/*0x00130*/ u64 vpath_msg_mask; +/*0x00138*/ u64 vpath_msg_alarm; + u8 unused00160[0x00160-0x00140]; + +/*0x00160*/ u64 srpcim_to_mrpcim_wmsg; +#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig; +#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0) +/*0x00170*/ u64 mrpcim_to_srpcim_rmsg; +#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel; +#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \ + vxge_vBIT(val, 0, 5) +/*0x00180*/ u64 vpath_to_srpcim_rmsg; +#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \ + vxge_vBIT(val, 0, 64) + u8 unused00200[0x00200-0x00188]; + +/*0x00200*/ u64 
srpcim_general_int_status; +#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0) +#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3) +#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7) + u8 unused00210[0x00210-0x00208]; + +/*0x00210*/ u64 srpcim_general_int_mask; +#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0) +#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3) +#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7) + u8 unused00220[0x00220-0x00218]; + +/*0x00220*/ u64 srpcim_ppif_int_status; + +/*0x00228*/ u64 srpcim_ppif_int_mask; +/*0x00230*/ u64 srpcim_gen_errors_reg; +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23) +/*0x00238*/ u64 srpcim_gen_errors_mask; +/*0x00240*/ u64 srpcim_gen_errors_alarm; +/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg; +#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3) +/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask; +/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm; +/*0x00260*/ u64 vpath_to_srpcim_alarm_reg; + +/*0x00268*/ u64 vpath_to_srpcim_alarm_mask; +/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm; + u8 unused00280[0x00280-0x00278]; + +/*0x00280*/ u64 pf_sw_reset; +#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8) +/*0x00288*/ u64 srpcim_general_cfg1; +#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39) +/*0x00290*/ u64 srpcim_interrupt_cfg1; +#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7) +#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3) + u8 unused002a8[0x002a8-0x00298]; + +/*0x002a8*/ u64 srpcim_clear_msix_mask; +#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0) +/*0x002b0*/ u64 srpcim_set_msix_mask; +#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0) +/*0x002b8*/ u64 srpcim_clr_msix_one_shot; +#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0) +/*0x002c0*/ u64 srpcim_rst_in_prog; +#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7) +/*0x002c8*/ u64 srpcim_reg_modified; +#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7) +/*0x002d0*/ u64 tgt_pf_illegal_access; +#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7) +/*0x002d8*/ u64 srpcim_msix_status; +#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3) +#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7) + u8 unused00880[0x00880-0x002e0]; + +/*0x00880*/ u64 xgmac_sr_int_status; +#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3) +/*0x00888*/ u64 xgmac_sr_int_mask; +/*0x00890*/ u64 asic_ntwk_sr_err_reg; +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3) +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7) +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \ + BIT(11) +#define 
VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15) +/*0x00898*/ u64 asic_ntwk_sr_err_mask; +/*0x008a0*/ u64 asic_ntwk_sr_err_alarm; + u8 unused008c0[0x008c0-0x008a8]; + +/*0x008c0*/ u64 xmac_vsport_choices_sr_clone; +#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \ + vxge_vBIT(val, 0, 17) + u8 unused00900[0x00900-0x008c8]; + +/*0x00900*/ u64 mr_rqa_top_prty_for_vh; +#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \ + vxge_vBIT(val, 59, 5) +/*0x00908*/ u64 umq_vh_data_list_empty; +#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \ + BIT(0) +/*0x00910*/ u64 wde_cfg; +#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0) +#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1) +#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2) +#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3) +#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4) +#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5) +#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6) +#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7) +#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10) +#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11) +#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14) +#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15) +#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16) +#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17) +#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19) +#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2) +#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2) + +} __packed; + +/*VXGE_HW_VPMGMT_REGS_H*/ +struct vxge_hw_vpmgmt_reg { + + u8 unused00040[0x00040-0x00000]; + +/*0x00040*/ u64 vpath_to_func_map_cfg1; +#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \ + vxge_vBIT(val, 3, 5) +/*0x00048*/ u64 vpath_is_first; +#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3) +/*0x00050*/ u64 srpcim_to_vpath_wmsg; +#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig; +#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \ + vxge_mBIT(0) + u8 unused00100[0x00100-0x00060]; + +/*0x00100*/ u64 tim_vpath_assignment; +#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32) + u8 unused00140[0x00140-0x00108]; + +/*0x00140*/ u64 rqa_top_prty_for_vp; +#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \ + vxge_vBIT(val, 59, 5) + u8 unused001c0[0x001c0-0x00148]; + +/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone; +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \ + vxge_mBIT(19) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \ + vxge_mBIT(23) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \ + vxge_mBIT(39) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \ + vxge_mBIT(43) +#define 
VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \ + vxge_mBIT(47) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \ + vxge_mBIT(51) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \ + vxge_mBIT(55) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \ + vxge_mBIT(59) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63) +/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone; +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59) +/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone; +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \ + vxge_vBIT(val, 5, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \ + vxge_vBIT(val, 9, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \ + vxge_vBIT(val, 13, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \ + vxge_vBIT(val, 17, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \ + vxge_vBIT(val, 21, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \ + vxge_vBIT(val, 25, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \ + vxge_vBIT(val, 29, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \ + vxge_vBIT(val, 33, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \ + vxge_vBIT(val, 37, 3) +/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3]; +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \ + vxge_mBIT(27) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \ + vxge_vBIT(val, 50, 14) +/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3]; +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \ + vxge_vBIT(val, 9, 3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \ + vxge_vBIT(val, 20, 16) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \ + vxge_mBIT(39) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \ + vxge_mBIT(43) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \ + vxge_vBIT(val, 48, 8) +#define 
VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \ + vxge_mBIT(59) + u8 unused00240[0x00240-0x00208]; + +/*0x00240*/ u64 xmac_vsport_choices_vp; +#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17) + u8 unused00260[0x00260-0x00248]; + +/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone; +#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3) +#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \ + vxge_mBIT(11) +/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2]; +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \ + vxge_mBIT(3) +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7) +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \ + vxge_mBIT(11) +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15) +/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone; +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \ + vxge_vBIT(val, 2, 2) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \ + vxge_mBIT(7) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \ + vxge_vBIT(val, 28, 4) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \ + vxge_vBIT(val, 32, 4) +/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone; +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \ + vxge_vBIT(val, 6, 2) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \ + vxge_vBIT(val, 32, 16) +/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone; +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \ + vxge_vBIT(val, 4, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \ + vxge_vBIT(val, 8, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15) +/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3]; +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3) +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \ + vxge_mBIT(7) +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11) +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15) + u8 unused002c0[0x002c0-0x002a8]; + +/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone; +#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7) +/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3]; +#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3) +#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7) +#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8) + u8 unused00300[0x00300-0x002e0]; + +/*0x00300*/ u64 wol_mp_crc; +#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63) +/*0x00308*/ u64 wol_mp_mask_a; +#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64) +/*0x00310*/ u64 wol_mp_mask_b; +#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64) + u8 unused00360[0x00360-0x00318]; + +/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone; +#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3) +#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7) +#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11) +/*0x00368*/ u64 
rx_datapath_util_vp_clone; +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \ + vxge_vBIT(val, 7, 9) +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \ + vxge_vBIT(val, 24, 4) + u8 unused00380[0x00380-0x00370]; + +/*0x00380*/ u64 tx_datapath_util_vp_clone; +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \ + vxge_vBIT(val, 7, 9) +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \ + vxge_vBIT(val, 24, 4) + +} __packed; + +struct vxge_hw_vpath_reg { + + u8 unused00300[0x00300]; + +/*0x00300*/ u64 usdc_vpath; +#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32) + u8 unused00a00[0x00a00-0x00308]; + +/*0x00a00*/ u64 wrdma_alarm_status; +#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1) +/*0x00a08*/ u64 wrdma_alarm_mask; + u8 unused00a30[0x00a30-0x00a10]; + +/*0x00a30*/ u64 prc_alarm_reg; +#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0) +#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1) +#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2) +#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3) +/*0x00a38*/ u64 prc_alarm_mask; +/*0x00a40*/ u64 prc_alarm_alarm; +/*0x00a48*/ u64 prc_cfg1; +#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29) +#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34) +#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35) +#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36) +#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37) +#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39) +#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2) + u8 unused00a60[0x00a60-0x00a50]; + +/*0x00a60*/ u64 prc_cfg4; +#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7) +#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22) +#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23) +#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31) +#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32) +#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36) +#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37) +#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24) +/*0x00a68*/ u64 prc_cfg5; +#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61) +/*0x00a70*/ u64 prc_cfg6; +#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0) +#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2) +#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5) +#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8) +#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) +#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) +#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) +/*0x00a78*/ u64 prc_cfg7; +#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) +#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12) +#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14) +#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5) +/*0x00a80*/ u64 tim_dest_addr; +#define 
VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64) +/*0x00a88*/ u64 prc_rxd_doorbell; +#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16) +/*0x00a90*/ u64 rqa_prty_for_vp; +#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5) +/*0x00a98*/ u64 rxdmem_size; +#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13) +/*0x00aa0*/ u64 frm_in_progress_cnt; +#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \ + vxge_vBIT(val, 59, 5) +/*0x00aa8*/ u64 rx_multi_cast_stats; +#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16) +/*0x00ab0*/ u64 rx_frm_transferred; +#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \ + vxge_vBIT(val, 32, 32) +/*0x00ab8*/ u64 rxd_returned; +#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16) + u8 unused00c00[0x00c00-0x00ac0]; + +/*0x00c00*/ u64 kdfc_fifo_trpl_partition; +#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15) +#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15) +/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl; +#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7) +/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl; +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16) +/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl; +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16) +/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl; +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16) +/*0x00c28*/ u64 
kdfc_trpl_fifo_0_wb_address; +#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64) +/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address; +#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64) +/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address; +#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64) +/*0x00c40*/ u64 kdfc_trpl_fifo_offset; +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15) +/*0x00c48*/ u64 kdfc_drbl_triplet_total; +#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \ + vxge_vBIT(val, 17, 15) + u8 unused00c60[0x00c60-0x00c50]; + +/*0x00c60*/ u64 usdc_drbl_ctrl; +#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23) +/*0x00c68*/ u64 usdc_vp_ready; +#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7) +#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15) +#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23) +/*0x00c70*/ u64 kdfc_status; +#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0) +#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1) +#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2) + u8 unused00c80[0x00c80-0x00c78]; + +/*0x00c80*/ u64 xmac_rpa_vcfg; +#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3) +#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7) +#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11) +#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15) +#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19) +#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23) +/*0x00c88*/ u64 rxmac_vcfg0; +#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14) +#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19) +#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14) +#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43) +#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47) +#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51) +#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55) +/*0x00c90*/ u64 rxmac_vcfg1; +#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2) +#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47) +#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51) +/*0x00c98*/ u64 rts_access_steer_ctrl; +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8) +/*0x00ca0*/ u64 rts_access_steer_data0; +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64) +/*0x00ca8*/ u64 rts_access_steer_data1; +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64) + u8 unused00d00[0x00d00-0x00cb0]; + +/*0x00d00*/ u64 xmac_vsport_choice; +#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5) +/*0x00d08*/ u64 xmac_stats_cfg; +/*0x00d10*/ u64 xmac_stats_access_cmd; +#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2) +#define 
VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15) +#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8) +/*0x00d18*/ u64 xmac_stats_access_data; +#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64) +/*0x00d20*/ u64 asic_ntwk_vp_ctrl; +#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3) +#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55) +#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63) + u8 unused00d30[0x00d30-0x00d28]; + +/*0x00d30*/ u64 xgmac_vp_int_status; +#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \ + vxge_mBIT(3) +/*0x00d38*/ u64 xgmac_vp_int_mask; +/*0x00d40*/ u64 asic_ntwk_vp_err_reg; +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3) +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7) +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \ + vxge_mBIT(11) +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \ + vxge_mBIT(15) +#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \ + vxge_mBIT(19) +#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23) +/*0x00d48*/ u64 asic_ntwk_vp_err_mask; +/*0x00d50*/ u64 asic_ntwk_vp_err_alarm; + u8 unused00d80[0x00d80-0x00d58]; + +/*0x00d80*/ u64 rtdma_bw_ctrl; +#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39) +#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18) +/*0x00d88*/ u64 rtdma_rd_optimization_ctrl; +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19) +#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \ + vxge_vBIT(val, 21, 3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \ + vxge_vBIT(val, 29, 3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \ + vxge_vBIT(val, 37, 3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \ + vxge_vBIT(val, 61, 3) +/*0x00d90*/ u64 pda_pcc_job_monitor; +#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7) +/*0x00d98*/ u64 tx_protocol_assist_cfg; +#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6) +#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7) + u8 unused01000[0x01000-0x00da0]; + +/*0x01000*/ u64 tim_cfg1_int_num[4]; +#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26) +#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35) +#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36) +#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37) +#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38) +#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39) +#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7) +#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7) +/*0x01020*/ u64 
tim_cfg2_int_num[4]; +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16) +/*0x01040*/ u64 tim_cfg3_int_num[4]; +#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0) +#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4) +#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26) +#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6) +#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26) +/*0x01060*/ u64 tim_wrkld_clc; +#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40) +#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2) +#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43) +#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7) +/*0x01068*/ u64 tim_bitmap; +#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32) +#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33) +/*0x01070*/ u64 tim_ring_assn; +#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2) +/*0x01078*/ u64 tim_remap; +#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5) +#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6) +#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7) +#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5) +/*0x01080*/ u64 tim_vpath_map; +#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32) +/*0x01088*/ u64 tim_pci_cfg; +#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7) +#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15) +#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23) +#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31) + u8 unused01100[0x01100-0x01090]; + +/*0x01100*/ u64 sgrp_assign; +#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64) +/*0x01108*/ u64 sgrp_aoa_and_result; +#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \ + vxge_vBIT(val, 0, 64) +/*0x01110*/ u64 rpe_pci_cfg; +#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7) +#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8) +#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9) +#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10) +#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11) +#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12) +#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13) +#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14) +#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31) +/*0x01118*/ 
u64 rpe_lro_cfg; +#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7) +#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11) +#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15) +#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23) +/*0x01120*/ u64 pe_mr2vp_ack_blk_limit; +#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32) +/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit; +#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \ + vxge_vBIT(val, 0, 32) +#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \ + vxge_vBIT(val, 32, 32) +/*0x01130*/ u64 txpe_pci_nce_cfg; +#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55) +#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63) + u8 unused01180[0x01180-0x01138]; + +/*0x01180*/ u64 msg_qpad_en_cfg; +#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3) +#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7) +#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11) +#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15) +#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19) +#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23) +#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27) +#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31) +/*0x01188*/ u64 msg_pci_cfg; +#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3) +#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7) +#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11) +#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15) +/*0x01190*/ u64 umqdmq_ir_init; +#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64) +/*0x01198*/ u64 dmq_ir_int; +#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6) +#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7) +#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7) +#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16) +/*0x011a0*/ u64 dmq_bwr_init_add; +#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64) +/*0x011a8*/ u64 dmq_bwr_init_byte; +#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32) +/*0x011b0*/ u64 dmq_ir; +#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8) +/*0x011b8*/ u64 umq_int; +#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6) +#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7) +#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7) +#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16) +/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init; +#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8) +/*0x011c8*/ u64 umq_bwr_pfch_ctrl; +#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3) +/*0x011d0*/ u64 umq_mr2vp_bwr_eol; +#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32) +/*0x011d8*/ u64 umq_bwr_init_add; +#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64) +/*0x011e0*/ u64 umq_bwr_init_byte; +#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32) +/*0x011e8*/ u64 gendma_int; +#define VXGE_HW_GENDMA_INT_IMMED_ENABLE vxge_mBIT(6) +#define VXGE_HW_GENDMA_INT_EVENT_ENABLE vxge_mBIT(7) +#define VXGE_HW_GENDMA_INT_NUMBER(val) vxge_vBIT(val, 9, 7) +#define VXGE_HW_GENDMA_INT_BITMAP(val) vxge_vBIT(val, 16, 16) +/*0x011f0*/ u64 umqdmq_ir_init_notify; +#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3) +/*0x011f8*/ u64 dmq_init_notify; +#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE 
vxge_mBIT(3) +/*0x01200*/ u64 umq_init_notify; +#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3) + u8 unused01380[0x01380-0x01208]; + +/*0x01380*/ u64 tpa_cfg; +#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3) +#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7) +#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11) +#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15) + u8 unused01400[0x01400-0x01388]; + +/*0x01400*/ u64 tx_vp_reset_discarded_frms; +#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \ + vxge_vBIT(val, 48, 16) + u8 unused01480[0x01480-0x01408]; + +/*0x01480*/ u64 fau_rpa_vcfg; +#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7) +#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11) +#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15) + u8 unused014d0[0x014d0-0x01488]; + +/*0x014d0*/ u64 dbg_stats_rx_mpa; +#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16) +/*0x014d8*/ u64 dbg_stats_rx_fau; +#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \ + vxge_vBIT(val, 32, 32) + u8 unused014f0[0x014f0-0x014e0]; + +/*0x014f0*/ u64 fbmc_vp_rdy; +#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0) + u8 unused01e00[0x01e00-0x014f8]; + +/*0x01e00*/ u64 vpath_pcipif_int_status; +#define \ +VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \ + vxge_mBIT(3) +#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \ + vxge_mBIT(7) +/*0x01e08*/ u64 vpath_pcipif_int_mask; + u8 unused01e20[0x01e20-0x01e10]; + +/*0x01e20*/ u64 srpcim_msg_to_vpath_reg; +#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \ + vxge_mBIT(3) +/*0x01e28*/ u64 srpcim_msg_to_vpath_mask; +/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm; + u8 unused01ea0[0x01ea0-0x01e38]; + +/*0x01ea0*/ u64 vpath_to_srpcim_wmsg; +#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig; +#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \ + vxge_mBIT(0) + u8 unused02000[0x02000-0x01eb0]; + +/*0x02000*/ u64 vpath_general_int_status; +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3) +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7) +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15) +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19) +/*0x02008*/ u64 vpath_general_int_mask; +#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3) +#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7) +#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15) +#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19) +/*0x02010*/ u64 vpath_ppif_int_status; +#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \ + vxge_mBIT(3) +#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \ + vxge_mBIT(7) +#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \ + vxge_mBIT(11) +#define \ +VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \ + vxge_mBIT(15) +#define \ +VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \ + vxge_mBIT(19) +/*0x02018*/ u64 
vpath_ppif_int_mask;
+/*0x02020*/	u64	kdfcctl_errors_reg;
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR	vxge_mBIT(3)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR	vxge_mBIT(7)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR	vxge_mBIT(11)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON	vxge_mBIT(15)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON	vxge_mBIT(19)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON	vxge_mBIT(23)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR	vxge_mBIT(31)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR	vxge_mBIT(35)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR	vxge_mBIT(39)
+/*0x02028*/	u64	kdfcctl_errors_mask;
+/*0x02030*/	u64	kdfcctl_errors_alarm;
+	u8	unused02040[0x02040-0x02038];
+
+/*0x02040*/	u64	general_errors_reg;
+#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW	vxge_mBIT(3)
+#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW	vxge_mBIT(7)
+#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW	vxge_mBIT(11)
+#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR	vxge_mBIT(15)
+#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ	vxge_mBIT(19)
+#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS	vxge_mBIT(27)
+#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET	vxge_mBIT(31)
+/*0x02048*/	u64	general_errors_mask;
+/*0x02050*/	u64	general_errors_alarm;
+/*0x02058*/	u64	pci_config_errors_reg;
+#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR	vxge_mBIT(3)
+#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR	vxge_mBIT(7)
+#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR	vxge_mBIT(11)
+/*0x02060*/	u64	pci_config_errors_mask;
+/*0x02068*/	u64	pci_config_errors_alarm;
+/*0x02070*/	u64	mrpcim_to_vpath_alarm_reg;
+#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
+							vxge_mBIT(3)
+/*0x02078*/	u64	mrpcim_to_vpath_alarm_mask;
+/*0x02080*/	u64	mrpcim_to_vpath_alarm_alarm;
+/*0x02088*/	u64	srpcim_to_vpath_alarm_reg;
+#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
+							vxge_vBIT(val, 0, 17)
+/*0x02090*/	u64	srpcim_to_vpath_alarm_mask;
+/*0x02098*/	u64	srpcim_to_vpath_alarm_alarm;
+	u8	unused02108[0x02108-0x020a0];
+
+/*0x02108*/	u64	kdfcctl_status;
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val)	vxge_vBIT(val, 0, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val)	vxge_vBIT(val, 8, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val)	vxge_vBIT(val, 16, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val)	vxge_vBIT(val, 24, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val)	vxge_vBIT(val, 32, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val)	vxge_vBIT(val, 40, 8)
+/*0x02110*/	u64	rsthdlr_status;
+#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET	vxge_mBIT(3)
+#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val)	vxge_vBIT(val, 6, 2)
+/*0x02118*/	u64	fifo0_status;
+#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val)	vxge_vBIT(val, 0, 12)
+/*0x02120*/	u64	fifo1_status;
+#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val)	vxge_vBIT(val, 0, 12)
+/*0x02128*/	u64	fifo2_status;
+#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val)	vxge_vBIT(val, 0, 12)
+	u8	unused02158[0x02158-0x02130];
+
+/*0x02158*/	u64	tgt_illegal_access;
+#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val)	vxge_vBIT(val, 1, 7)
+	u8	unused02200[0x02200-0x02160];
+
+/*0x02200*/	u64	vpath_general_cfg1;
+#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val)	vxge_vBIT(val, 1, 3)
+#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN	vxge_mBIT(7)
+#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN	vxge_mBIT(11)
+#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN	vxge_mBIT(15)
+#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN	vxge_mBIT(23)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN	vxge_mBIT(51)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN	vxge_mBIT(55)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN	vxge_mBIT(59)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN	vxge_mBIT(63)
+/*0x02208*/	u64	vpath_general_cfg2;
+#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val)	vxge_vBIT(val, 1, 3)
+/*0x02210*/	u64	vpath_general_cfg3;
+#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA	vxge_mBIT(3)
+	u8	unused02220[0x02220-0x02218];
+
+/*0x02220*/	u64	kdfcctl_cfg0;
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	vxge_mBIT(1)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	vxge_mBIT(2)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2	vxge_mBIT(3)
+#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0	vxge_mBIT(5)
+#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1	vxge_mBIT(6)
+#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2	vxge_mBIT(7)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0	vxge_mBIT(9)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1	vxge_mBIT(10)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2	vxge_mBIT(11)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0	vxge_mBIT(13)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1	vxge_mBIT(14)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2	vxge_mBIT(15)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0	vxge_mBIT(17)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1	vxge_mBIT(18)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2	vxge_mBIT(19)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0	vxge_mBIT(21)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1	vxge_mBIT(22)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2	vxge_mBIT(23)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0	vxge_mBIT(25)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1	vxge_mBIT(26)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2	vxge_mBIT(27)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0	vxge_mBIT(29)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1	vxge_mBIT(30)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2	vxge_mBIT(31)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0	vxge_mBIT(33)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1	vxge_mBIT(34)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2	vxge_mBIT(35)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0	vxge_mBIT(37)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1	vxge_mBIT(38)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2	vxge_mBIT(39)
+
+	u8	unused02268[0x02268-0x02228];
+
+/*0x02268*/	u64	stats_cfg;
+#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val)	vxge_vBIT(val, 0, 57)
+/*0x02270*/	u64	interrupt_cfg0;
+#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val)	vxge_vBIT(val, 1, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val)	vxge_vBIT(val, 9, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val)	vxge_vBIT(val, 17, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val)	vxge_vBIT(val, 25, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val)	vxge_vBIT(val, 33, 7)
+	u8	unused02280[0x02280-0x02278];
+
+/*0x02280*/	u64	interrupt_cfg2;
+#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val)	vxge_vBIT(val, 1, 7)
+/*0x02288*/	u64	one_shot_vect0_en;
+#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN	vxge_mBIT(3)
+/*0x02290*/	u64	one_shot_vect1_en;
+#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN	vxge_mBIT(3)
+/*0x02298*/	u64	one_shot_vect2_en;
+#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN	vxge_mBIT(3)
+/*0x022a0*/	u64	one_shot_vect3_en;
+#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN	vxge_mBIT(3)
+	u8	unused022b0[0x022b0-0x022a8];
+
+/*0x022b0*/	u64	pci_config_access_cfg1;
+#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val)	vxge_vBIT(val, 0, 12)
+#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0	vxge_mBIT(15)
+/*0x022b8*/	u64	pci_config_access_cfg2;
+#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ	vxge_mBIT(0)
+/*0x022c0*/	u64	pci_config_access_status;
+#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR	vxge_mBIT(0)
+#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val)	vxge_vBIT(val, 32, 32)
+	u8	unused02300[0x02300-0x022c8];
+
+/*0x02300*/	u64	vpath_debug_stats0;
+#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val)	vxge_vBIT(val, 0, 32)
+/*0x02308*/	u64	vpath_debug_stats1;
+#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val)	vxge_vBIT(val, 0, 32)
+/*0x02310*/	u64	vpath_debug_stats2;
+#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val)	vxge_vBIT(val, 0, 32)
+/*0x02318*/	u64	vpath_debug_stats3;
+#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
+							vxge_vBIT(val, 0, 64)
+/*0x02320*/	u64	vpath_debug_stats4;
+#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
+							vxge_vBIT(val, 0, 64)
+/*0x02328*/	u64	vpath_debug_stats5;
+#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val)	vxge_vBIT(val, 32, 32)
+/*0x02330*/	u64	vpath_debug_stats6;
+#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val)	vxge_vBIT(val, 32, 32)
+/*0x02338*/	u64	vpath_genstats_count01;
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
+							vxge_vBIT(val, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
+							vxge_vBIT(val, 32, 32)
+/*0x02340*/	u64	vpath_genstats_count23;
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
+							vxge_vBIT(val, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
+							vxge_vBIT(val, 32, 32)
+/*0x02348*/	u64	vpath_genstats_count4;
+#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
+							vxge_vBIT(val, 32, 32)
+/*0x02350*/	u64	vpath_genstats_count5;
+#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
+							vxge_vBIT(val, 32, 32)
+	u8	unused02648[0x02648-0x02358];
+} __packed;
+
+#define VXGE_HW_EEPROM_SIZE	(0x01 << 11)
+
+/* Capability lists */
+#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED	0xf	/* Supported Link speeds */
+#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH	0x3f0	/* Supported Link widths. */
+#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES		0x0	/* Reserved. */
+
+#endif
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
new file mode 100644
index 00000000000..7be0ae10d69
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -0,0 +1,2528 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+ *                 Virtualized Server Adapter.
+ * Copyright(c) 2002-2009 Neterion Inc.
+ ******************************************************************************/
+#include <linux/etherdevice.h>
+
+#include "vxge-traffic.h"
+#include "vxge-config.h"
+#include "vxge-main.h"
+
+/*
+ * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Enable vpath interrupts. The function is to be executed last in the
+ * vpath initialization sequence.
+ *
+ * See also: vxge_hw_vpath_intr_disable()
+ */
+enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+
+	struct __vxge_hw_virtualpath *vpath;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+
+	vp_reg = vpath->vp_reg;
+
+	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->general_errors_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->pci_config_errors_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->mrpcim_to_vpath_alarm_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->srpcim_to_vpath_alarm_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->vpath_ppif_int_status);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->srpcim_msg_to_vpath_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->vpath_pcipif_int_status);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->prc_alarm_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->wrdma_alarm_status);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->asic_ntwk_vp_err_reg);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->xgmac_vp_int_status);
+
+	val64 = readq(&vp_reg->vpath_general_int_status);
+
+	/* Mask unwanted interrupts */
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->vpath_pcipif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->srpcim_msg_to_vpath_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->srpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->mrpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->pci_config_errors_mask);
+
+	/* Unmask the individual interrupts */
+
+	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
+		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
+		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
+		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
+		&vp_reg->general_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
+		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
+		&vp_reg->kdfcctl_errors_mask);
+
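+	/*
+	 * Note on mask polarity (relied on by vxge_hw_vpath_intr_disable(),
+	 * which writes VXGE_HW_INTR_MASK_ALL to every mask register): a 1
+	 * in a *_mask register masks the event, a 0 leaves it enabled.  The
+	 * write above therefore masks only the FIFO1/FIFO2 doorbell errors
+	 * and keeps the FIFO0 events - the ones later checked by
+	 * __vxge_hw_vpath_alarm_process() - unmasked.
+	 */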
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
+		&vp_reg->prc_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
+	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
+
+	if (vpath->hldev->first_vp_id != vpath->vp_id)
+		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+			&vp_reg->asic_ntwk_vp_err_mask);
+	else
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
+		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
+		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
+		&vp_reg->asic_ntwk_vp_err_mask);
+
+	__vxge_hw_pio_mem_write32_upper(0,
+		&vp_reg->vpath_general_int_mask);
+exit:
+	return status;
+
+}
+
+/*
+ * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Disable vpath interrupts. The function is to be executed last in the
+ * vpath teardown sequence.
+ *
+ * See also: vxge_hw_vpath_intr_enable()
+ */
+enum vxge_hw_status vxge_hw_vpath_intr_disable(
+			struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+	vp_reg = vpath->vp_reg;
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->vpath_general_int_mask);
+
+	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
+
+	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->general_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->pci_config_errors_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->mrpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->srpcim_to_vpath_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->vpath_ppif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->srpcim_msg_to_vpath_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->vpath_pcipif_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->wrdma_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->prc_alarm_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->xgmac_vp_int_mask);
+
+	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+		&vp_reg->asic_ntwk_vp_err_mask);
+
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_channel_msix_mask - Mask MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSIX ID
+ *
+ * The function masks the msix interrupt for the given msix_id
+ *
+ * Returns: void
+ */
+void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
+{
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
+			0, 32),
+		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
+
+	return;
+}
+
+/**
+ * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSIX ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id
+ *
+ * Returns: void
+ */
+void
+vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
+{
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
+			0, 32),
+		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
+
+	return;
+}
+
+/**
+ * vxge_hw_device_set_intr_type - Updates the configuration
+ *		with new interrupt type.
+ * @hldev: HW device handle.
+ * @intr_mode: New interrupt type
+ */
+u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
+{
+
+	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
+		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
+
+	hldev->config.intr_mode = intr_mode;
+	return intr_mode;
+}
+
+/**
+ * vxge_hw_device_intr_enable - Enable interrupts.
+ * @hldev: HW device handle.
+ *
+ * Enable Titan interrupts. The function is to be executed last in the
+ * Titan initialization sequence.
+ *
+ * See also: vxge_hw_device_intr_disable()
+ */
+void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
+{
+	u32 i;
+	u64 val64;
+	u32 val32;
+
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+			continue;
+
+		vxge_hw_vpath_intr_enable(
+			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
+	}
+
+	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
+		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
+
+		if (val64 != 0) {
+			writeq(val64, &hldev->common_reg->tim_int_status0);
+
+			writeq(~val64, &hldev->common_reg->tim_int_mask0);
+		}
+
+		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
+
+		if (val32 != 0) {
+			__vxge_hw_pio_mem_write32_upper(val32,
+				&hldev->common_reg->tim_int_status1);
+
+			__vxge_hw_pio_mem_write32_upper(~val32,
+				&hldev->common_reg->tim_int_mask1);
+		}
+	}
+
+	val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+	vxge_hw_device_unmask_all(hldev);
+
+	return;
+}
+
+/**
+ * vxge_hw_device_intr_disable - Disable Titan interrupts.
+ * @hldev: HW device handle.
+ *
+ * Disable Titan interrupts.
+ *
+ * See also: vxge_hw_device_intr_enable()
+ */
+void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
+{
+	u32 i;
+
+	vxge_hw_device_mask_all(hldev);
+
+	/* mask all the tim interrupts */
+	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
+	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
+		&hldev->common_reg->tim_int_mask1);
+
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+			continue;
+
+		vxge_hw_vpath_intr_disable(
+			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
+	}
+
+	return;
+}
+
+/**
+ * vxge_hw_device_mask_all - Mask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Mask all device interrupts.
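+ *
+ * Note: this sets both the alarm and the traffic bits of
+ * titan_mask_all_int in a single write, silencing the whole device; the
+ * per-event masks programmed by vxge_hw_vpath_intr_enable() are left
+ * untouched.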
+ *
+ * See also: vxge_hw_device_unmask_all()
+ */
+void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
+{
+	u64 val64;
+
+	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
+		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+				&hldev->common_reg->titan_mask_all_int);
+
+	return;
+}
+
+/**
+ * vxge_hw_device_unmask_all - Unmask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Unmask all device interrupts.
+ *
+ * See also: vxge_hw_device_mask_all()
+ */
+void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
+{
+	u64 val64 = 0;
+
+	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
+		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+			&hldev->common_reg->titan_mask_all_int);
+
+	return;
+}
+
+/**
+ * vxge_hw_device_flush_io - Flush io writes.
+ * @hldev: HW device handle.
+ *
+ * The function performs a read operation to flush io writes.
+ *
+ * Returns: void
+ */
+void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
+{
+	u32 val32;
+
+	val32 = readl(&hldev->common_reg->titan_general_int_status);
+}
+
+/**
+ * vxge_hw_device_begin_irq - Begin IRQ processing.
+ * @hldev: HW device handle.
+ * @skip_alarms: Do not clear the alarms
+ * @reason: "Reason" for the interrupt, the value of Titan's
+ *	general_int_status register.
+ *
+ * The function performs two actions: it first checks whether (shared IRQ) the
+ * interrupt was raised by the device, and then it masks the device interrupts.
+ *
+ * Note:
+ * vxge_hw_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are possible.
+ *
+ * Returns: 0, if the interrupt is not "ours" (note that in this case the
+ * device remains enabled).
+ * Otherwise, vxge_hw_device_begin_irq() returns a 64bit general adapter
+ * status.
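+ *
+ * Minimal shared-IRQ usage sketch (my_isr() and struct my_dev are assumed
+ * illustrative names, not part of this driver):
+ *
+ *	static irqreturn_t my_isr(int irq, void *dev_id)
+ *	{
+ *		struct my_dev *dev = dev_id;
+ *		u64 reason;
+ *
+ *		if (vxge_hw_device_begin_irq(dev->hldev, 0, &reason) ==
+ *		    VXGE_HW_ERR_WRONG_IRQ)
+ *			return IRQ_NONE;
+ *		(process traffic and/or alarms according to reason)
+ *		vxge_hw_device_clear_tx_rx(dev->hldev);
+ *		vxge_hw_device_unmask_all(dev->hldev);
+ *		return IRQ_HANDLED;
+ *	}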
+ */
+enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
+					     u32 skip_alarms, u64 *reason)
+{
+	u32 i;
+	u64 val64;
+	u64 adapter_status;
+	u64 vpath_mask;
+	enum vxge_hw_status ret = VXGE_HW_OK;
+
+	val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+	if (unlikely(!val64)) {
+		/* not Titan interrupt */
+		*reason = 0;
+		ret = VXGE_HW_ERR_WRONG_IRQ;
+		goto exit;
+	}
+
+	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
+
+		adapter_status = readq(&hldev->common_reg->adapter_status);
+
+		if (adapter_status == VXGE_HW_ALL_FOXES) {
+
+			__vxge_hw_device_handle_error(hldev,
+				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
+			*reason = 0;
+			ret = VXGE_HW_ERR_SLOT_FREEZE;
+			goto exit;
+		}
+	}
+
+	hldev->stats.sw_dev_info_stats.total_intr_cnt++;
+
+	*reason = val64;
+
+	vpath_mask = hldev->vpaths_deployed >>
+				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
+
+	if (val64 &
+	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
+		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
+
+		return VXGE_HW_OK;
+	}
+
+	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+
+	if (unlikely(val64 &
+			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
+
+		enum vxge_hw_status error_level = VXGE_HW_OK;
+
+		hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+				continue;
+
+			ret = __vxge_hw_vpath_alarm_process(
+				&hldev->virtual_paths[i], skip_alarms);
+
+			error_level = VXGE_HW_SET_LEVEL(ret, error_level);
+
+			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
+				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
+				break;
+		}
+
+		ret = error_level;
+	}
+exit:
+	return ret;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for a programmable amount of time.
+ */
+enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link is already up, return.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_UP)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_UP;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_up)
+		hldev->uld_callbacks.link_up(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link is already down, return.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_DOWN)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_DOWN;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_down)
+		hldev->uld_callbacks.link_down(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
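+ *
+ * Only the fatal event classes (unknown, fifo, vpath, critical, serr and
+ * slot-freeze) are forwarded to the driver's crit_err callback below;
+ * link, reset, ECC, srpcim/mrpcim serr and alarm-cleared events fall
+ * through without notification.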
+ */
+enum vxge_hw_status
+__vxge_hw_device_handle_error(
+		struct __vxge_hw_device *hldev,
+		u32 vp_id,
+		enum vxge_hw_event type)
+{
+	switch (type) {
+	case VXGE_HW_EVENT_UNKNOWN:
+		break;
+	case VXGE_HW_EVENT_RESET_START:
+	case VXGE_HW_EVENT_RESET_COMPLETE:
+	case VXGE_HW_EVENT_LINK_DOWN:
+	case VXGE_HW_EVENT_LINK_UP:
+		goto out;
+	case VXGE_HW_EVENT_ALARM_CLEARED:
+		goto out;
+	case VXGE_HW_EVENT_ECCERR:
+	case VXGE_HW_EVENT_MRPCIM_ECCERR:
+		goto out;
+	case VXGE_HW_EVENT_FIFO_ERR:
+	case VXGE_HW_EVENT_VPATH_ERR:
+	case VXGE_HW_EVENT_CRITICAL_ERR:
+	case VXGE_HW_EVENT_SERR:
+		break;
+	case VXGE_HW_EVENT_SRPCIM_SERR:
+	case VXGE_HW_EVENT_MRPCIM_SERR:
+		goto out;
+	case VXGE_HW_EVENT_SLOT_FREEZE:
+		break;
+	default:
+		vxge_assert(0);
+		goto out;
+	}
+
+	/* notify driver */
+	if (hldev->uld_callbacks.crit_err)
+		hldev->uld_callbacks.crit_err(
+			(struct __vxge_hw_device *)hldev,
+			type, vp_id);
+out:
+
+	return VXGE_HW_OK;
+}
+
+/**
+ * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
+ * condition that has caused the Tx and Rx interrupt.
+ * @hldev: HW device.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx and Rx interrupt.
+ * See also: vxge_hw_device_begin_irq(),
+ * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ */
+void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+{
+
+	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+				hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
+				&hldev->common_reg->tim_int_status0);
+	}
+
+	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		__vxge_hw_pio_mem_write32_upper(
+				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
+				&hldev->common_reg->tim_int_status1);
+	}
+
+	return;
+}
+
+/*
+ * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
+ * @channel: Channel
+ * @dtrh: Buffer to return the DTR pointer
+ *
+ * Allocates a dtr from the reserve array. If the reserve array is empty,
+ * it swaps the reserve and free arrays.
+ *
+ */
+enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+{
+	void **tmp_arr;
+
+	if (channel->reserve_ptr - channel->reserve_top > 0) {
+_alloc_after_swap:
+		*dtrh = channel->reserve_arr[--channel->reserve_ptr];
+
+		return VXGE_HW_OK;
+	}
+
+	/* switch between empty and full arrays */
+
+	/* the idea behind such a design is that by having free and reserved
+	 * arrays separated we basically separated irq and non-irq parts.
+	 * i.e. no additional locking needs to be done when we free a
+	 * resource */
+
+	if (channel->length - channel->free_ptr > 0) {
+
+		tmp_arr = channel->reserve_arr;
+		channel->reserve_arr = channel->free_arr;
+		channel->free_arr = tmp_arr;
+		channel->reserve_ptr = channel->length;
+		channel->reserve_top = channel->free_ptr;
+		channel->free_ptr = channel->length;
+
+		channel->stats->reserve_free_swaps_cnt++;
+
+		goto _alloc_after_swap;
+	}
+
+	channel->stats->full_cnt++;
+
+	*dtrh = NULL;
+	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+}
+
+/*
+ * vxge_hw_channel_dtr_post - Post a dtr to the channel
+ * @channelh: Channel
+ * @dtrh: DTR pointer
+ *
+ * Posts a dtr to work array.
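+ *
+ * The work array is a simple ring: post_index advances here and wraps at
+ * channel->length, while vxge_hw_channel_dtr_complete() advances
+ * compl_index behind it.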
+ *
+ */
+void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+{
+	vxge_assert(channel->work_arr[channel->post_index] == NULL);
+
+	channel->work_arr[channel->post_index++] = dtrh;
+
+	/* wrap-around */
+	if (channel->post_index == channel->length)
+		channel->post_index = 0;
+}
+
+/*
+ * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
+ * @channel: Channel
+ * @dtr: Buffer to return the next completed DTR pointer
+ *
+ * Returns the next completed dtr without removing it from work array
+ *
+ */
+void
+vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
+{
+	vxge_assert(channel->compl_index < channel->length);
+
+	*dtrh = channel->work_arr[channel->compl_index];
+}
+
+/*
+ * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work
+ * array
+ * @channel: Channel handle
+ *
+ * Removes the next completed dtr from work array
+ *
+ */
+void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
+{
+	channel->work_arr[channel->compl_index] = NULL;
+
+	/* wrap-around */
+	if (++channel->compl_index == channel->length)
+		channel->compl_index = 0;
+
+	channel->stats->total_compl_cnt++;
+}
+
+/*
+ * vxge_hw_channel_dtr_free - Frees a dtr
+ * @channel: Channel handle
+ * @dtr: DTR pointer
+ *
+ * Returns the dtr to free array
+ *
+ */
+void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
+{
+	channel->free_arr[--channel->free_ptr] = dtrh;
+}
+
+/*
+ * vxge_hw_channel_dtr_count
+ * @channel: Channel handle. Obtained via vxge_hw_channel_open().
+ *
+ * Retrieve number of DTRs available. This function cannot be called
+ * from data path. ring_initial_replenish() is the only user.
+ */
+int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
+{
+	return (channel->reserve_ptr - channel->reserve_top) +
+		(channel->length - channel->free_ptr);
+}
+
+/**
+ * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
+ * with a valid handle.
+ *
+ * Reserve Rx descriptor for the subsequent filling-in by the driver
+ * and posting on the corresponding channel (@channelh)
+ * via vxge_hw_ring_rxd_post().
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
+ *
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
+	void **rxdh)
+{
+	enum vxge_hw_status status;
+	struct __vxge_hw_channel *channel;
+
+	channel = &ring->channel;
+
+	status = vxge_hw_channel_dtr_alloc(channel, rxdh);
+
+	if (status == VXGE_HW_OK) {
+		struct vxge_hw_ring_rxd_1 *rxdp =
+			(struct vxge_hw_ring_rxd_1 *)*rxdh;
+
+		rxdp->control_0 = rxdp->control_1 = 0;
+	}
+
+	return status;
+}
+
+/**
+ * vxge_hw_ring_rxd_free - Free descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
+ * be:
+ *
+ * - reserved (vxge_hw_ring_rxd_reserve);
+ *
+ * - posted (vxge_hw_ring_rxd_post);
+ *
+ * - completed (vxge_hw_ring_rxd_next_completed);
+ *
+ * - and recycled again (vxge_hw_ring_rxd_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
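+ *
+ * A receive-path sketch of this lifecycle (assumed calling context;
+ * error handling elided):
+ *
+ *	void *rxdh;
+ *	u8 t_code;
+ *
+ *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
+ *	       VXGE_HW_OK) {
+ *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) ==
+ *		    VXGE_HW_OK)
+ *			(hand the buffer up the stack)
+ *		vxge_hw_ring_rxd_free(ring, rxdh);
+ *	}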
+ *
+ */
+void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
+{
+	struct __vxge_hw_channel *channel;
+
+	channel = &ring->channel;
+
+	vxge_hw_channel_dtr_free(channel, rxdh);
+
+}
+
+/**
+ * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * This routine prepares an rxd and posts it
+ */
+void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+	struct __vxge_hw_channel *channel;
+
+	channel = &ring->channel;
+
+	vxge_hw_channel_dtr_post(channel, rxdh);
+}
+
+/**
+ * vxge_hw_ring_rxd_post_post - Process rxd after post.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Processes rxd after post
+ */
+void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+	struct __vxge_hw_channel *channel;
+
+	channel = &ring->channel;
+
+	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+
+	if (ring->stats->common_stats.usage_cnt > 0)
+		ring->stats->common_stats.usage_cnt--;
+}
+
+/**
+ * vxge_hw_ring_rxd_post - Post descriptor on the ring.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
+ *
+ * Post descriptor on the ring.
+ * Prior to posting the descriptor should be filled in accordance with
+ * Host/Titan interface specification for a given service (LL, etc.).
+ *
+ */
+void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+	struct __vxge_hw_channel *channel;
+
+	channel = &ring->channel;
+
+	wmb();
+	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+
+	vxge_hw_channel_dtr_post(channel, rxdh);
+
+	if (ring->stats->common_stats.usage_cnt > 0)
+		ring->stats->common_stats.usage_cnt--;
+}
+
+/**
+ * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Processes rxd after post with memory barrier.
+ */
+void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
+{
+	struct __vxge_hw_channel *channel;
+
+	channel = &ring->channel;
+
+	wmb();
+	vxge_hw_ring_rxd_post_post(ring, rxdh);
+}
+
+/**
+ * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle. Returned by HW.
+ * @t_code: Transfer code, as per Titan User Guide,
+ *	Receive Descriptor Format. Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses ring callback (*vxge_hw_ring_callback_f) to notify the
+ * driver of new completed descriptors. After that
+ * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
+ * of the completions (the very first completion is passed by HW via
+ * vxge_hw_ring_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_ring_rxd_next_completed either immediately from inside the
+ * ring callback, or in a deferred fashion and separate (from HW)
+ * context.
+ *
+ * Non-zero @t_code means failure to fill-in receive buffer(s)
+ * of the descriptor.
+ * For instance, parity error detected during the data transfer.
+ * In this case Titan will complete the descriptor and indicate
+ * for the host that the received data is not to be used.
+ * For details please refer to Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ * See also: vxge_hw_ring_callback_f{},
+ * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
+{
+	struct __vxge_hw_channel *channel;
+	struct vxge_hw_ring_rxd_1 *rxdp;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	channel = &ring->channel;
+
+	vxge_hw_channel_dtr_try_complete(channel, rxdh);
+
+	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
+	if (rxdp == NULL) {
+		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+		goto exit;
+	}
+
+	/* check whether it is not the end */
+	if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+
+		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+				0);
+
+		++ring->cmpl_cnt;
+		vxge_hw_channel_dtr_complete(channel);
+
+		*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
+
+		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
+
+		ring->stats->common_stats.usage_cnt++;
+		if (ring->stats->common_stats.usage_max <
+				ring->stats->common_stats.usage_cnt)
+			ring->stats->common_stats.usage_max =
+				ring->stats->common_stats.usage_cnt;
+
+		status = VXGE_HW_OK;
+		goto exit;
+	}
+
+	/* reset it. since we don't want to return
+	 * garbage to the driver */
+	*rxdh = NULL;
+	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_ring_handle_tcode - Handle transfer code.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ * "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK - for success.
+ * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
+ */
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
+{
+	struct __vxge_hw_channel *channel;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	channel = &ring->channel;
+
+	/* If the t_code is not supported and is other than 0x5 (an
+	 * unparseable packet, such as an unknown IPv6 header), drop it!
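+	 * (t_code 0 is a normal completion; values above 0xF do not fit
+	 * the transfer-code field and are rejected below as
+	 * VXGE_HW_ERR_INVALID_TCODE.)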
+	 */
+
+	if (t_code == 0 || t_code == 5) {
+		status = VXGE_HW_OK;
+		goto exit;
+	}
+
+	if (t_code > 0xF) {
+		status = VXGE_HW_ERR_INVALID_TCODE;
+		goto exit;
+	}
+
+	ring->stats->rxd_t_code_err_cnt[t_code]++;
+exit:
+	return status;
+}
+
+/**
+ * __vxge_hw_non_offload_db_post - Post non offload doorbell
+ *
+ * @fifo: Fifo handle
+ * @txdl_ptr: The starting location of the TxDL in host memory
+ * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
+ * @no_snoop: No snoop flags
+ *
+ * This function posts a non-offload doorbell to doorbell FIFO
+ *
+ */
+static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
+	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
+{
+	struct __vxge_hw_channel *channel;
+
+	channel = &fifo->channel;
+
+	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
+		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
+		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
+		&fifo->nofl_db->control_0);
+
+	wmb();
+
+	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
+	wmb();
+
+}
+
+/**
+ * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
+ * the fifo
+ * @fifoh: Handle to the fifo object used for non offload send
+ */
+u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
+{
+	return vxge_hw_channel_dtr_count(&fifoh->channel);
+}
+
+/**
+ * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
+ * @fifoh: Handle to the fifo object used for non offload send
+ * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
+ *        with a valid handle.
+ * @txdl_priv: Buffer to return the pointer to per txdl space
+ *
+ * Reserve a single TxDL (that is, fifo descriptor)
+ * for the subsequent filling-in by the driver
+ * and posting on the corresponding channel (@channelh)
+ * via vxge_hw_fifo_txdl_post().
+ *
+ * Note: it is the responsibility of driver to reserve multiple descriptors
+ * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
+ * carries up to configured number (fifo.max_frags) of contiguous buffers.
+ *
+ * Returns: VXGE_HW_OK - success;
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
+ *
+ */
+enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
+	struct __vxge_hw_fifo *fifo,
+	void **txdlh, void **txdl_priv)
+{
+	struct __vxge_hw_channel *channel;
+	enum vxge_hw_status status;
+	int i;
+
+	channel = &fifo->channel;
+
+	status = vxge_hw_channel_dtr_alloc(channel, txdlh);
+
+	if (status == VXGE_HW_OK) {
+		struct vxge_hw_fifo_txd *txdp =
+			(struct vxge_hw_fifo_txd *)*txdlh;
+		struct __vxge_hw_fifo_txdl_priv *priv;
+
+		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+
+		/* reset the TxDL's private */
+		priv->align_dma_offset = 0;
+		priv->align_vaddr_start = priv->align_vaddr;
+		priv->align_used_frags = 0;
+		priv->frags = 0;
+		priv->alloc_frags = fifo->config->max_frags;
+		priv->next_txdl_priv = NULL;
+
+		*txdl_priv = (void *)(size_t)txdp->host_control;
+
+		for (i = 0; i < fifo->config->max_frags; i++) {
+			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
+			txdp->control_0 = txdp->control_1 = 0;
+		}
+	}
+
+	return status;
+}
+
+/**
+ * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
+ * descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
+ *            (of buffers).
+ * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
+ * @size: Size of the data buffer (in bytes).
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
+ * All three APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
+ *
+ */
+void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
+				  void *txdlh, u32 frag_idx,
+				  dma_addr_t dma_pointer, u32 size)
+{
+	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+	struct vxge_hw_fifo_txd *txdp, *txdp_last;
+	struct __vxge_hw_channel *channel;
+
+	channel = &fifo->channel;
+
+	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
+	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
+
+	if (frag_idx != 0)
+		txdp->control_0 = txdp->control_1 = 0;
+	else {
+		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
+			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
+		txdp->control_1 |= fifo->interrupt_type;
+		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
+			fifo->tx_intr_num);
+		if (txdl_priv->frags) {
+			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
+			(txdl_priv->frags - 1);
+			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
+				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
+		}
+	}
+
+	vxge_assert(frag_idx < txdl_priv->alloc_frags);
+
+	txdp->buffer_pointer = (u64)dma_pointer;
+	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
+	fifo->stats->total_buffers++;
+	txdl_priv->frags++;
+}
+
+/**
+ * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
+ * @frags: Number of contiguous buffers that are part of a single
+ *         transmit operation.
+ *
+ * Post descriptor on the 'fifo' type channel for transmission.
+ * Prior to posting the descriptor should be filled in accordance with
+ * Host/Titan interface specification for a given service (LL, etc.).
+ *
+ */
+void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
+{
+	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+	struct vxge_hw_fifo_txd *txdp_last;
+	struct vxge_hw_fifo_txd *txdp_first;
+	struct __vxge_hw_channel *channel;
+
+	channel = &fifo->channel;
+
+	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
+	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
+
+	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
+	txdp_last->control_0 |=
+	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
+	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
+
+	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
+
+	__vxge_hw_non_offload_db_post(fifo,
+		(u64)(size_t)txdl_priv->dma_addr,
+		txdl_priv->frags - 1,
+		fifo->no_snoop_bits);
+
+	fifo->stats->total_posts++;
+	fifo->stats->common_stats.usage_cnt++;
+	if (fifo->stats->common_stats.usage_max <
+		fifo->stats->common_stats.usage_cnt)
+		fifo->stats->common_stats.usage_max =
+			fifo->stats->common_stats.usage_cnt;
+}
+
+/**
+ * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle. Returned by HW.
+ * @t_code: Transfer code, as per Titan User Guide,
+ *          Transmit Descriptor Format.
+ *          Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses channel callback (*vxge_hw_channel_callback_f) to notify the
+ * driver of new completed descriptors.
+ * After that the driver can use vxge_hw_fifo_txdl_next_completed to
+ * retrieve the rest of the completions (the very first completion is
+ * passed by HW via vxge_hw_channel_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_fifo_txdl_next_completed either immediately from inside the
+ * channel callback, or in a deferred fashion and separate (from HW)
+ * context.
+ *
+ * Non-zero @t_code means failure to process the descriptor.
+ * The failure could happen, for instance, when the link is
+ * down, in which case Titan completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ */
+enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
+	struct __vxge_hw_fifo *fifo, void **txdlh,
+	enum vxge_hw_fifo_tcode *t_code)
+{
+	struct __vxge_hw_channel *channel;
+	struct vxge_hw_fifo_txd *txdp;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	channel = &fifo->channel;
+
+	vxge_hw_channel_dtr_try_complete(channel, txdlh);
+
+	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
+	if (txdp == NULL) {
+		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+		goto exit;
+	}
+
+	/* check whether host owns it */
+	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
+
+		vxge_assert(txdp->host_control != 0);
+
+		vxge_hw_channel_dtr_complete(channel);
+
+		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
+
+		if (fifo->stats->common_stats.usage_cnt > 0)
+			fifo->stats->common_stats.usage_cnt--;
+
+		status = VXGE_HW_OK;
+		goto exit;
+	}
+
+	/* no more completions */
+	*txdlh = NULL;
+	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_fifo_handle_tcode - Handle transfer code.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ *          "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK - for success.
+ * VXGE_HW_ERR_CRITICAL - when it encounters a critical error.
+ */
+enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
+					      void *txdlh,
+					      enum vxge_hw_fifo_tcode t_code)
+{
+	struct __vxge_hw_channel *channel;
+
+	enum vxge_hw_status status = VXGE_HW_OK;
+	channel = &fifo->channel;
+
+	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
+		status = VXGE_HW_ERR_INVALID_TCODE;
+		goto exit;
+	}
+
+	fifo->stats->txd_t_code_err_cnt[t_code]++;
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_fifo_txdl_free - Free descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
+ * be:
+ *
+ * - reserved (vxge_hw_fifo_txdl_reserve);
+ *
+ * - posted (vxge_hw_fifo_txdl_post);
+ *
+ * - completed (vxge_hw_fifo_txdl_next_completed);
+ *
+ * - and recycled again (vxge_hw_fifo_txdl_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
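+ *
+ * A transmit-path sketch of this lifecycle (single fragment; dma_addr,
+ * len and the completion context are assumed):
+ *
+ *	void *txdlh, *txdl_priv;
+ *	enum vxge_hw_fifo_tcode t_code;
+ *
+ *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) ==
+ *	    VXGE_HW_OK) {
+ *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
+ *		vxge_hw_fifo_txdl_post(fifo, txdlh);
+ *	}
+ *	(later, on completion)
+ *	if (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
+ *	    VXGE_HW_OK) {
+ *		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
+ *		vxge_hw_fifo_txdl_free(fifo, txdlh);
+ *	}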
+ *
+ */
+void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
+{
+	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+	u32 max_frags;
+	struct __vxge_hw_channel *channel;
+
+	channel = &fifo->channel;
+
+	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
+			(struct vxge_hw_fifo_txd *)txdlh);
+
+	max_frags = fifo->config->max_frags;
+
+	vxge_hw_channel_dtr_free(channel, txdlh);
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
+ *               to MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be added for this vpath into the list
+ * @macaddr_mask: MAC address mask for macaddr
+ * @duplicate_mode: Duplicate MAC address add mode. Please see
+ *             enum vxge_hw_vpath_mac_addr_add_mode{}
+ *
+ * Adds the given mac address and mac address mask into the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_add(
+	struct __vxge_hw_vpath_handle *vp,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN],
+	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
+{
+	u32 i;
+	u64 data1 = 0ULL;
+	u64 data2 = 0ULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		data1 <<= 8;
+		data1 |= (u8)macaddr[i];
+
+		data2 <<= 8;
+		data2 |= (u8)macaddr_mask[i];
+	}
+
+	switch (duplicate_mode) {
+	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
+		i = 0;
+		break;
+	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
+		i = 1;
+		break;
+	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
+		i = 2;
+		break;
+	default:
+		i = 0;
+		break;
+	}
+
+	status = __vxge_hw_vpath_rts_table_set(vp,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+			0,
+			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
+			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
+ *               from MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: First MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the first mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get(
+	struct __vxge_hw_vpath_handle *vp,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN])
+{
+	u32 i;
+	u64 data1 = 0ULL;
+	u64 data2 = 0ULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_get(vp,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+		0, &data1, &data2);
+
+	if (status != VXGE_HW_OK)
+		goto exit;
+
+	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+	for (i = ETH_ALEN; i > 0; i--) {
+		macaddr[i-1] = (u8)(data1 & 0xFF);
+		data1 >>= 8;
+
+		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+		data2 >>= 8;
+	}
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
+ * vpath from MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: Next MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the next mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get_next(
+	struct __vxge_hw_vpath_handle *vp,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN])
+{
+	u32 i;
+	u64 data1 = 0ULL;
+	u64 data2 = 0ULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_get(vp,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+		0, &data1, &data2);
+
+	if (status != VXGE_HW_OK)
+		goto exit;
+
+	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+	for (i = ETH_ALEN; i > 0; i--) {
+		macaddr[i-1] = (u8)(data1 & 0xFF);
+		data1 >>= 8;
+
+		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+		data2 >>= 8;
+	}
+
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
+ *               from MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be deleted for this vpath from the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Deletes the given mac address and mac address mask from the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+	struct __vxge_hw_vpath_handle *vp,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN])
+{
+	u32 i;
+	u64 data1 = 0ULL;
+	u64 data2 = 0ULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		data1 <<= 8;
+		data1 |= (u8)macaddr[i];
+
+		data2 <<= 8;
+		data2 |= (u8)macaddr_mask[i];
+	}
+
+	status = __vxge_hw_vpath_rts_table_set(vp,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+			0,
+			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
+ *               to vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
+ *
+ * Adds the given vlan id into the list for this vpath.
+ * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_set(vp,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
+ *               from vlan id table.
+ * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
+ *
+ * Returns the first vlan id in the list for this vpath.
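+ *
+ * Iteration sketch: the get/get_next pair walks the table until the
+ * steering-table read stops returning VXGE_HW_OK (assumed termination
+ * behaviour):
+ *
+ *	u64 vid;
+ *	enum vxge_hw_status s;
+ *
+ *	for (s = vxge_hw_vpath_vid_get(vp, &vid); s == VXGE_HW_OK;
+ *	     s = vxge_hw_vpath_vid_get_next(vp, &vid))
+ *		(use vid)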
+ * see also: vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
+{
+	u64 data;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_get(vp,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+			0, vid, &data);
+
+	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
+ *               from vlan id table.
+ * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
+ *
+ * Returns the next vlan id in the list for this vpath.
+ * see also: vxge_hw_vpath_vid_get
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
+{
+	u64 data;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_get(vp,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+			0, vid, &data);
+
+	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
+ *               from vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be deleted for this vpath from the list
+ *
+ * Deletes the given vlan id from the list for this vpath.
+ * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_set(vp,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Enable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_disable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+			struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	/* Enable promiscuous mode for function 0 only */
+	if (!(vpath->hldev->access_rights &
+		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
+		return VXGE_HW_OK;
+
+	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
+
+		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
+			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
+
+		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+	}
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Disable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_enable().
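+ *
+ * Both enable and disable are read-modify-write updates of the "all
+ * address" bits in rxmac_vcfg0; note that disable clears the UCAST,
+ * MCAST and ALL_VID bits but leaves VXGE_HW_RXMAC_VCFG0_BCAST_EN set.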
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+			struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
+
+		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
+
+		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+	}
+exit:
+	return status;
+}
+
+/*
+ * vxge_hw_vpath_bcast_enable - Enable broadcast
+ * @vp: Vpath handle.
+ *
+ * Enable receiving broadcasts.
+ */
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+			struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
+		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
+		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+	}
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Enable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK on success.
+ *
+ */
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+			struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
+		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+	}
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Disable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+{
+	u64 val64;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	vpath = vp->vpath;
+
+	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
+		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+	}
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
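+ *
+ * The individual alarm sources are folded into one overall severity via
+ * VXGE_HW_SET_LEVEL(); the most severe event seen determines the return
+ * value (slot-freeze, then serr, then fifo error, then generic vpath
+ * error).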
+ *
+ */
+enum vxge_hw_status __vxge_hw_vpath_alarm_process(
+			struct __vxge_hw_virtualpath *vpath,
+			u32 skip_alarms)
+{
+	u64 val64;
+	u64 alarm_status;
+	u64 pic_status;
+	struct __vxge_hw_device *hldev = NULL;
+	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+	u64 mask64;
+	struct vxge_hw_vpath_stats_sw_info *sw_stats;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath == NULL) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out;
+	}
+
+	hldev = vpath->hldev;
+	vp_reg = vpath->vp_reg;
+	alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+	if (alarm_status == VXGE_HW_ALL_FOXES) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+			alarm_event);
+		goto out;
+	}
+
+	sw_stats = vpath->sw_stats;
+
+	if (alarm_status & ~(
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+		sw_stats->error_stats.unknown_alarms++;
+
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out;
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+		val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+		if (val64 &
+		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+			if (((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+			    (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+			    ((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+			    && (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+			    ))) {
+				sw_stats->error_stats.network_sustained_fault++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_down_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+			}
+
+			if (((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+			    (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+			    ((val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+			    && (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+			    ))) {
+
+				sw_stats->error_stats.network_sustained_ok++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_up_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_UP, alarm_event);
+			}
+
+			writeq(VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_reg);
+
+			alarm_event = VXGE_HW_SET_LEVEL(
+				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+			if (skip_alarms)
+				return VXGE_HW_OK;
+		}
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+		pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+			val64 = readq(&vp_reg->general_errors_reg);
+			mask64 = readq(&vp_reg->general_errors_mask);
+
+			if ((val64 &
+				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+				~mask64) {
+				sw_stats->error_stats.ini_serr_det++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_SERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+				~mask64) {
+				sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+				~mask64)
+				sw_stats->error_stats.statsb_pif_chain_error++;
+
+			if ((val64 &
+			VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;
+
+			if ((val64 &
+			VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+				~mask64)
+				sw_stats->error_stats.target_illegal_access++;
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->general_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+			val64 = readq(&vp_reg->kdfcctl_errors_reg);
+			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->kdfcctl_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+		val64 = readq(&vp_reg->wrdma_alarm_status);
+
+		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+			val64 = readq(&vp_reg->prc_alarm_reg);
+			mask64 = readq(&vp_reg->prc_alarm_mask);
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
+				~mask64)
+				sw_stats->error_stats.prc_ring_bumps++;
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+				~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+				& ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+				& ~mask64) {
+				sw_stats->error_stats.prc_quanta_size_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->prc_alarm_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+	}
+out:
+	/* vpath may be NULL here; hldev is only assigned when it was not */
+	if (hldev)
+		hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
+		return VXGE_HW_OK;
+
+	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+	if (alarm_event == VXGE_HW_EVENT_SERR)
+		return VXGE_HW_ERR_CRITICAL;
+
+	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+		VXGE_HW_ERR_SLOT_FREEZE :
+		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+		VXGE_HW_ERR_VPATH;
+}
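The helper above accumulates a severity with VXGE_HW_SET_LEVEL (defined in vxge-traffic.h further down as (((a) > (b)) ? (a) : (b))). Since the vxge_hw_event codes grow with severity (ALARM_CLEARED < FIFO_ERR < SERR < SLOT_FREEZE), alarm_event can only ever be escalated, never downgraded, regardless of the order in which the individual alarm bits are inspected. A toy illustration, not driver code:

    /* Sketch: severity only ratchets upward under VXGE_HW_SET_LEVEL. */
    enum vxge_hw_event ev = VXGE_HW_EVENT_UNKNOWN;

    ev = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_FIFO_ERR, ev);      /* ev = FIFO_ERR */
    ev = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_ALARM_CLEARED, ev); /* still FIFO_ERR */

This is also why the out: label can be shared by the vpath == NULL early exit: the UNKNOWN event falls through to the VXGE_HW_OK return, and the statistics bump is guarded on hldev, which is never assigned on that path.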
+
+/*
+ * vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vp: Virtual Path handle.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ */
+enum vxge_hw_status vxge_hw_vpath_alarm_process(
+			struct __vxge_hw_vpath_handle *vp,
+			u32 skip_alarms)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
+ *                          alarms
+ * @vp: Virtual Path handle.
+ * @tim_msix_id: MSIX vectors associated with the VXGE_HW_MAX_INTR_PER_VP
+ *             TIM interrupts (values may be repeated). If the fifo or ring
+ *             is not enabled, the MSIX vector for it should be set to 0.
+ * @alarm_msix_id: MSIX vector for the alarm.
+ *
+ * This API associates the given MSIX vector numbers with the four TIM
+ * interrupts and the alarm interrupt.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
+			int alarm_msix_id)
+{
+	u64 val64;
+	struct __vxge_hw_virtualpath *vpath = vp->vpath;
+	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
+	u32 first_vp_id = vpath->hldev->first_vp_id;
+
+	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
+			(first_vp_id * 4) + tim_msix_id[0]) |
+		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
+			(first_vp_id * 4) + tim_msix_id[1]) |
+		VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
+			(first_vp_id * 4) + tim_msix_id[2]);
+
+	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
+			(first_vp_id * 4) + tim_msix_id[3]);
+
+	writeq(val64, &vp_reg->interrupt_cfg0);
+
+	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
+			(first_vp_id * 4) + alarm_msix_id),
+			&vp_reg->interrupt_cfg2);
+
+	if (vpath->hldev->config.intr_mode ==
+			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
+				0, 32), &vp_reg->one_shot_vect1_en);
+
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
+				0, 32), &vp_reg->one_shot_vect2_en);
+
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
+				0, 32), &vp_reg->one_shot_vect3_en);
+	}
+
+	return VXGE_HW_OK;
+}
+
+/**
+ * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function masks the msix interrupt for the given msix_id.
+ */
+void
+vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+			(msix_id / 4)), 0, 32),
+		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
+}
+
+/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function clears the msix interrupt for the given msix_id.
+ */
+void
+vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+		__vxge_hw_pio_mem_write32_upper(
+			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+				(msix_id / 4)), 0, 32),
+			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+	} else {
+		__vxge_hw_pio_mem_write32_upper(
+			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+				(msix_id / 4)), 0, 32),
+			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+	}
+}
+
+/**
+ * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id.
+ */
+void
+vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+			(msix_id / 4)), 0, 32),
+		&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
+ * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
+ * @vp: Virtual Path handle.
+ *
+ * The function masks all msix interrupts for the given vpath.
+ */
+void
+vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
+{
+	__vxge_hw_pio_mem_write32_upper(
+		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
+		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
+}
+
+/**
+ * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Mask Tx and Rx vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_inta_unmask_tx_rx()
+ */
+void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
+{
+	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
+	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
+	u64 val64;
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
+		tim_int_mask1, vp->vpath->vp_id);
+
+	val64 = readq(&hldev->common_reg->tim_int_mask0);
+
+	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
+			&hldev->common_reg->tim_int_mask0);
+	}
+
+	val64 = readl(&hldev->common_reg->tim_int_mask1);
+
+	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		__vxge_hw_pio_mem_write32_upper(
+			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
+			&hldev->common_reg->tim_int_mask1);
+	}
+}
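A typical consumer of the masking helpers above is a per-vpath MSI-X interrupt handler: quiesce the vector on entry, then ack and re-enable it once the completion work has been queued. The sketch below is illustrative only; vxge_vpath_isr() and the assumption that vector 0 carries this vpath's TIM interrupt are mine, not part of this patch:

    /* Hypothetical per-vpath MSI-X handler built on the helpers above. */
    static irqreturn_t vxge_vpath_isr(int irq, void *dev_id)
    {
    	struct __vxge_hw_vpath_handle *vp = dev_id;
    	int msix_id = 0;	/* assumed: this vpath's TIM vector */

    	vxge_hw_vpath_msix_mask(vp, msix_id);	/* quiesce the vector */
    	/* ... schedule deferred Tx/Rx completion processing here ... */
    	vxge_hw_vpath_msix_clear(vp, msix_id);	/* ack (one-shot aware) */
    	vxge_hw_vpath_msix_unmask(vp, msix_id);	/* let it fire again */
    	return IRQ_HANDLED;
    }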
+
+/**
+ * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Unmask Tx and Rx vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_inta_mask_tx_rx()
+ */
+void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
+{
+	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
+	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
+	u64 val64;
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
+		tim_int_mask1, vp->vpath->vp_id);
+
+	val64 = readq(&hldev->common_reg->tim_int_mask0);
+
+	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
+			&hldev->common_reg->tim_int_mask0);
+	}
+
+	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+		__vxge_hw_pio_mem_write32_upper(
+			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+			tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
+			&hldev->common_reg->tim_int_mask1);
+	}
+}
+
+/**
+ * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
+ * descriptors and process the same.
+ * @ring: Handle to the ring object used for receive
+ *
+ * The function polls the Rx for the completed descriptors and calls
+ * the driver via supplied completion callback.
+ *
+ * Returns: VXGE_HW_OK, if the polling is completed successfully.
+ * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: vxge_hw_vpath_poll_tx()
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
+{
+	u8 t_code;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	void *first_rxdh;
+	u64 val64 = 0;
+	int new_count = 0;
+
+	ring->cmpl_cnt = 0;
+
+	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
+	if (status == VXGE_HW_OK)
+		ring->callback(ring, first_rxdh,
+			t_code, ring->channel.userdata);
+
+	if (ring->cmpl_cnt != 0) {
+		ring->doorbell_cnt += ring->cmpl_cnt;
+		if (ring->doorbell_cnt >= ring->rxds_limit) {
+			/*
+			 * Each RxD is of 4 qwords, update the number of
+			 * qwords replenished
+			 */
+			new_count = (ring->doorbell_cnt * 4);
+
+			/* For each block add 4 more qwords */
+			ring->total_db_cnt += ring->doorbell_cnt;
+			if (ring->total_db_cnt >= ring->rxds_per_block) {
+				new_count += 4;
+				/* Reset total count */
+				ring->total_db_cnt %= ring->rxds_per_block;
+			}
+			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
+				&ring->vp_reg->prc_rxd_doorbell);
+			/* read back to flush the posted doorbell write */
+			val64 =
+			  readl(&ring->common_reg->titan_general_int_status);
+			ring->doorbell_cnt = 0;
+		}
+	}
+
+	return status;
+}
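In a NAPI-style driver, vxge_hw_vpath_poll_rx() above and vxge_hw_vpath_poll_tx() below would typically be driven from the poll callback, with interrupts re-enabled through vxge_hw_vpath_inta_unmask_tx_rx() once the work is done. A hedged sketch; struct vxge_vpath_ctx, its fields and vxge_napi_poll() are assumed names, not part of this patch:

    /* Hypothetical NAPI poll built on the two polling entry points. */
    struct vxge_vpath_ctx {
    	struct napi_struct napi;
    	struct __vxge_hw_ring *ring;
    	struct __vxge_hw_fifo *fifo;
    	struct __vxge_hw_vpath_handle *vp;
    };

    static int vxge_napi_poll(struct napi_struct *napi, int budget)
    {
    	struct vxge_vpath_ctx *ctx =
    		container_of(napi, struct vxge_vpath_ctx, napi);
    	void *skb_list = NULL;

    	vxge_hw_vpath_poll_rx(ctx->ring);	/* RxDs completed via callback */
    	vxge_hw_vpath_poll_tx(ctx->fifo, &skb_list); /* TxDLs reclaimed */

    	/* assume the callbacks honoured the budget and freed skb_list */
    	napi_complete(napi);
    	vxge_hw_vpath_inta_unmask_tx_rx(ctx->vp);
    	return 0;
    }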
+
+/**
+ * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
+ * the same.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @skb_ptr: Passed through to the driver's completion callback.
+ *
+ * The function polls the Tx for the completed descriptors and calls
+ * the driver via supplied completion callback.
+ *
+ * Returns: VXGE_HW_OK, if the polling is completed successfully.
+ * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: vxge_hw_vpath_poll_rx().
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
+					void **skb_ptr)
+{
+	enum vxge_hw_fifo_tcode t_code;
+	void *first_txdlh;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct __vxge_hw_channel *channel;
+
+	channel = &fifo->channel;
+
+	status = vxge_hw_fifo_txdl_next_completed(fifo,
+				&first_txdlh, &t_code);
+	if (status == VXGE_HW_OK)
+		if (fifo->callback(fifo, first_txdlh,
+			t_code, channel->userdata, skb_ptr) != VXGE_HW_OK)
+			status = VXGE_HW_COMPLETIONS_REMAIN;
+
+	return status;
+}
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
new file mode 100644
index 00000000000..7567a1140d0
--- /dev/null
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -0,0 +1,2409 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-traffic.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+ *                 Virtualized Server Adapter.
+ * Copyright(c) 2002-2009 Neterion Inc.
+ ******************************************************************************/
+#ifndef VXGE_TRAFFIC_H
+#define VXGE_TRAFFIC_H
+
+#include "vxge-reg.h"
+#include "vxge-version.h"
+
+#define VXGE_HW_DTR_MAX_T_CODE		16
+#define VXGE_HW_ALL_FOXES		0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_INTR_MASK_ALL		0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_MAX_VIRTUAL_PATHS	17
+
+#define VXGE_HW_MAC_MAX_MAC_PORT_ID	2
+
+#define VXGE_HW_DEFAULT_32		0xffffffff
+/* frames sizes */
+#define VXGE_HW_HEADER_802_2_SIZE	3
+#define VXGE_HW_HEADER_SNAP_SIZE	5
+#define VXGE_HW_HEADER_VLAN_SIZE	4
+#define VXGE_HW_MAC_HEADER_MAX_SIZE \
+			(ETH_HLEN + \
+			VXGE_HW_HEADER_802_2_SIZE + \
+			VXGE_HW_HEADER_VLAN_SIZE + \
+			VXGE_HW_HEADER_SNAP_SIZE)
+
+#define VXGE_HW_TCPIP_HEADER_MAX_SIZE	(64 + 64)
+
+/* 32bit alignments */
+#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN		2
+#define VXGE_HW_HEADER_802_2_SNAP_ALIGN			2
+#define VXGE_HW_HEADER_802_2_ALIGN			3
+#define VXGE_HW_HEADER_SNAP_ALIGN			1
+
+#define VXGE_HW_L3_CKSUM_OK				0xFFFF
+#define VXGE_HW_L4_CKSUM_OK				0xFFFF
+
+/* Forward declarations */
+struct __vxge_hw_device;
+struct __vxge_hw_vpath_handle;
+struct vxge_hw_vp_config;
+struct __vxge_hw_virtualpath;
+struct __vxge_hw_channel;
+struct __vxge_hw_fifo;
+struct __vxge_hw_ring;
+struct vxge_hw_ring_attr;
+struct vxge_hw_mempool;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/*VXGE_HW_STATUS_H*/
+
+#define VXGE_HW_EVENT_BASE			0
+#define VXGE_LL_EVENT_BASE			100
+
+/**
+ * enum vxge_hw_event - Enumerates slow-path HW events.
+ * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
+ * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
+ * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
+ * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
+ * @VXGE_HW_EVENT_FIFO_ERR: Doorbell FIFO error.
+ * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
+ * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
+ * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
+ * @VXGE_HW_EVENT_LINK_DOWN: Link went down.
+ * @VXGE_HW_EVENT_LINK_UP: Link came up.
+ * @VXGE_HW_EVENT_ALARM_CLEARED: Alarm(s) cleared.
+ * @VXGE_HW_EVENT_CRITICAL_ERR: Critical vpath hardware error event.
+ * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
+ * slot-freeze from the rest of the critical events (e.g. ECC) when it is
+ * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
+ *
+ * enum vxge_hw_event enumerates slow-path HW events.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_uld_link_down_f{}.
+ */
+enum vxge_hw_event {
+	VXGE_HW_EVENT_UNKNOWN		= 0,
+	/* HW events */
+	VXGE_HW_EVENT_RESET_START	= VXGE_HW_EVENT_BASE + 1,
+	VXGE_HW_EVENT_RESET_COMPLETE	= VXGE_HW_EVENT_BASE + 2,
+	VXGE_HW_EVENT_LINK_DOWN		= VXGE_HW_EVENT_BASE + 3,
+	VXGE_HW_EVENT_LINK_UP		= VXGE_HW_EVENT_BASE + 4,
+	VXGE_HW_EVENT_ALARM_CLEARED	= VXGE_HW_EVENT_BASE + 5,
+	VXGE_HW_EVENT_ECCERR		= VXGE_HW_EVENT_BASE + 6,
+	VXGE_HW_EVENT_MRPCIM_ECCERR	= VXGE_HW_EVENT_BASE + 7,
+	VXGE_HW_EVENT_FIFO_ERR		= VXGE_HW_EVENT_BASE + 8,
+	VXGE_HW_EVENT_VPATH_ERR		= VXGE_HW_EVENT_BASE + 9,
+	VXGE_HW_EVENT_CRITICAL_ERR	= VXGE_HW_EVENT_BASE + 10,
+	VXGE_HW_EVENT_SERR		= VXGE_HW_EVENT_BASE + 11,
+	VXGE_HW_EVENT_SRPCIM_SERR	= VXGE_HW_EVENT_BASE + 12,
+	VXGE_HW_EVENT_MRPCIM_SERR	= VXGE_HW_EVENT_BASE + 13,
+	VXGE_HW_EVENT_SLOT_FREEZE	= VXGE_HW_EVENT_BASE + 14,
+};
+
+#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
+
+/*
+ * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
+ * caller.
+ */
+struct vxge_hw_mempool_dma {
+	dma_addr_t			addr;
+	struct pci_dev			*handle;
+	struct pci_dev			*acc_handle;
+};
+
+/*
+ * vxge_hw_mempool_item_f - Mempool item alloc/free callback
+ * @mempoolh: Memory pool handle.
+ * @memblock: Address of memory block
+ * @memblock_index: Index of memory block
+ * @item: Item that gets allocated or freed.
+ * @index: Item's index in the memory pool.
+ * @is_last: True, if this item is the last one in the pool; false - otherwise.
+ * @userdata: Per-pool user context.
+ *
+ * Memory pool allocation/deallocation callback.
+ */
+
+/*
+ * struct vxge_hw_mempool - Memory pool.
+ */
+struct vxge_hw_mempool {
+
+	void (*item_func_alloc)(
+			struct vxge_hw_mempool *mempoolh,
+			u32 memblock_index,
+			struct vxge_hw_mempool_dma *dma_object,
+			u32 index,
+			u32 is_last);
+
+	void		*userdata;
+	void		**memblocks_arr;
+	void		**memblocks_priv_arr;
+	struct vxge_hw_mempool_dma	*memblocks_dma_arr;
+	struct __vxge_hw_device *devh;
+	u32		memblock_size;
+	u32		memblocks_max;
+	u32		memblocks_allocated;
+	u32		item_size;
+	u32		items_max;
+	u32		items_initial;
+	u32		items_current;
+	u32		items_per_memblock;
+	void		**items_arr;
+	u32		items_priv_size;
+};
+
+#define	VXGE_HW_MAX_INTR_PER_VP			4
+#define	VXGE_HW_VPATH_INTR_TX			0
+#define	VXGE_HW_VPATH_INTR_RX			1
+#define	VXGE_HW_VPATH_INTR_EINTA		2
+#define	VXGE_HW_VPATH_INTR_BMAP			3
+
+#define VXGE_HW_BLOCK_SIZE			4096
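The VXGE_HW_VPATH_INTR_* values above are array indices: they select the slot inside the VXGE_HW_MAX_INTR_PER_VP-sized tables consumed by vxge_hw_vpath_msix_set() and VXGE_HW_DEVICE_TIM_INT_MASK_SET(). One plausible, purely illustrative setup, mapping the fifo and ring onto two MSI-X vectors; the vector numbers and the alarm-vector choice are assumptions:

    /* Sketch: map the per-vpath TIM interrupts onto MSI-X vectors. */
    static enum vxge_hw_status
    vxge_setup_vpath_vectors(struct __vxge_hw_vpath_handle *vp)
    {
    	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0};

    	tim_msix_id[VXGE_HW_VPATH_INTR_TX] = 0;	/* fifo -> vector 0 */
    	tim_msix_id[VXGE_HW_VPATH_INTR_RX] = 1;	/* ring -> vector 1 */
    	/* EINTA/BMAP entries stay 0: unused slots are 0 per the API doc */

    	return vxge_hw_vpath_msix_set(vp, tim_msix_id, 2 /* alarm, assumed */);
    }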
+
+/**
+ * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
+ * @intr_enable: Set to 1, if interrupt is enabled.
+ * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
+ * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
+ * asserted, other interrupt-generating entities will cancel the
+ * scheduled timer interrupt.
+ * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
+ * When asserted, an interrupt will be generated every time the
+ * boundary timer expires, even if no traffic has been transmitted
+ * on this interrupt.
+ * @timer_ri_en: Timer Consecutive (Re-) Interrupt. 1 : Consecutive
+ * (Re-) Interrupt Enable: When asserted, an interrupt will be
+ * generated the next time the timer expires, even if no traffic has
+ * been transmitted on this interrupt. (This will only happen once
+ * each time that this value is written to the TIM.) This bit is
+ * cleared by H/W at the end of the current-timer-interval when
+ * the interrupt is triggered.
+ * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
+ * @util_sel: Utilization Selector. Selects which of the workload approximations
+ * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
+ * specified utilization etc.); selects one of
+ * the 17 host configured values.
+ * 0-Virtual Path 0
+ * 1-Virtual Path 1
+ * ...
+ * 16-Virtual Path 16
+ * 17-Legacy Tx network utilization, provided by TPA
+ * 18-Legacy Rx network utilization, provided by FAU
+ * 19-Average of legacy Rx and Tx utilization calculated from link
+ * utilization values.
+ * 20-31-Invalid configurations
+ * 32-Host utilization for Virtual Path 0
+ * 33-Host utilization for Virtual Path 1
+ * ...
+ * 48-Host utilization for Virtual Path 16
+ * 49-Legacy Tx network utilization, provided by TPA
+ * 50-Legacy Rx network utilization, provided by FAU
+ * 51-Average of legacy Rx and Tx utilization calculated from
+ * link utilization values.
+ * 52-63-Invalid configurations
+ * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
+ * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
+ * to 1 enables counting of TxD0 returns (signalled by PCC's)
+ * towards utilization event count values.
+ * @urange_a: Defines the upper limit (in percent) for this utilization range
+ * to be active. This range is considered active
+ * if 0 <= UTIL <= URNG_A
+ * and the UEC_A field (below) is non-zero.
+ * @uec_a: Utilization Event Count A. If this range is active, the adapter will
+ * wait until UEC_A events have occurred on the interrupt before
+ * generating an interrupt.
+ * @urange_b: Link utilization range B.
+ * @uec_b: Utilization Event Count B.
+ * @urange_c: Link utilization range C.
+ * @uec_c: Utilization Event Count C.
+ * @urange_d: Link utilization range D.
+ * @uec_d: Utilization Event Count D.
+ *
+ * Traffic Interrupt Controller Module interrupt configuration.
+ */
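Concretely, a driver fills one struct vxge_hw_tim_intr_config (defined next) per timer. The values below are illustrative only, chosen to sit inside the documented bounds; they are not defaults shipped by this patch:

    /* Sketch: mild Rx coalescing - boundary timer of 100 * 272 ns ~= 27 us. */
    struct vxge_hw_tim_intr_config rti_cfg = {
    	.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
    	.btimer_val	= 100,	/* within [0, 67108864] */
    	.timer_ac_en	= VXGE_HW_TIM_TIMER_AC_ENABLE,
    	.timer_ci_en	= VXGE_HW_TIM_TIMER_CI_DISABLE,
    	.urange_a	= 10,	/* range A covers 0..10% utilization */
    	.uec_a		= 4,	/* coalesce 4 events in range A */
    };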
+struct vxge_hw_tim_intr_config {
+
+	u32				intr_enable;
+#define VXGE_HW_TIM_INTR_ENABLE				1
+#define VXGE_HW_TIM_INTR_DISABLE			0
+#define VXGE_HW_TIM_INTR_DEFAULT			0
+
+	u32				btimer_val;
+#define VXGE_HW_MIN_TIM_BTIMER_VAL			0
+#define VXGE_HW_MAX_TIM_BTIMER_VAL			67108864
+#define VXGE_HW_USE_FLASH_DEFAULT			0xffffffff
+
+	u32				timer_ac_en;
+#define VXGE_HW_TIM_TIMER_AC_ENABLE			1
+#define VXGE_HW_TIM_TIMER_AC_DISABLE			0
+
+	u32				timer_ci_en;
+#define VXGE_HW_TIM_TIMER_CI_ENABLE			1
+#define VXGE_HW_TIM_TIMER_CI_DISABLE			0
+
+	u32				timer_ri_en;
+#define VXGE_HW_TIM_TIMER_RI_ENABLE			1
+#define VXGE_HW_TIM_TIMER_RI_DISABLE			0
+
+	u32				rtimer_val;
+#define VXGE_HW_MIN_TIM_RTIMER_VAL			0
+#define VXGE_HW_MAX_TIM_RTIMER_VAL			67108864
+
+	u32				util_sel;
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL		17
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL		18
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL	19
+#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH			63
+
+	u32				ltimer_val;
+#define VXGE_HW_MIN_TIM_LTIMER_VAL			0
+#define VXGE_HW_MAX_TIM_LTIMER_VAL			67108864
+
+	/* Line utilization interrupts */
+	u32				urange_a;
+#define VXGE_HW_MIN_TIM_URANGE_A			0
+#define VXGE_HW_MAX_TIM_URANGE_A			100
+
+	u32				uec_a;
+#define VXGE_HW_MIN_TIM_UEC_A				0
+#define VXGE_HW_MAX_TIM_UEC_A				65535
+
+	u32				urange_b;
+#define VXGE_HW_MIN_TIM_URANGE_B			0
+#define VXGE_HW_MAX_TIM_URANGE_B			100
+
+	u32				uec_b;
+#define VXGE_HW_MIN_TIM_UEC_B				0
+#define VXGE_HW_MAX_TIM_UEC_B				65535
+
+	u32				urange_c;
+#define VXGE_HW_MIN_TIM_URANGE_C			0
+#define VXGE_HW_MAX_TIM_URANGE_C			100
+
+	u32				uec_c;
+#define VXGE_HW_MIN_TIM_UEC_C				0
+#define VXGE_HW_MAX_TIM_UEC_C				65535
+
+	u32				urange_d;
+#define VXGE_HW_MIN_TIM_URANGE_D			0
+#define VXGE_HW_MAX_TIM_URANGE_D			100
+
+	u32				uec_d;
+#define VXGE_HW_MIN_TIM_UEC_D				0
+#define VXGE_HW_MAX_TIM_UEC_D				65535
+};
+
+#define VXGE_HW_STATS_OP_READ				0
+#define VXGE_HW_STATS_OP_CLEAR_STAT			1
+#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS		2
+#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC		2
+#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS		3
+
+#define VXGE_HW_STATS_LOC_AGGR				17
+#define VXGE_HW_STATS_AGGRn_OFFSET			0x00720
+
+#define VXGE_HW_STATS_VPATH_TX_OFFSET			0x0
+#define VXGE_HW_STATS_VPATH_RX_OFFSET			0x00090
+
+#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET	(0x001d0 >> 3)
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
+						vxge_bVALn(bits, 0, 32)
+
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
+						vxge_bVALn(bits, 32, 32)
+
+#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET	(0x001d8 >> 3)
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
+						vxge_bVALn(bits, 0, 32)
+
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
+						vxge_bVALn(bits, 32, 32)
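The *_PROG_EVENT_VNUM* macros above unpack two 32-bit programmable event counters from a single 64-bit statistics word; note the offsets are expressed in qwords (hence the >> 3), so the pointer arithmetic below assumes a u64 __iomem * base. stats_base is a hypothetical pointer to the vpath statistics block, not an identifier from this patch:

    /* Sketch: read VNUM0/VNUM1 out of one stats qword. */
    static void read_prog_events(u64 __iomem *stats_base, u32 *v0, u32 *v1)
    {
    	u64 qw = readq(stats_base +
    		       VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);

    	*v0 = VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(qw);
    	*v1 = VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(qw);
    }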
+
+/**
+ * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
+ *
+ * @tx_frms: Count of data frames transmitted on this Aggregator on all
+ * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
+ * However, does include frames discarded by the Distribution
+ * function.
+ * @tx_data_octets: Count of data and padding octets of frames transmitted
+ * on this Aggregator on all its Aggregation ports. Does not include
+ * octets of LACPDUs or Marker PDUs. However, does include octets of
+ * frames discarded by the Distribution function.
+ * @tx_mcast_frms: Count of data frames transmitted (to a group destination
+ * address other than the broadcast address) on this Aggregator on
+ * all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. However, does include frames discarded by the Distribution
+ * function.
+ * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
+ * on all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. However, does include frames discarded by the Distribution
+ * function.
+ * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
+ * that are discarded by the Distribution function. This occurs when
+ * conversations are allocated to different ports and have to be
+ * flushed on the old ports.
+ * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
+ * experience transmission errors on its Aggregation ports.
+ * @rx_frms: Count of data frames received on this Aggregator on all its
+ * Aggregation ports. Does not include LACPDUs or Marker PDUs.
+ * Also, does not include frames discarded by the Collection
+ * function.
+ * @rx_data_octets: Count of data and padding octets of frames received on this
+ * Aggregator on all its Aggregation ports. Does not include octets
+ * of LACPDUs or Marker PDUs. Also, does not include octets of frames
+ * discarded by the Collection function.
+ * @rx_mcast_frms: Count of data frames received (from a group destination
+ * address other than the broadcast address) on this Aggregator on
+ * all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. Also, does not include frames discarded by the Collection
+ * function.
+ * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
+ * all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. Also, does not include frames discarded by the Collection
+ * function.
+ * @rx_discarded_frms: Count of data frames received on this Aggregator that are
+ * discarded by the Collection function because the Collection
+ * function was disabled on the port on which the frames were
+ * received.
+ * @rx_errored_frms: Count of data frames received on this Aggregator that are
+ * discarded by its Aggregation ports, or are discarded by the
+ * Collection function of the Aggregator, or that are discarded by
+ * the Aggregator due to detection of an illegal Slow Protocols PDU.
+ * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
+ * that are discarded by its Aggregation ports due to detection of
+ * an unknown Slow Protocols PDU.
+ *
+ * Per-Aggregator XMAC statistics.
+ */
+struct vxge_hw_xmac_aggr_stats {
+/*0x000*/	u64	tx_frms;
+/*0x008*/	u64	tx_data_octets;
+/*0x010*/	u64	tx_mcast_frms;
+/*0x018*/	u64	tx_bcast_frms;
+/*0x020*/	u64	tx_discarded_frms;
+/*0x028*/	u64	tx_errored_frms;
+/*0x030*/	u64	rx_frms;
+/*0x038*/	u64	rx_data_octets;
+/*0x040*/	u64	rx_mcast_frms;
+/*0x048*/	u64	rx_bcast_frms;
+/*0x050*/	u64	rx_discarded_frms;
+/*0x058*/	u64	rx_errored_frms;
+/*0x060*/	u64	rx_unknown_slow_proto_frms;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
+ *
+ * @tx_ttl_frms: Count of successfully transmitted MAC frames.
+ * @tx_ttl_octets: Count of total octets of transmitted frames, not including
+ * framing characters (i.e. less framing bits). To determine the
+ * total octets of transmitted frames, including framing characters,
+ * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
+ * otherwise configured, this stat only counts frames that have
+ * 8 bytes of preamble for each frame). This stat can be configured
+ * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
+ * including the preamble octets.
+ * @tx_data_octets: Count of data and padding octets of successfully transmitted
+ * frames.
+ * @tx_mcast_frms: Count of successfully transmitted frames to a group address
+ * other than the broadcast address.
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
+ * group address.
+ * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
+ * Includes discarded frames that are not sent to the network.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
+ * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
+ * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ * are passed to the network.
+ * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
+ * due to problems within ICMP.
+ * @tx_tcp: Count of transmitted TCP segments. Does not include segments
+ * containing retransmitted octets.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ * @tx_udp: Count of transmitted UDP datagrams.
+ * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
+ * generally occurs when a packet is corrupt somehow, including
+ * packets that have IP version mismatches, invalid Layer 2 control
+ * fields, etc. L3/L4 checksums are not offloaded, but the packet
+ * is still transmitted.
+ * @tx_unknown_protocol: Increments when the TPA encounters an unknown
+ * protocol, such as a new IPv6 extension header, or an unsupported
+ * Routing Type. The packet still has a checksum calculated but it
+ * may be incorrect.
+ * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
+ * Since the only control frames supported by this device are
+ * PAUSE frames, this register is a count of all transmitted MAC
+ * control frames.
+ * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
+ * on this Aggregation port.
+ * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
+ * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
+ * the network. Increments because of: 1) An internal processing error
+ * (such as an uncorrectable ECC error). 2) A frame parsing error
+ * during IP checksum calculation.
+ * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
+ * Aggregation port.
+ * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
+ * characters that match a pattern that is programmable through
+ * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
+ * is set to /T/ (i.e. the terminate character), thus the statistic
+ * tracks the number of transmitted Terminate characters.
+ * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
+ * characters that match a pattern that is programmable through
+ * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
+ * is set to /S/ (i.e. the start character), thus the statistic
+ * tracks the number of transmitted Start characters.
+ * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
+ * columns that match a pattern that is programmable through register
+ * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
+ * to 4 x /E/ (i.e. a column containing all error characters), thus
+ * the statistic tracks the number of Error columns transmitted at
+ * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
+ * set to 1, then this stat increments when COLUMN2 is found within
+ * 'n' clocks after COLUMN1. Here, 'n' is defined by
+ * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
+ * to 0, then it means to search anywhere for COLUMN2).
+ * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
+ * columns that match a pattern that is programmable through register
+ * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
+ * to 4 x /I/ (i.e. a column containing all idle characters),
+ * thus the statistic tracks the number of transmitted Idle columns.
+ * @tx_any_err_frms: Count of transmitted frames containing any error that
+ * prevents them from being passed to the network. Increments if
+ * there is an ECC error while reading the frame out of the transmit
+ * buffer. Also increments if the transmit protocol assist (TPA)
+ * block determines that the frame should not be sent.
+ * @tx_drop_frms: Count of frames that could not be sent for no reason other
+ * than internal MAC processing. Increments once whenever the
+ * transmit buffer is flushed (due to an ECC error on a memory
+ * descriptor).
+ * @rx_ttl_frms: Count of total received MAC frames, including frames received
+ * with frame-too-long, FCS, or length errors. This stat can be
+ * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
+ * everything, even "frames" with as little as one byte of preamble.
+ * @rx_vld_frms: Count of successfully received MAC frames. Does not include
+ * frames received with frame-too-long, FCS, or length errors.
+ * @rx_offload_frms: Count of offloaded received frames that are passed to
+ * the host.
+ * @rx_ttl_octets: Count of total octets of received frames, not including
+ * framing characters (i.e. less framing bits). To determine the
+ * total octets of received frames, including framing characters,
+ * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
+ * otherwise configured, this stat only counts frames that have 8
+ * bytes of preamble for each frame). This stat can be configured
+ * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
+ * even the preamble octets of "frames" with as little as one byte
+ * of preamble.
+ * @rx_data_octets: Count of data and padding octets of successfully received
+ * frames. Does not include frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_offload_octets: Count of total octets, not including framing
+ * characters, of offloaded received frames that are passed
+ * to the host.
+ * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
+ * nonbroadcast group address. Does not include frames received
+ * with frame-too-long, FCS, or length errors.
+ * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
+ * the broadcast group address. Does not include frames received
+ * with frame-too-long, FCS, or length errors.
+ * @rx_accepted_ucast_frms: Count of successfully received frames containing
+ * a unicast address. Only includes frames that are passed to
+ * the system.
+ * @rx_accepted_nucast_frms: Count of successfully received frames containing
+ * a non-unicast (broadcast or multicast) address. Only includes
+ * frames that are passed to the system. Could include, for instance,
+ * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
+ * register is set to pass FCS-errored frames to the host.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag.
+ * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
+ * + 18 bytes (+ 22 bytes if VLAN-tagged).
+ * @rx_usized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets, that are otherwise well-formed.
+ * In other words, counts runts.
+ * @rx_osized_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets, that are otherwise + * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING + * is set to 1, then "more than 1518 octets" becomes "more than 1518 + * (1522 if VLAN-tagged) octets". + * @rx_frag_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets that had bad FCS. In other + * words, counts fragments. + * @rx_jabber_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets that had bad FCS. In other + * words, counts jabbers. Note: If register + * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than + * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged) + * octets". + * @rx_ttl_64_frms: Count of total received MAC frames with length (including + * FCS, but not framing bits) of exactly 64 octets. Includes frames + * received with frame-too-long, FCS, or length errors. + * @rx_ttl_65_127_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 65 and 127 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_128_255_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 128 and 255 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_256_511_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 256 and 511 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_512_1023_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 512 and 1023 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1024 and 1518 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1519 and 4095 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 4096 and 8191 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_8192_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 8192 and + * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received + * with frame-too-long, FCS, or length errors. + * @rx_ttl_gt_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) exceeding + * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive. + * Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams. + * @rx_accepted_ip: Count of received IP datagrams that + * are passed to the system. + * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes + * errored IP datagrams. + * @rx_err_ip: Count of received IP datagrams containing errors. 
For example, + * bad IP checksum. + * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages. + * @rx_tcp: Count of received TCP segments. Includes errored TCP segments. + * Note: This stat contains a count of all received TCP segments, + * regardless of whether or not they pertain to an established + * connection. + * @rx_udp: Count of received UDP datagrams. + * @rx_err_tcp: Count of received TCP segments containing errors. For example, + * bad TCP checksum. + * @rx_pause_count: Count of number of pause quanta that the MAC has been in + * the paused state. Recall, one pause quantum equates to 512 + * bit times. + * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames. + * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not + * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and + * this register is a count of all received MAC control frames. + * Note: This stat may be configured to count all layer 2 errors + * (i.e. length errors and FCS errors). + * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does + * not include frames received with frame-too-long or + * frame-too-short error. + * @rx_in_rng_len_err_frms: Count of received frames with a length/type field + * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500 + * for VLAN-tagged frames), inclusive, that does not match the + * number of data octets (including pad) received. Also contains + * a count of received frames with a length/type field less than + * 46 (42 for VLAN-tagged frames) and the number of data octets + * (including pad) received is greater than 46 (42 for VLAN-tagged + * frames). + * @rx_out_rng_len_err_frms: Count of received frames with length/type field + * between 1501 and 1535 decimal, inclusive. + * @rx_drop_frms: Count of received frames that could not be passed to the host. + * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD, + * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD + * for a list of reasons. Because the RMAC drops one frame at a time, + * this stat also indicates the number of drop events. + * @rx_discarded_frms: Count of received frames containing + * any error that prevents + * them from being passed to the system. See PORTn_RX_FCS_DISCARD, + * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of + * reasons. + * @rx_drop_ip: Count of received IP datagrams that could not be passed to the + * host. See PORTn_RX_DROP_FRMS for a list of reasons. + * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the + * host. See PORTn_RX_DROP_FRMS for a list of reasons. + * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation + * port. + * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port. + * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port) + * that carry the Slow Protocols EtherType, but contain an unknown + * PDU. Or frames that contain the Slow Protocols group MAC address, + * but do not carry the Slow Protocols EtherType. + * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on + * this Aggregation port. + * @rx_fcs_discard: Count of received frames that are discarded because the + * FCS check failed. + * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port) + * that carry the Slow Protocols EtherType, but contain a badly + * formed PDU. Or frames that carry the Slow Protocols EtherType, + * but contain an illegal value of Protocol Subtype. 
+ * @rx_switch_discard: Count of received frames that are discarded by the
+ * internal switch because they did not have an entry in the
+ * Filtering Database. This includes frames that had an invalid
+ * destination MAC address or VLAN ID. It also includes frames that
+ * are discarded because they did not satisfy the length requirements
+ * of the target VPATH.
+ * @rx_len_discard: Count of received frames that are discarded because of an
+ * invalid frame length (includes fragments, oversized frames and
+ * mismatch between frame length and length/type field). This stat
+ * can be configured
+ * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
+ * @rx_rpa_discard: Count of received frames that were discarded because the
+ * receive protocol assist (RPA) discovered an error in the frame
+ * or was unable to parse the frame.
+ * @rx_l2_mgmt_discard: Count of Layer 2 management frames (e.g. pause frames,
+ * Link Aggregation Control Protocol (LACP) frames, etc.) that are
+ * discarded.
+ * @rx_rts_discard: Count of received frames that are discarded by the receive
+ * traffic steering (RTS) logic. Includes those frames discarded
+ * because the SSC response contradicted the switch table, because
+ * the SSC timed out, or because the target queue could not fit the
+ * frame.
+ * @rx_trash_discard: Count of received frames that are discarded because
+ * receive traffic steering (RTS) steered the frame to the trash
+ * queue.
+ * @rx_buff_full_discard: Count of received frames that are discarded because
+ * internal buffers are full. Includes frames discarded because the
+ * RTS logic is waiting for an SSC lookup that has no timeout bound.
+ * Also, includes frames that are dropped because the MAC2FAU buffer
+ * is nearly full -- this can happen if the external receive buffer
+ * is full and the receive path is backing up.
+ * @rx_red_discard: Count of received frames that are discarded because of RED
+ * (Random Early Discard).
+ * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
+ * characters occurring between times of normal data transmission
+ * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
+ * incremented when either -
+ * 1) The Reconciliation Sublayer (RS) is expecting one control
+ * character and gets another (i.e. is expecting a Start
+ * character, but gets another control character).
+ * 2) Start control character is not in lane 0
+ * Only increments the count by one for each XGMII column.
+ * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
+ * during normal data transmission. If the Reconciliation Sublayer
+ * (RS) receives a control character, other than a terminate control
+ * character, during receipt of data octets then this register is
+ * incremented. Also increments if the start frame delimiter is not
+ * found in the correct location. Only increments the count by one
+ * for each XGMII column.
+ * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
+ * that match a pattern that is programmable through register
+ * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
+ * to /E/ (i.e. the error character), thus the statistic tracks the
+ * number of Error characters received at any time.
+ * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
+ * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
+ * Only includes symbol errors that are observed between the XGMII
+ * Start Frame Delimiter and End Frame Delimiter, inclusive.
And + * only increments the count by one for each frame. + * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set + * to 4 x /E/ (i.e. a column containing all error characters), thus + * the statistic tracks the number of Error columns received at any + * time. + * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set + * to /E/ (i.e. the error character), thus the statistic tracks the + * number of Error characters received at any time. + * @rx_local_fault: Maintains a count of the number of times that link + * transitioned from "up" to "down" due to a local fault. + * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set + * to 4 x /E/ (i.e. a column containing all error characters), thus + * the statistic tracks the number of Error columns received at any + * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set + * to 1, then this stat increments when COLUMN2 is found within 'n' + * clocks after COLUMN1. Here, 'n' is defined by + * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to + * 0, then it means to search anywhere for COLUMN2). + * @rx_jettison: Count of received frames that are jettisoned because internal + * buffers are full. + * @rx_remote_fault: Maintains a count of the number of times that link + * transitioned from "up" to "down" due to a remote fault. + * + * XMAC Port Statistics. 
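Two conventions in the port-statistics kernel-doc above deserve a worked example: the *_ttl_octets counters exclude the 8 preamble bytes of each frame, and rx_pause_count is measured in 512-bit-time quanta. Hedged helpers over the struct that follows; the function names are mine, not the driver's:

    /* Wire octets including preamble: add 8 bytes back per frame. */
    static u64 port_tx_wire_octets(const struct vxge_hw_xmac_port_stats *s)
    {
    	return s->tx_ttl_octets + 8ULL * s->tx_ttl_frms;
    }

    /* Paused time in ns at 10 Gb/s: one quantum = 512 bit times,
     * and 10 Gb/s moves 10 bits per nanosecond, so 512 / 10 ns each. */
    static u64 port_rx_pause_ns_10g(const struct vxge_hw_xmac_port_stats *s)
    {
    	return s->rx_pause_count * 512ULL / 10;
    }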
+ */ +struct vxge_hw_xmac_port_stats { +/*0x000*/ u64 tx_ttl_frms; +/*0x008*/ u64 tx_ttl_octets; +/*0x010*/ u64 tx_data_octets; +/*0x018*/ u64 tx_mcast_frms; +/*0x020*/ u64 tx_bcast_frms; +/*0x028*/ u64 tx_ucast_frms; +/*0x030*/ u64 tx_tagged_frms; +/*0x038*/ u64 tx_vld_ip; +/*0x040*/ u64 tx_vld_ip_octets; +/*0x048*/ u64 tx_icmp; +/*0x050*/ u64 tx_tcp; +/*0x058*/ u64 tx_rst_tcp; +/*0x060*/ u64 tx_udp; +/*0x068*/ u32 tx_parse_error; +/*0x06c*/ u32 tx_unknown_protocol; +/*0x070*/ u64 tx_pause_ctrl_frms; +/*0x078*/ u32 tx_marker_pdu_frms; +/*0x07c*/ u32 tx_lacpdu_frms; +/*0x080*/ u32 tx_drop_ip; +/*0x084*/ u32 tx_marker_resp_pdu_frms; +/*0x088*/ u32 tx_xgmii_char2_match; +/*0x08c*/ u32 tx_xgmii_char1_match; +/*0x090*/ u32 tx_xgmii_column2_match; +/*0x094*/ u32 tx_xgmii_column1_match; +/*0x098*/ u32 unused1; +/*0x09c*/ u16 tx_any_err_frms; +/*0x09e*/ u16 tx_drop_frms; +/*0x0a0*/ u64 rx_ttl_frms; +/*0x0a8*/ u64 rx_vld_frms; +/*0x0b0*/ u64 rx_offload_frms; +/*0x0b8*/ u64 rx_ttl_octets; +/*0x0c0*/ u64 rx_data_octets; +/*0x0c8*/ u64 rx_offload_octets; +/*0x0d0*/ u64 rx_vld_mcast_frms; +/*0x0d8*/ u64 rx_vld_bcast_frms; +/*0x0e0*/ u64 rx_accepted_ucast_frms; +/*0x0e8*/ u64 rx_accepted_nucast_frms; +/*0x0f0*/ u64 rx_tagged_frms; +/*0x0f8*/ u64 rx_long_frms; +/*0x100*/ u64 rx_usized_frms; +/*0x108*/ u64 rx_osized_frms; +/*0x110*/ u64 rx_frag_frms; +/*0x118*/ u64 rx_jabber_frms; +/*0x120*/ u64 rx_ttl_64_frms; +/*0x128*/ u64 rx_ttl_65_127_frms; +/*0x130*/ u64 rx_ttl_128_255_frms; +/*0x138*/ u64 rx_ttl_256_511_frms; +/*0x140*/ u64 rx_ttl_512_1023_frms; +/*0x148*/ u64 rx_ttl_1024_1518_frms; +/*0x150*/ u64 rx_ttl_1519_4095_frms; +/*0x158*/ u64 rx_ttl_4096_8191_frms; +/*0x160*/ u64 rx_ttl_8192_max_frms; +/*0x168*/ u64 rx_ttl_gt_max_frms; +/*0x170*/ u64 rx_ip; +/*0x178*/ u64 rx_accepted_ip; +/*0x180*/ u64 rx_ip_octets; +/*0x188*/ u64 rx_err_ip; +/*0x190*/ u64 rx_icmp; +/*0x198*/ u64 rx_tcp; +/*0x1a0*/ u64 rx_udp; +/*0x1a8*/ u64 rx_err_tcp; +/*0x1b0*/ u64 rx_pause_count; +/*0x1b8*/ u64 rx_pause_ctrl_frms; +/*0x1c0*/ u64 rx_unsup_ctrl_frms; +/*0x1c8*/ u64 rx_fcs_err_frms; +/*0x1d0*/ u64 rx_in_rng_len_err_frms; +/*0x1d8*/ u64 rx_out_rng_len_err_frms; +/*0x1e0*/ u64 rx_drop_frms; +/*0x1e8*/ u64 rx_discarded_frms; +/*0x1f0*/ u64 rx_drop_ip; +/*0x1f8*/ u64 rx_drop_udp; +/*0x200*/ u32 rx_marker_pdu_frms; +/*0x204*/ u32 rx_lacpdu_frms; +/*0x208*/ u32 rx_unknown_pdu_frms; +/*0x20c*/ u32 rx_marker_resp_pdu_frms; +/*0x210*/ u32 rx_fcs_discard; +/*0x214*/ u32 rx_illegal_pdu_frms; +/*0x218*/ u32 rx_switch_discard; +/*0x21c*/ u32 rx_len_discard; +/*0x220*/ u32 rx_rpa_discard; +/*0x224*/ u32 rx_l2_mgmt_discard; +/*0x228*/ u32 rx_rts_discard; +/*0x22c*/ u32 rx_trash_discard; +/*0x230*/ u32 rx_buff_full_discard; +/*0x234*/ u32 rx_red_discard; +/*0x238*/ u32 rx_xgmii_ctrl_err_cnt; +/*0x23c*/ u32 rx_xgmii_data_err_cnt; +/*0x240*/ u32 rx_xgmii_char1_match; +/*0x244*/ u32 rx_xgmii_err_sym; +/*0x248*/ u32 rx_xgmii_column1_match; +/*0x24c*/ u32 rx_xgmii_char2_match; +/*0x250*/ u32 rx_local_fault; +/*0x254*/ u32 rx_xgmii_column2_match; +/*0x258*/ u32 rx_jettison; +/*0x25c*/ u32 rx_remote_fault; +} __packed; + +/** + * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics + * + * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames. + * @tx_ttl_eth_octets: Count of total octets of transmitted frames, + * not including framing characters (i.e. less framing bits). 
+ * To determine the total octets of transmitted frames, including
+ * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
+ * this stat (the device always prepends 8 bytes of preamble for
+ * each frame)
+ * @tx_data_octets: Count of data and padding octets of successfully transmitted
+ * frames.
+ * @tx_mcast_frms: Count of successfully transmitted frames to a group address
+ * other than the broadcast address.
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
+ * group address.
+ * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
+ * Includes discarded frames that are not sent to the network.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
+ * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
+ * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ * are passed to the network.
+ * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
+ * to problems within ICMP.
+ * @tx_tcp: Count of transmitted TCP segments. Does not include segments
+ * containing retransmitted octets.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ * @tx_udp: Count of transmitted UDP datagrams.
+ * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
+ * such as a new IPv6 extension header, or an unsupported Routing
+ * Type. The packet still has a checksum calculated but it may be
+ * incorrect.
+ * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
+ * to the network. Increments because of: 1) An internal processing
+ * error (such as an uncorrectable ECC error). 2) A frame parsing
+ * error during IP checksum calculation.
+ * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
+ * generally occurs when a packet is corrupt somehow, including
+ * packets that have IP version mismatches, invalid Layer 2 control
+ * fields, etc. L3/L4 checksums are not offloaded, but the packet
+ * is still transmitted.
+ * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
+ * of transmitted TCP segments. Does not include segments containing
+ * retransmitted octets.
+ * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
+ * total number of segments retransmitted. Retransmitted segments
+ * that are sourced by the host are counted by the host.
+ * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
+ * of transmitted IP datagrams that could not be passed to the
+ * network.
+ *
+ * XMAC Vpath TX Statistics.
+ */
+struct vxge_hw_xmac_vpath_tx_stats {
+	u64 tx_ttl_eth_frms;
+	u64 tx_ttl_eth_octets;
+	u64 tx_data_octets;
+	u64 tx_mcast_frms;
+	u64 tx_bcast_frms;
+	u64 tx_ucast_frms;
+	u64 tx_tagged_frms;
+	u64 tx_vld_ip;
+	u64 tx_vld_ip_octets;
+	u64 tx_icmp;
+	u64 tx_tcp;
+	u64 tx_rst_tcp;
+	u64 tx_udp;
+	u32 tx_unknown_protocol;
+	u32 tx_lost_ip;
+	u32 unused1;
+	u32 tx_parse_error;
+	u64 tx_tcp_offload;
+	u64 tx_retx_tcp_offload;
+	u64 tx_lost_ip_offload;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
+ *
+ * @rx_ttl_eth_frms: Count of successfully received MAC frames.
+ * @rx_vld_frms: Count of successfully received MAC frames. Does not include
+ * frames received with frame-too-long, FCS, or length errors.
+ * @rx_offload_frms: Count of offloaded received frames that are passed to
+ * the host.
+ * @rx_ttl_eth_octets: Count of total octets of received frames, not including + * framing characters (i.e. less framing bits). Only counts octets + * of frames that are at least 14 bytes (18 bytes for VLAN-tagged) + * before FCS. To determine the total octets of received frames, + * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and + * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames + * that have the required 8 bytes of preamble). + * @rx_data_octets: Count of data and padding octets of successfully received + * frames. Does not include frames received with frame-too-long, + * FCS, or length errors. + * @rx_offload_octets: Count of total octets, not including framing characters, + * of offloaded received frames that are passed to the host. + * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a + * nonbroadcast group address. Does not include frames received with + * frame-too-long, FCS, or length errors. + * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the + * broadcast group address. Does not include frames received with + * frame-too-long, FCS, or length errors. + * @rx_accepted_ucast_frms: Count of successfully received frames containing + * a unicast address. Only includes frames that are passed to the + * system. + * @rx_accepted_nucast_frms: Count of successfully received frames containing + * a non-unicast (broadcast or multicast) address. Only includes + * frames that are passed to the system. Could include, for instance, + * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG + * register is set to pass FCS-errored frames to the host. + * @rx_tagged_frms: Count of received frames containing a VLAN tag. + * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN + * + 18 bytes (+ 22 bytes if VLAN-tagged). + * @rx_usized_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets, that are otherwise well-formed. + * In other words, counts runts. + * @rx_osized_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets, that are otherwise + * well-formed. + * @rx_frag_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets that had bad FCS. + * In other words, counts fragments. + * @rx_jabber_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets that had bad FCS. In other + * words, counts jabbers. + * @rx_ttl_64_frms: Count of total received MAC frames with length (including + * FCS, but not framing bits) of exactly 64 octets. Includes frames + * received with frame-too-long, FCS, or length errors. + * @rx_ttl_65_127_frms: Count of total received MAC frames + * with length (including + * FCS, but not framing bits) of between 65 and 127 octets inclusive. + * Includes frames received with frame-too-long, FCS, + * or length errors. + * @rx_ttl_128_255_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) + * of between 128 and 255 octets + * inclusive. Includes frames received with frame-too-long, FCS, + * or length errors. + * @rx_ttl_256_511_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) + * of between 256 and 511 octets + * inclusive. Includes frames received with frame-too-long, FCS, or + * length errors. 
+ * @rx_ttl_512_1023_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 512 and 1023 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1024 and 1518 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1519 and 4095 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 4096 and 8191 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_8192_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 8192 and + * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received + * with frame-too-long, FCS, or length errors. + * @rx_ttl_gt_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18 + * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames + * received with frame-too-long, FCS, or length errors. + * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams. + * @rx_accepted_ip: Count of received IP datagrams that + * are passed to the system. + * @rx_ip_octets: Count of number of octets in received IP datagrams. + * Includes errored IP datagrams. + * @rx_err_ip: Count of received IP datagrams containing errors. For example, + * bad IP checksum. + * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages. + * @rx_tcp: Count of received TCP segments. Includes errored TCP segments. + * Note: This stat contains a count of all received TCP segments, + * regardless of whether or not they pertain to an established + * connection. + * @rx_udp: Count of received UDP datagrams. + * @rx_err_tcp: Count of received TCP segments containing errors. For example, + * bad TCP checksum. + * @rx_lost_frms: Count of received frames that could not be passed to the host. + * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD + * for a list of reasons. + * @rx_lost_ip: Count of received IP datagrams that could not be passed to + * the host. See RX_LOST_FRMS for a list of reasons. + * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count + * of received IP datagrams that could not be passed to the host. + * See RX_LOST_FRMS for a list of reasons. + * @rx_various_discard: Count of received frames that are discarded because + * the target receive queue is full. + * @rx_sleep_discard: Count of received frames that are discarded because the + * target VPATH is asleep (a Wake-on-LAN magic packet can be used + * to awaken the VPATH). + * @rx_red_discard: Count of received frames that are discarded because of RED + * (Random Early Discard). + * @rx_queue_full_discard: Count of received frames that are discarded because + * the target receive queue is full. + * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks. + * + * XMAC Vpath RX Statistics. 
+ */
+struct vxge_hw_xmac_vpath_rx_stats {
+ u64 rx_ttl_eth_frms;
+ u64 rx_vld_frms;
+ u64 rx_offload_frms;
+ u64 rx_ttl_eth_octets;
+ u64 rx_data_octets;
+ u64 rx_offload_octets;
+ u64 rx_vld_mcast_frms;
+ u64 rx_vld_bcast_frms;
+ u64 rx_accepted_ucast_frms;
+ u64 rx_accepted_nucast_frms;
+ u64 rx_tagged_frms;
+ u64 rx_long_frms;
+ u64 rx_usized_frms;
+ u64 rx_osized_frms;
+ u64 rx_frag_frms;
+ u64 rx_jabber_frms;
+ u64 rx_ttl_64_frms;
+ u64 rx_ttl_65_127_frms;
+ u64 rx_ttl_128_255_frms;
+ u64 rx_ttl_256_511_frms;
+ u64 rx_ttl_512_1023_frms;
+ u64 rx_ttl_1024_1518_frms;
+ u64 rx_ttl_1519_4095_frms;
+ u64 rx_ttl_4096_8191_frms;
+ u64 rx_ttl_8192_max_frms;
+ u64 rx_ttl_gt_max_frms;
+ u64 rx_ip;
+ u64 rx_accepted_ip;
+ u64 rx_ip_octets;
+ u64 rx_err_ip;
+ u64 rx_icmp;
+ u64 rx_tcp;
+ u64 rx_udp;
+ u64 rx_err_tcp;
+ u64 rx_lost_frms;
+ u64 rx_lost_ip;
+ u64 rx_lost_ip_offload;
+ u16 rx_various_discard;
+ u16 rx_sleep_discard;
+ u16 rx_red_discard;
+ u16 rx_queue_full_discard;
+ u64 rx_mpa_ok_frms;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_stats - XMAC Statistics
+ *
+ * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
+ * @port_stats: Statistics on ports (wire 0, wire 1, lag)
+ * @vpath_tx_stats: Per vpath XMAC TX stats
+ * @vpath_rx_stats: Per vpath XMAC RX stats
+ *
+ * XMAC Statistics.
+ */
+struct vxge_hw_xmac_stats {
+ struct vxge_hw_xmac_aggr_stats
+ aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
+ struct vxge_hw_xmac_port_stats
+ port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
+ struct vxge_hw_xmac_vpath_tx_stats
+ vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
+ struct vxge_hw_xmac_vpath_rx_stats
+ vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
+
+/**
+ * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
+ * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
+ * for the given VPATH
+ * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
+ * @ini_num_cpl_rcvd: The number of PCI read completions received by the
+ * PIC block
+ * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
+ * block to the host
+ * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
+ * the PIC block
+ * @wrcrdtarb_xoff: TBD
+ * @rdcrdtarb_xoff: TBD
+ * @vpath_genstats_count0: TBD
+ * @vpath_genstats_count1: TBD
+ * @vpath_genstats_count2: TBD
+ * @vpath_genstats_count3: TBD
+ * @vpath_genstats_count4: TBD
+ * @vpath_genstats_count5: TBD
+ * @tx_stats: Transmit stats
+ * @rx_stats: Receive stats
+ * @prog_event_vnum1: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
+ * @prog_event_vnum0: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
+ * @prog_event_vnum3: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
+ * @prog_event_vnum2: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
+ * @rx_multi_cast_frame_discard: TBD
+ * @rx_frm_transferred: TBD
+ * @rxd_returned: TBD
+ * @rx_mpa_len_fail_frms: Count of received frames
+ * that fail the MPA length check
+ * @rx_mpa_mrk_fail_frms: Count of received frames
+ * that fail the MPA marker check
+ * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
+ * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
+ * frame buffer (and subsequently to the host).
+ * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
+ * because the VPATH is in reset
+ * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
+ * whenever the received frame matches the VPATH's Wake-on-LAN
+ * signature(s) CRC.
+ * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
+ * because the VPATH is in reset. Includes frames that are discarded
+ * because the current VPIN does not match the VPIN of the frame
+ *
+ * Titan vpath hardware statistics.
+ */
+struct vxge_hw_vpath_stats_hw_info {
+/*0x000*/ u32 ini_num_mwr_sent;
+/*0x004*/ u32 unused1;
+/*0x008*/ u32 ini_num_mrd_sent;
+/*0x00c*/ u32 unused2;
+/*0x010*/ u32 ini_num_cpl_rcvd;
+/*0x014*/ u32 unused3;
+/*0x018*/ u64 ini_num_mwr_byte_sent;
+/*0x020*/ u64 ini_num_cpl_byte_rcvd;
+/*0x028*/ u32 wrcrdtarb_xoff;
+/*0x02c*/ u32 unused4;
+/*0x030*/ u32 rdcrdtarb_xoff;
+/*0x034*/ u32 unused5;
+/*0x038*/ u32 vpath_genstats_count0;
+/*0x03c*/ u32 vpath_genstats_count1;
+/*0x040*/ u32 vpath_genstats_count2;
+/*0x044*/ u32 vpath_genstats_count3;
+/*0x048*/ u32 vpath_genstats_count4;
+/*0x04c*/ u32 unused6;
+/*0x050*/ u32 vpath_genstats_count5;
+/*0x054*/ u32 unused7;
+/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
+/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
+/*0x220*/ u64 unused9;
+/*0x228*/ u32 prog_event_vnum1;
+/*0x22c*/ u32 prog_event_vnum0;
+/*0x230*/ u32 prog_event_vnum3;
+/*0x234*/ u32 prog_event_vnum2;
+/*0x238*/ u16 rx_multi_cast_frame_discard;
+/*0x23a*/ u8 unused10[6];
+/*0x240*/ u32 rx_frm_transferred;
+/*0x244*/ u32 unused11;
+/*0x248*/ u16 rxd_returned;
+/*0x24a*/ u8 unused12[6];
+/*0x252*/ u16 rx_mpa_len_fail_frms;
+/*0x254*/ u16 rx_mpa_mrk_fail_frms;
+/*0x256*/ u16 rx_mpa_crc_fail_frms;
+/*0x258*/ u16 rx_permitted_frms;
+/*0x25c*/ u64 rx_vp_reset_discarded_frms;
+/*0x25e*/ u64 rx_wol_frms;
+/*0x260*/ u64 tx_vp_reset_discarded_frms;
+} __packed;
+
+
+/**
+ * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
+ * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
+ * by the adapter that were discarded because the VPATH is out of service
+ * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
+ * adapter that were discarded because the VPATH is out of service
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
+ * the posted data credits for upstream PCI
writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * 
@pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by + * the adapter that were discarded because the VPATH instance number does + * not match + * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated + * by the adapter that were discarded because the VPATH instance number + * does not match + * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer + * to the GENSTATS0_CFG for information on configuring this statistic + * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer + * to the GENSTATS1_CFG for information on configuring this statistic + * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer + * to the GENSTATS2_CFG for information on configuring this statistic + * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer + * to the GENSTATS3_CFG for information on configuring this statistic + * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer + * to the GENSTATS4_CFG for information on configuring this statistic + * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer + * to the GENSTATS5_CFG for information on configuring this statistic + * @pci.rstdrop_cpl 0x01c8 4 + * @pci.rstdrop_msg 0x01cc 4 + * @pci.rstdrop_client1 0x01d0 4 + * @pci.rstdrop_client0 0x01d4 4 + * @pci.rstdrop_client2 0x01d8 4 + * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion + * header credits were depleted + * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted + * header credits were depleted + * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted + * header credits were depleted + * @pci.depl_cplh[vplane1] 0x01ea 2 + * @pci.depl_nph[vplane1] 0x01ec 2 + * @pci.depl_ph[vplane1] 0x01ee 2 + * @pci.depl_cplh[vplane2] 0x01f2 2 + * @pci.depl_nph[vplane2] 0x01f4 2 + * @pci.depl_ph[vplane2] 0x01f6 2 + * @pci.depl_cplh[vplane3] 0x01fa 2 + * @pci.depl_nph[vplane3] 0x01fc 2 + * @pci.depl_ph[vplane3] 0x01fe 2 + * @pci.depl_cplh[vplane4] 0x0202 2 + * @pci.depl_nph[vplane4] 0x0204 2 + * @pci.depl_ph[vplane4] 0x0206 2 + * @pci.depl_cplh[vplane5] 0x020a 2 + * @pci.depl_nph[vplane5] 0x020c 2 + * @pci.depl_ph[vplane5] 0x020e 2 + * @pci.depl_cplh[vplane6] 0x0212 2 + * @pci.depl_nph[vplane6] 0x0214 2 + * @pci.depl_ph[vplane6] 0x0216 2 + * @pci.depl_cplh[vplane7] 0x021a 2 + * @pci.depl_nph[vplane7] 0x021c 2 + * @pci.depl_ph[vplane7] 0x021e 2 + * @pci.depl_cplh[vplane8] 0x0222 2 + * @pci.depl_nph[vplane8] 0x0224 2 + * @pci.depl_ph[vplane8] 0x0226 2 + * @pci.depl_cplh[vplane9] 0x022a 2 + * @pci.depl_nph[vplane9] 0x022c 2 + * @pci.depl_ph[vplane9] 0x022e 2 + * @pci.depl_cplh[vplane10] 0x0232 2 + * @pci.depl_nph[vplane10] 0x0234 2 + * @pci.depl_ph[vplane10] 0x0236 2 + * @pci.depl_cplh[vplane11] 0x023a 2 + * @pci.depl_nph[vplane11] 0x023c 2 + * @pci.depl_ph[vplane11] 0x023e 2 + * @pci.depl_cplh[vplane12] 0x0242 2 + * @pci.depl_nph[vplane12] 0x0244 2 + * @pci.depl_ph[vplane12] 0x0246 2 + * @pci.depl_cplh[vplane13] 0x024a 2 + * @pci.depl_nph[vplane13] 0x024c 2 + * @pci.depl_ph[vplane13] 0x024e 2 + * @pci.depl_cplh[vplane14] 0x0252 2 + * @pci.depl_nph[vplane14] 0x0254 2 + * @pci.depl_ph[vplane14] 0x0256 2 + * @pci.depl_cplh[vplane15] 0x025a 2 + * @pci.depl_nph[vplane15] 0x025c 2 + * @pci.depl_ph[vplane15] 0x025e 2 + * @pci.depl_cplh[vplane16] 0x0262 2 + * 
@pci.depl_nph[vplane16] 0x0264 2 + * @pci.depl_ph[vplane16] 0x0266 2 + * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data + * credits were depleted + * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data + * credits were depleted + * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data + * credits were depleted + * @pci.depl_cpld[vplane1] 0x0272 2 + * @pci.depl_npd[vplane1] 0x0274 2 + * @pci.depl_pd[vplane1] 0x0276 2 + * @pci.depl_cpld[vplane2] 0x027a 2 + * @pci.depl_npd[vplane2] 0x027c 2 + * @pci.depl_pd[vplane2] 0x027e 2 + * @pci.depl_cpld[vplane3] 0x0282 2 + * @pci.depl_npd[vplane3] 0x0284 2 + * @pci.depl_pd[vplane3] 0x0286 2 + * @pci.depl_cpld[vplane4] 0x028a 2 + * @pci.depl_npd[vplane4] 0x028c 2 + * @pci.depl_pd[vplane4] 0x028e 2 + * @pci.depl_cpld[vplane5] 0x0292 2 + * @pci.depl_npd[vplane5] 0x0294 2 + * @pci.depl_pd[vplane5] 0x0296 2 + * @pci.depl_cpld[vplane6] 0x029a 2 + * @pci.depl_npd[vplane6] 0x029c 2 + * @pci.depl_pd[vplane6] 0x029e 2 + * @pci.depl_cpld[vplane7] 0x02a2 2 + * @pci.depl_npd[vplane7] 0x02a4 2 + * @pci.depl_pd[vplane7] 0x02a6 2 + * @pci.depl_cpld[vplane8] 0x02aa 2 + * @pci.depl_npd[vplane8] 0x02ac 2 + * @pci.depl_pd[vplane8] 0x02ae 2 + * @pci.depl_cpld[vplane9] 0x02b2 2 + * @pci.depl_npd[vplane9] 0x02b4 2 + * @pci.depl_pd[vplane9] 0x02b6 2 + * @pci.depl_cpld[vplane10] 0x02ba 2 + * @pci.depl_npd[vplane10] 0x02bc 2 + * @pci.depl_pd[vplane10] 0x02be 2 + * @pci.depl_cpld[vplane11] 0x02c2 2 + * @pci.depl_npd[vplane11] 0x02c4 2 + * @pci.depl_pd[vplane11] 0x02c6 2 + * @pci.depl_cpld[vplane12] 0x02ca 2 + * @pci.depl_npd[vplane12] 0x02cc 2 + * @pci.depl_pd[vplane12] 0x02ce 2 + * @pci.depl_cpld[vplane13] 0x02d2 2 + * @pci.depl_npd[vplane13] 0x02d4 2 + * @pci.depl_pd[vplane13] 0x02d6 2 + * @pci.depl_cpld[vplane14] 0x02da 2 + * @pci.depl_npd[vplane14] 0x02dc 2 + * @pci.depl_pd[vplane14] 0x02de 2 + * @pci.depl_cpld[vplane15] 0x02e2 2 + * @pci.depl_npd[vplane15] 0x02e4 2 + * @pci.depl_pd[vplane15] 0x02e6 2 + * @pci.depl_cpld[vplane16] 0x02ea 2 + * @pci.depl_npd[vplane16] 0x02ec 2 + * @pci.depl_pd[vplane16] 0x02ee 2 + * @xgmac_port[3]; + * @xgmac_aggr[2]; + * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic. + * Increments when internal logic detects a certain event. See register + * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information. + * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic. + * Increments when internal logic detects a certain event. See register + * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information. + * @xgmac.orp_lro_events 0x0af8 8 + * @xgmac.orp_bs_events 0x0b00 8 + * @xgmac.orp_iwarp_events 0x0b08 8 + * @xgmac.tx_permitted_frms 0x0b14 4 + * @xgmac.port2_tx_any_frms 0x0b1d 1 + * @xgmac.port1_tx_any_frms 0x0b1e 1 + * @xgmac.port0_tx_any_frms 0x0b1f 1 + * @xgmac.port2_rx_any_frms 0x0b25 1 + * @xgmac.port1_rx_any_frms 0x0b26 1 + * @xgmac.port0_rx_any_frms 0x0b27 1 + * + * Titan mrpcim hardware statistics. 
+ */ +struct vxge_hw_device_stats_mrpcim_info { +/*0x0000*/ u32 pic_ini_rd_drop; +/*0x0004*/ u32 pic_ini_wr_drop; +/*0x0008*/ struct { + /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted; + /*0x0004*/ u32 unused1; + } pic_wrcrdtarb_ph_crdt_depleted_vplane[17]; +/*0x0090*/ struct { + /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted; + /*0x0004*/ u32 unused2; + } pic_wrcrdtarb_pd_crdt_depleted_vplane[17]; +/*0x0118*/ struct { + /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted; + /*0x0004*/ u32 unused3; + } pic_rdcrdtarb_nph_crdt_depleted_vplane[17]; +/*0x01a0*/ u32 pic_ini_rd_vpin_drop; +/*0x01a4*/ u32 pic_ini_wr_vpin_drop; +/*0x01a8*/ u32 pic_genstats_count0; +/*0x01ac*/ u32 pic_genstats_count1; +/*0x01b0*/ u32 pic_genstats_count2; +/*0x01b4*/ u32 pic_genstats_count3; +/*0x01b8*/ u32 pic_genstats_count4; +/*0x01bc*/ u32 unused4; +/*0x01c0*/ u32 pic_genstats_count5; +/*0x01c4*/ u32 unused5; +/*0x01c8*/ u32 pci_rstdrop_cpl; +/*0x01cc*/ u32 pci_rstdrop_msg; +/*0x01d0*/ u32 pci_rstdrop_client1; +/*0x01d4*/ u32 pci_rstdrop_client0; +/*0x01d8*/ u32 pci_rstdrop_client2; +/*0x01dc*/ u32 unused6; +/*0x01e0*/ struct { + /*0x0000*/ u16 unused7; + /*0x0002*/ u16 pci_depl_cplh; + /*0x0004*/ u16 pci_depl_nph; + /*0x0006*/ u16 pci_depl_ph; + } pci_depl_h_vplane[17]; +/*0x0268*/ struct { + /*0x0000*/ u16 unused8; + /*0x0002*/ u16 pci_depl_cpld; + /*0x0004*/ u16 pci_depl_npd; + /*0x0006*/ u16 pci_depl_pd; + } pci_depl_d_vplane[17]; +/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3]; +/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2]; +/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0; +/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1; +/*0x0af0*/ u64 unused7; +/*0x0af8*/ u64 unused8; +/*0x0b00*/ u64 unused9; +/*0x0b08*/ u64 unused10; +/*0x0b10*/ u32 unused11; +/*0x0b14*/ u32 xgmac_tx_permitted_frms; +/*0x0b18*/ u32 unused12; +/*0x0b1c*/ u8 unused13; +/*0x0b1d*/ u8 xgmac_port2_tx_any_frms; +/*0x0b1e*/ u8 xgmac_port1_tx_any_frms; +/*0x0b1f*/ u8 xgmac_port0_tx_any_frms; +/*0x0b20*/ u32 unused14; +/*0x0b24*/ u8 unused15; +/*0x0b25*/ u8 xgmac_port2_rx_any_frms; +/*0x0b26*/ u8 xgmac_port1_rx_any_frms; +/*0x0b27*/ u8 xgmac_port0_rx_any_frms; +} __packed; + +/** + * struct vxge_hw_device_stats_hw_info - Titan hardware statistics. + * @vpath_info: VPath statistics + * @vpath_info_sav: Vpath statistics saved + * + * Titan hardware statistics. + */ +struct vxge_hw_device_stats_hw_info { + struct vxge_hw_vpath_stats_hw_info + *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS]; + struct vxge_hw_vpath_stats_hw_info + vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS]; +}; + +/** + * struct vxge_hw_vpath_stats_sw_common_info - HW common + * statistics for queues. + * @full_cnt: Number of times the queue was full + * @usage_cnt: usage count. + * @usage_max: Maximum usage + * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage. + * @total_compl_cnt: Total descriptor completion count. + * + * Hw queue counters + * See also: struct vxge_hw_vpath_stats_sw_fifo_info{}, + * struct vxge_hw_vpath_stats_sw_ring_info{}, + */ +struct vxge_hw_vpath_stats_sw_common_info { + u32 full_cnt; + u32 usage_cnt; + u32 usage_max; + u32 reserve_free_swaps_cnt; + u32 total_compl_cnt; +}; + +/** + * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics + * @common_stats: Common counters for all queues + * @total_posts: Total number of postings on the queue. + * @total_buffers: Total number of buffers posted. + * @txd_t_code_err_cnt: Array of transmit transfer codes. 
The position
+ * (index) in this array reflects the transfer code type, for instance
+ * 0xA - "loss of link".
+ * Value txd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ *
+ * HW fifo counters
+ * See also: struct vxge_hw_vpath_stats_sw_common_info{},
+ * struct vxge_hw_vpath_stats_sw_ring_info{},
+ */
+struct vxge_hw_vpath_stats_sw_fifo_info {
+ struct vxge_hw_vpath_stats_sw_common_info common_stats;
+ u32 total_posts;
+ u32 total_buffers;
+ u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
+ * @common_stats: Common counters for all queues
+ * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
+ * (index) in this array reflects the transfer code type,
+ * for instance
+ * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
+ * Value rxd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ *
+ * HW ring counters
+ * See also: struct vxge_hw_vpath_stats_sw_common_info{},
+ * struct vxge_hw_vpath_stats_sw_fifo_info{},
+ */
+struct vxge_hw_vpath_stats_sw_ring_info {
+ struct vxge_hw_vpath_stats_sw_common_info common_stats;
+ u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
+
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
+ * @unknown_alarms:
+ * @network_sustained_fault:
+ * @network_sustained_ok:
+ * @kdfcctl_fifo0_overwrite:
+ * @kdfcctl_fifo0_poison:
+ * @kdfcctl_fifo0_dma_error:
+ * @dblgen_fifo0_overflow:
+ * @statsb_pif_chain_error:
+ * @statsb_drop_timeout:
+ * @target_illegal_access:
+ * @ini_serr_det:
+ * @prc_ring_bumps:
+ * @prc_rxdcm_sc_err:
+ * @prc_rxdcm_sc_abort:
+ * @prc_quanta_size_err:
+ *
+ * HW vpath error statistics
+ */
+struct vxge_hw_vpath_stats_sw_err {
+ u32 unknown_alarms;
+ u32 network_sustained_fault;
+ u32 network_sustained_ok;
+ u32 kdfcctl_fifo0_overwrite;
+ u32 kdfcctl_fifo0_poison;
+ u32 kdfcctl_fifo0_dma_error;
+ u32 dblgen_fifo0_overflow;
+ u32 statsb_pif_chain_error;
+ u32 statsb_drop_timeout;
+ u32 target_illegal_access;
+ u32 ini_serr_det;
+ u32 prc_ring_bumps;
+ u32 prc_rxdcm_sc_err;
+ u32 prc_rxdcm_sc_abort;
+ u32 prc_quanta_size_err;
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
+ * @soft_reset_cnt: Number of times soft reset is done on this vpath.
+ * @error_stats: error counters for the vpath
+ * @ring_stats: counters for ring belonging to the vpath
+ * @fifo_stats: counters for fifo belonging to the vpath
+ *
+ * HW vpath sw statistics
+ * See also: struct vxge_hw_device_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_info {
+ u32 soft_reset_cnt;
+ struct vxge_hw_vpath_stats_sw_err error_stats;
+ struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
+ struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
+};
+
+/**
+ * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
+ *
+ * @not_traffic_intr_cnt: Number of times the host was interrupted
+ * without new completions.
+ * "Non-traffic interrupt counter".
+ * @traffic_intr_cnt: Number of traffic interrupts for the device.
+ * @total_intr_cnt: Total number of interrupts for the device.
+ * @total_intr_cnt == @traffic_intr_cnt +
+ * @not_traffic_intr_cnt
+ * @soft_reset_cnt: Number of times soft reset is done on this device.
+ * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
+ * HW per-device statistics.
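+ *
+ * For example, a device that fielded 990 traffic interrupts and 10
+ * non-traffic interrupts would report a total_intr_cnt of 1000.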
+ */
+struct vxge_hw_device_stats_sw_info {
+ u32 not_traffic_intr_cnt;
+ u32 traffic_intr_cnt;
+ u32 total_intr_cnt;
+ u32 soft_reset_cnt;
+ struct vxge_hw_vpath_stats_sw_info
+ vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
+
+/**
+ * struct vxge_hw_device_stats_sw_err - HW device error statistics.
+ * @vpath_alarms: Number of vpath alarms
+ *
+ * HW Device error stats
+ */
+struct vxge_hw_device_stats_sw_err {
+ u32 vpath_alarms;
+};
+
+/**
+ * struct vxge_hw_device_stats - Contains HW per-device statistics,
+ * including hw.
+ * @devh: HW device handle.
+ * @dma_addr: DMA address of the %hw_info. Given to device to fill in the stats.
+ * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
+ * space.
+ * @hw_info_dma_acch: One more DMA handle used subsequently to free the
+ * DMA object. Note that this and the previous handle have
+ * physical meaning for Solaris; on Windows and Linux the
+ * corresponding value will simply be a pointer to the PCI device.
+ *
+ * @hw_dev_info_stats: Titan statistics maintained by the hardware.
+ * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
+ * of completions per interrupt.
+ * @sw_dev_err_stats: HW's "soft" device error statistics.
+ *
+ * Structure-container of HW per-device statistics. Note that per-channel
+ * statistics are kept in separate structures under HW's fifo and ring
+ * channels.
+ */
+struct vxge_hw_device_stats {
+ /* handles */
+ struct __vxge_hw_device *devh;
+
+ /* HW device hardware statistics */
+ struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
+
+ /* HW device "soft" stats */
+ struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
+ struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
+
+};
+
+enum vxge_hw_status vxge_hw_device_hw_stats_enable(
+ struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_device_stats_get(
+ struct __vxge_hw_device *devh,
+ struct vxge_hw_device_stats_hw_info *hw_stats);
+
+enum vxge_hw_status vxge_hw_driver_stats_get(
+ struct __vxge_hw_device *devh,
+ struct vxge_hw_device_stats_sw_info *sw_stats);
+
+enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status
+vxge_hw_mrpcim_stats_access(
+ struct __vxge_hw_device *devh,
+ u32 operation,
+ u32 location,
+ u32 offset,
+ u64 *stat);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
+ struct vxge_hw_xmac_aggr_stats *aggr_stats);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
+ struct vxge_hw_xmac_port_stats *port_stats);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
+ struct vxge_hw_xmac_stats *xmac_stats);
+
+/**
+ * enum vxge_hw_mgmt_reg_type - Register types.
+ *
+ * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
+ * @vxge_hw_mgmt_reg_type_toc: TOC Registers
+ * @vxge_hw_mgmt_reg_type_common: Common Registers
+ * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
+ * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
+ * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
+ * @vxge_hw_mgmt_reg_type_vpath: vpath registers
+ *
+ * Register type enumeration
+ */
+enum vxge_hw_mgmt_reg_type {
+ vxge_hw_mgmt_reg_type_legacy = 0,
+ vxge_hw_mgmt_reg_type_toc = 1,
+ vxge_hw_mgmt_reg_type_common = 2,
+ vxge_hw_mgmt_reg_type_mrpcim = 3,
+ vxge_hw_mgmt_reg_type_srpcim = 4,
+ vxge_hw_mgmt_reg_type_vpmgmt = 5,
+ vxge_hw_mgmt_reg_type_vpath = 6
+};
+
+enum vxge_hw_status
+vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index,
+ u32 offset,
+ u64 *value);
+
+enum vxge_hw_status
+vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index,
+ u32 offset,
+ u64 value);
+
+/**
+ * enum vxge_hw_rxd_state - Descriptor (RXD) state.
+ * @VXGE_HW_RXD_STATE_NONE: Invalid state.
+ * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_rxd_state {
+ VXGE_HW_RXD_STATE_NONE = 0,
+ VXGE_HW_RXD_STATE_AVAIL = 1,
+ VXGE_HW_RXD_STATE_POSTED = 2,
+ VXGE_HW_RXD_STATE_FREED = 3
+};
+
+/**
+ * struct vxge_hw_ring_rxd_info - Extended information associated with a
+ * completed ring descriptor.
+ * @syn_flag: SYN flag
+ * @is_icmp: Is ICMP
+ * @fast_path_eligible: Fast Path Eligible flag
+ * @l3_cksum_valid: Set if the L3 checksum is valid.
+ * @l3_cksum: Result of IP checksum check (by Titan hardware).
+ * This field containing VXGE_HW_L3_CKSUM_OK would mean that
+ * the checksum is correct, otherwise - the datagram is
+ * corrupted.
+ * @l4_cksum_valid: Set if the L4 checksum is valid.
+ * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware).
+ * This field containing VXGE_HW_L4_CKSUM_OK would mean that
+ * the checksum is correct. Otherwise - the packet is
+ * corrupted.
+ * @frame: Zero or more of enum vxge_hw_frame_type flags.
+ * See enum vxge_hw_frame_type{}.
+ * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
+ * various higher-layer protocols, including (but not restricted to)
+ * TCP and UDP. See enum vxge_hw_frame_proto{}.
+ * @is_vlan: Set if the VLAN tag is valid
+ * @vlan: VLAN tag extracted from the received frame.
+ * @rth_bucket: RTH bucket
+ * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
+ * has a matching entry in the Indirection table.
+ * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
+ * has a matching entry in the Socket Pair Direct Match table.
+ * @rth_hash_type: RTH hash code of the function used to calculate the hash.
+ * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by Titan
+ * hardware if RTH is enabled.
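+ *
+ * For example, a completed frame can be considered to have intact
+ * checksums only when @l3_cksum_valid and @l4_cksum_valid are set and
+ * @l3_cksum and @l4_cksum read VXGE_HW_L3_CKSUM_OK and
+ * VXGE_HW_L4_CKSUM_OK, respectively.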
+ */
+struct vxge_hw_ring_rxd_info {
+ u32 syn_flag;
+ u32 is_icmp;
+ u32 fast_path_eligible;
+ u32 l3_cksum_valid;
+ u32 l3_cksum;
+ u32 l4_cksum_valid;
+ u32 l4_cksum;
+ u32 frame;
+ u32 proto;
+ u32 is_vlan;
+ u32 vlan;
+ u32 rth_bucket;
+ u32 rth_it_hit;
+ u32 rth_spdm_hit;
+ u32 rth_hash_type;
+ u32 rth_value;
+};
+
+/**
+ * enum vxge_hw_ring_hash_type - RTH hash types
+ * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
+ * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
+ * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
+ * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
+ * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
+ * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
+ * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
+ * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
+ * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
+ * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
+ *
+ * RTH hash types
+ */
+enum vxge_hw_ring_hash_type {
+ VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
+ VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
+ VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
+ VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
+ VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
+ VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
+ VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
+ VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
+ VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
+ VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
+};
+
+enum vxge_hw_status vxge_hw_ring_rxd_reserve(
+ struct __vxge_hw_ring *ring_handle,
+ void **rxdh);
+
+void
+vxge_hw_ring_rxd_pre_post(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+void
+vxge_hw_ring_rxd_post_post(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+enum vxge_hw_status
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+
+void
+vxge_hw_ring_rxd_post_post_wmb(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+void vxge_hw_ring_rxd_post(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+ struct __vxge_hw_ring *ring_handle,
+ void **rxdh,
+ u8 *t_code);
+
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh,
+ u8 t_code);
+
+void vxge_hw_ring_rxd_free(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+/**
+ * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
+ * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
+ * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
+ * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
+ * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
+ * @VXGE_HW_FRAME_PROTO_TCP: TCP.
+ * @VXGE_HW_FRAME_PROTO_UDP: UDP.
+ * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
+ *
+ * Higher layer ethernet protocols and options.
+ */
+enum vxge_hw_frame_proto {
+ VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
+ VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
+ VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
+ VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
+ VXGE_HW_FRAME_PROTO_TCP = 0x02,
+ VXGE_HW_FRAME_PROTO_UDP = 0x01,
+ VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
+ VXGE_HW_FRAME_PROTO_UDP)
+};
+
+/**
+ * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
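+ * For example, a TxD list that consists of a single descriptor
+ * carries the FIRST_LAST code.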
+ *
+ * These gather codes are used to indicate the position of a TxD in a TxD list
+ */
+enum vxge_hw_fifo_gather_code {
+ VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
+ VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
+ VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
+ VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
+};
+
+/**
+ * enum vxge_hw_fifo_tcode - tcodes used in fifo
+ * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
+ * frame data) returned with corrupt data.
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
+ * with no data.
+ * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
+ * frame or LSO MSS that was too long (>9800B).
+ * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
+ * Offload operation, due to improper header template,
+ * unsupported protocol, etc.
+ * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
+ * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
+ * data buffer transfer errors are encountered (see below).
+ * Otherwise it is set to 0.
+ *
+ * These tcodes are returned in various API for TxD status
+ */
+enum vxge_hw_fifo_tcode {
+ VXGE_HW_FIFO_T_CODE_OK = 0x0,
+ VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
+ VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
+ VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
+ VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
+ VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
+ VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
+};
+
+enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
+ struct __vxge_hw_fifo *fifoh,
+ void **txdlh,
+ void **txdl_priv);
+
+void vxge_hw_fifo_txdl_buffer_set(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh,
+ u32 frag_idx,
+ dma_addr_t dma_pointer,
+ u32 size);
+
+void vxge_hw_fifo_txdl_post(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh);
+
+u32 vxge_hw_fifo_free_txdl_count_get(
+ struct __vxge_hw_fifo *fifo_handle);
+
+enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
+ struct __vxge_hw_fifo *fifoh,
+ void **txdlh,
+ enum vxge_hw_fifo_tcode *t_code);
+
+enum vxge_hw_status vxge_hw_fifo_handle_tcode(
+ struct __vxge_hw_fifo *fifoh,
+ void *txdlh,
+ enum vxge_hw_fifo_tcode t_code);
+
+void vxge_hw_fifo_txdl_free(
+ struct __vxge_hw_fifo *fifoh,
+ void *txdlh);
+
+/*
+ * Device
+ */
+
+#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
+#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
+#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
+
+/*
+ * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HW allocates
+ * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
+ * bytes. Each memblock is contiguous DMA-able memory. Each
+ * memblock contains 1 or more 4KB RxD blocks visible to the
+ * Titan hardware.
+ * @dma_object: DMA address and handle of the memory block that contains
+ * the descriptor. This member is used only in the "checked"
+ * version of the HW (to enforce certain assertions);
+ * otherwise it gets compiled out.
+ * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
+ *
+ * Per-receive descriptor HW-private data. HW uses the space to keep DMA
+ * information associated with the descriptor. Note that driver can ask HW
+ * to allocate additional per-descriptor space for its own (driver-specific)
+ * purposes.
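+ *
+ * In other words, @dma_addr of a given descriptor equals the DMA
+ * address of its containing memblock plus @dma_offset within it.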
+ */ +struct __vxge_hw_ring_rxd_priv { + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + ptrdiff_t dma_offset; +#ifdef VXGE_DEBUG_ASSERT + struct vxge_hw_mempool_dma *dma_object; +#endif +}; + +/* ========================= RING PRIVATE API ============================= */ +u64 +__vxge_hw_ring_first_block_address_get( + struct __vxge_hw_ring *ringh); + +enum vxge_hw_status +__vxge_hw_ring_create( + struct __vxge_hw_vpath_handle *vpath_handle, + struct vxge_hw_ring_attr *attr); + +enum vxge_hw_status +__vxge_hw_ring_abort( + struct __vxge_hw_ring *ringh); + +enum vxge_hw_status +__vxge_hw_ring_reset( + struct __vxge_hw_ring *ringh); + +enum vxge_hw_status +__vxge_hw_ring_delete( + struct __vxge_hw_vpath_handle *vpath_handle); + +/* ========================= FIFO PRIVATE API ============================= */ + +struct vxge_hw_fifo_attr; + +enum vxge_hw_status +__vxge_hw_fifo_create( + struct __vxge_hw_vpath_handle *vpath_handle, + struct vxge_hw_fifo_attr *attr); + +enum vxge_hw_status +__vxge_hw_fifo_abort( + struct __vxge_hw_fifo *fifoh); + +enum vxge_hw_status +__vxge_hw_fifo_reset( + struct __vxge_hw_fifo *ringh); + +enum vxge_hw_status +__vxge_hw_fifo_delete( + struct __vxge_hw_vpath_handle *vpath_handle); + +struct vxge_hw_mempool_cbs { + void (*item_func_alloc)( + struct vxge_hw_mempool *mempoolh, + u32 memblock_index, + struct vxge_hw_mempool_dma *dma_object, + u32 index, + u32 is_last); +}; + +void +__vxge_hw_mempool_destroy( + struct vxge_hw_mempool *mempool); + +#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ + ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) + +enum vxge_hw_status +__vxge_hw_vpath_rts_table_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 action, + u32 rts_table, + u32 offset, + u64 *data1, + u64 *data2); + +enum vxge_hw_status +__vxge_hw_vpath_rts_table_set( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 action, + u32 rts_table, + u32 offset, + u64 data1, + u64 data2); + +enum vxge_hw_status +__vxge_hw_vpath_reset( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_sw_reset( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_enable( + struct __vxge_hw_device *devh, + u32 vp_id); + +void +__vxge_hw_vpath_prc_configure( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_kdfc_configure( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_mac_configure( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_tim_configure( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_initialize( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vp_initialize( + struct __vxge_hw_device *devh, + u32 vp_id, + struct vxge_hw_vp_config *config); + +void +__vxge_hw_vp_terminate( + struct __vxge_hw_device *devh, + u32 vp_id); + +enum vxge_hw_status +__vxge_hw_vpath_alarm_process( + struct __vxge_hw_virtualpath *vpath, + u32 skip_alarms); + +void vxge_hw_device_intr_enable( + struct __vxge_hw_device *devh); + +u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode); + +void vxge_hw_device_intr_disable( + struct __vxge_hw_device *devh); + +void vxge_hw_device_mask_all( + struct __vxge_hw_device *devh); + +void vxge_hw_device_unmask_all( + struct __vxge_hw_device *devh); + +enum vxge_hw_status vxge_hw_device_begin_irq( + struct __vxge_hw_device *devh, + u32 skip_alarms, + u64 *reason); + +void 
vxge_hw_device_clear_tx_rx( + struct __vxge_hw_device *devh); + +/* + * Virtual Paths + */ + +u32 vxge_hw_vpath_id( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_vpath_mac_addr_add_mode { + VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0, + VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1, + VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2 +}; + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_add( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 (macaddr)[ETH_ALEN], + u8 (macaddr_mask)[ETH_ALEN], + enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 (macaddr)[ETH_ALEN], + u8 (macaddr_mask)[ETH_ALEN]); + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_get_next( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 (macaddr)[ETH_ALEN], + u8 (macaddr_mask)[ETH_ALEN]); + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_delete( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 (macaddr)[ETH_ALEN], + u8 (macaddr_mask)[ETH_ALEN]); + +enum vxge_hw_status +vxge_hw_vpath_vid_add( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 vid); + +enum vxge_hw_status +vxge_hw_vpath_vid_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 *vid); + +enum vxge_hw_status +vxge_hw_vpath_vid_get_next( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 *vid); + +enum vxge_hw_status +vxge_hw_vpath_vid_delete( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 vid); + +enum vxge_hw_status +vxge_hw_vpath_etype_add( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 etype); + +enum vxge_hw_status +vxge_hw_vpath_etype_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 *etype); + +enum vxge_hw_status +vxge_hw_vpath_etype_get_next( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 *etype); + +enum vxge_hw_status +vxge_hw_vpath_etype_delete( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 etype); + +enum vxge_hw_status vxge_hw_vpath_promisc_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_promisc_disable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_bcast_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_mcast_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_mcast_disable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_poll_rx( + struct __vxge_hw_ring *ringh); + +enum vxge_hw_status vxge_hw_vpath_poll_tx( + struct __vxge_hw_fifo *fifoh, + void **skb_ptr); + +enum vxge_hw_status vxge_hw_vpath_alarm_process( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 skip_alarms); + +enum vxge_hw_status +vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle, + int *tim_msix_id, int alarm_msix_id); + +void +vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, + int msix_id); + +void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); + +void +vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle, + int msix_id); + +void +vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle, + int msix_id); + +void +vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_intr_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_intr_disable( + struct __vxge_hw_vpath_handle *vpath_handle); + +void vxge_hw_vpath_inta_mask_tx_rx( + struct __vxge_hw_vpath_handle 
*vpath_handle); + +void vxge_hw_vpath_inta_unmask_tx_rx( + struct __vxge_hw_vpath_handle *vpath_handle); + +void +vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id); + +void +vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); + +enum vxge_hw_status +vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh); + +void +vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh); + +void +vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, + void **dtrh); + +void +vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel); + +void +vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); + +int +vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); + +/* ========================== PRIVATE API ================================= */ + +enum vxge_hw_status +__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev); + +enum vxge_hw_status +__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev); + +enum vxge_hw_status +__vxge_hw_device_handle_error( + struct __vxge_hw_device *hldev, + u32 vp_id, + enum vxge_hw_event type); + +#endif diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h new file mode 100644 index 00000000000..7da02c545ed --- /dev/null +++ b/drivers/net/vxge/vxge-version.h @@ -0,0 +1,23 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-version.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2009 Neterion Inc. + ******************************************************************************/ +#ifndef VXGE_VERSION_H + +#define VXGE_VERSION_H + +#define VXGE_VERSION_MAJOR "2" +#define VXGE_VERSION_MINOR "0" +#define VXGE_VERSION_FIX "1" +#define VXGE_VERSION_BUILD "17129" +#define VXGE_VERSION_FOR "k" +#endif diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 032db815b0f..f3492110b1a 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -30,6 +30,7 @@ enum parport_pc_pci_cards { titan_210l, netmos_9xx5_combo, netmos_9855, + netmos_9855_2p, avlab_1s1p, avlab_1s2p, avlab_2s1p, @@ -62,7 +63,7 @@ struct parport_pc_pci { struct parport_pc_pci *card, int failed); }; -static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *card, int autoirq, int autodma) +static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *par, int autoirq, int autodma) { /* the rule described below doesn't hold for this device */ if (dev->device == PCI_DEVICE_ID_NETMOS_9835 && @@ -74,9 +75,17 @@ static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc * and serial ports. The form is 0x00PS, where <P> is the number of * parallel ports and <S> is the number of serial ports. 
*/ - card->numports = (dev->subsystem_device & 0xf0) >> 4; - if (card->numports > ARRAY_SIZE(card->addr)) - card->numports = ARRAY_SIZE(card->addr); + par->numports = (dev->subsystem_device & 0xf0) >> 4; + if (par->numports > ARRAY_SIZE(par->addr)) + par->numports = ARRAY_SIZE(par->addr); + /* + * This function is currently only called for cards with up to + * one parallel port. + * Parallel port BAR is either before or after serial ports BARS; + * hence, lo should be either 0 or equal to the number of serial ports. + */ + if (par->addr[0].lo != 0) + par->addr[0].lo = dev->subsystem_device & 0xf; return 0; } @@ -84,7 +93,8 @@ static struct parport_pc_pci cards[] __devinitdata = { /* titan_110l */ { 1, { { 3, -1 }, } }, /* titan_210l */ { 1, { { 3, -1 }, } }, /* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init }, - /* netmos_9855 */ { 1, { { 2, -1 }, }, netmos_parallel_init }, + /* netmos_9855 */ { 1, { { 0, -1 }, }, netmos_parallel_init }, + /* netmos_9855_2p */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* avlab_1s1p */ { 1, { { 1, 2}, } }, /* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} }, /* avlab_2s1p */ { 1, { { 2, 3}, } }, @@ -110,6 +120,10 @@ static struct pci_device_id parport_serial_pci_tbl[] = { { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9845, PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9xx5_combo }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855, + 0x1000, 0x0020, 0, 0, netmos_9855_2p }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855, + 0x1000, 0x0022, 0, 0, netmos_9855_2p }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9855 }, /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ { PCI_VENDOR_ID_AFAVLAB, 0x2110, @@ -192,6 +206,12 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = { .uart_offset = 8, }, [netmos_9855] = { + .flags = FL_BASE2 | FL_BASE_BARS, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [netmos_9855_2p] = { .flags = FL_BASE4 | FL_BASE_BARS, .num_ports = 1, .base_baud = 115200, diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 996f6483807..cfe86853feb 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c @@ -94,7 +94,6 @@ struct pnp_dev_node_info node_info; #ifdef CONFIG_HOTPLUG -static int unloading = 0; static struct completion unload_sem; /* @@ -158,7 +157,7 @@ static int pnp_dock_thread(void *unused) int docked = -1, d = 0; set_freezable(); - while (!unloading) { + while (1) { int status; /* @@ -575,8 +574,6 @@ fs_initcall(pnpbios_init); static int __init pnpbios_thread_init(void) { - struct task_struct *task; - #if defined(CONFIG_PPC) if (check_legacy_ioport(PNPBIOS_BASE)) return 0; @@ -584,10 +581,13 @@ static int __init pnpbios_thread_init(void) if (pnpbios_disabled) return 0; #ifdef CONFIG_HOTPLUG - init_completion(&unload_sem); - task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd"); - if (!IS_ERR(task)) - unloading = 0; + { + struct task_struct *task; + init_completion(&unload_sem); + task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd"); + if (IS_ERR(task)) + return PTR_ERR(task); + } #endif return 0; } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 09d5cd33a3f..56002f7d26b 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -225,11 +225,11 @@ config RTC_DRV_PCF8583 will be called rtc-pcf8583. 
config RTC_DRV_M41T80 - tristate "ST M41T65/M41T80/81/82/83/84/85/87" + tristate "ST M41T62/65/M41T80/81/82/83/84/85/87" help If you say Y here you will get support for the ST M41T60 and M41T80 RTC chips series. Currently, the following chips are - supported: M41T65, M41T80, M41T81, M41T82, M41T83, M41ST84, + supported: M41T62, M41T65, M41T80, M41T81, M41T82, M41T83, M41ST84, M41ST85, and M41ST87. This driver can also be built as a module. If so, the module diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 893f7dece23..60fe266f0f4 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -64,10 +64,12 @@ #define M41T80_FEATURE_BL (1 << 1) /* Battery low indicator */ #define M41T80_FEATURE_SQ (1 << 2) /* Squarewave feature */ #define M41T80_FEATURE_WD (1 << 3) /* Extra watchdog resolution */ +#define M41T80_FEATURE_SQ_ALT (1 << 4) /* RSx bits are in reg 4 */ #define DRV_VERSION "0.05" static const struct i2c_device_id m41t80_id[] = { + { "m41t62", M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT }, { "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD }, { "m41t80", M41T80_FEATURE_SQ }, { "m41t81", M41T80_FEATURE_HT | M41T80_FEATURE_SQ}, @@ -393,12 +395,15 @@ static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct m41t80_data *clientdata = i2c_get_clientdata(client); - int val; + int val, reg_sqw; if (!(clientdata->features & M41T80_FEATURE_SQ)) return -EINVAL; - val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW); + reg_sqw = M41T80_REG_SQW; + if (clientdata->features & M41T80_FEATURE_SQ_ALT) + reg_sqw = M41T80_REG_WDAY; + val = i2c_smbus_read_byte_data(client, reg_sqw); if (val < 0) return -EIO; val = (val >> 4) & 0xf; @@ -419,7 +424,7 @@ static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct m41t80_data *clientdata = i2c_get_clientdata(client); - int almon, sqw; + int almon, sqw, reg_sqw; int val = simple_strtoul(buf, NULL, 0); if (!(clientdata->features & M41T80_FEATURE_SQ)) @@ -440,13 +445,16 @@ static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev, almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); if (almon < 0) return -EIO; - sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW); + reg_sqw = M41T80_REG_SQW; + if (clientdata->features & M41T80_FEATURE_SQ_ALT) + reg_sqw = M41T80_REG_WDAY; + sqw = i2c_smbus_read_byte_data(client, reg_sqw); if (sqw < 0) return -EIO; sqw = (sqw & 0x0f) | (val << 4); if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, almon & ~M41T80_ALMON_SQWE) < 0 || - i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0) + i2c_smbus_write_byte_data(client, reg_sqw, sqw) < 0) return -EIO; if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, almon | M41T80_ALMON_SQWE) < 0) diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c index 66955cc9c74..ad164056feb 100644 --- a/drivers/rtc/rtc-v3020.c +++ b/drivers/rtc/rtc-v3020.c @@ -27,17 +27,162 @@ #include <linux/bcd.h> #include <linux/rtc-v3020.h> #include <linux/delay.h> +#include <linux/gpio.h> #include <linux/io.h> #undef DEBUG +struct v3020; + +struct v3020_chip_ops { + int (*map_io)(struct v3020 *chip, struct platform_device *pdev, + struct v3020_platform_data *pdata); + void (*unmap_io)(struct v3020 *chip); + unsigned char (*read_bit)(struct v3020 *chip); + void (*write_bit)(struct v3020 *chip, unsigned char bit); +}; + +#define V3020_CS 0 +#define V3020_WR 1 +#define V3020_RD 2 +#define V3020_IO 3 + +struct 
v3020_gpio { + const char *name; + unsigned int gpio; +}; + struct v3020 { + /* MMIO access */ void __iomem *ioaddress; int leftshift; + + /* GPIO access */ + struct v3020_gpio *gpio; + + struct v3020_chip_ops *ops; + struct rtc_device *rtc; }; + +static int v3020_mmio_map(struct v3020 *chip, struct platform_device *pdev, + struct v3020_platform_data *pdata) +{ + if (pdev->num_resources != 1) + return -EBUSY; + + if (pdev->resource[0].flags != IORESOURCE_MEM) + return -EBUSY; + + chip->leftshift = pdata->leftshift; + chip->ioaddress = ioremap(pdev->resource[0].start, 1); + if (chip->ioaddress == NULL) + return -EBUSY; + + return 0; +} + +static void v3020_mmio_unmap(struct v3020 *chip) +{ + iounmap(chip->ioaddress); +} + +static void v3020_mmio_write_bit(struct v3020 *chip, unsigned char bit) +{ + writel(bit << chip->leftshift, chip->ioaddress); +} + +static unsigned char v3020_mmio_read_bit(struct v3020 *chip) +{ + return readl(chip->ioaddress) & (1 << chip->leftshift); +} + +static struct v3020_chip_ops v3020_mmio_ops = { + .map_io = v3020_mmio_map, + .unmap_io = v3020_mmio_unmap, + .read_bit = v3020_mmio_read_bit, + .write_bit = v3020_mmio_write_bit, +}; + +static struct v3020_gpio v3020_gpio[] = { + { "RTC CS", 0 }, + { "RTC WR", 0 }, + { "RTC RD", 0 }, + { "RTC IO", 0 }, +}; + +static int v3020_gpio_map(struct v3020 *chip, struct platform_device *pdev, + struct v3020_platform_data *pdata) +{ + int i, err; + + v3020_gpio[V3020_CS].gpio = pdata->gpio_cs; + v3020_gpio[V3020_WR].gpio = pdata->gpio_wr; + v3020_gpio[V3020_RD].gpio = pdata->gpio_rd; + v3020_gpio[V3020_IO].gpio = pdata->gpio_io; + + for (i = 0; i < ARRAY_SIZE(v3020_gpio); i++) { + err = gpio_request(v3020_gpio[i].gpio, v3020_gpio[i].name); + if (err) + goto err_request; + + gpio_direction_output(v3020_gpio[i].gpio, 1); + } + + chip->gpio = v3020_gpio; + + return 0; + +err_request: + while (--i >= 0) + gpio_free(v3020_gpio[i].gpio); + + return err; +} + +static void v3020_gpio_unmap(struct v3020 *chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(v3020_gpio); i++) + gpio_free(v3020_gpio[i].gpio); +} + +static void v3020_gpio_write_bit(struct v3020 *chip, unsigned char bit) +{ + gpio_direction_output(chip->gpio[V3020_IO].gpio, bit); + gpio_set_value(chip->gpio[V3020_CS].gpio, 0); + gpio_set_value(chip->gpio[V3020_WR].gpio, 0); + udelay(1); + gpio_set_value(chip->gpio[V3020_WR].gpio, 1); + gpio_set_value(chip->gpio[V3020_CS].gpio, 1); +} + +static unsigned char v3020_gpio_read_bit(struct v3020 *chip) +{ + int bit; + + gpio_direction_input(chip->gpio[V3020_IO].gpio); + gpio_set_value(chip->gpio[V3020_CS].gpio, 0); + gpio_set_value(chip->gpio[V3020_RD].gpio, 0); + udelay(1); + bit = !!gpio_get_value(chip->gpio[V3020_IO].gpio); + udelay(1); + gpio_set_value(chip->gpio[V3020_RD].gpio, 1); + gpio_set_value(chip->gpio[V3020_CS].gpio, 1); + + return bit; +} + +static struct v3020_chip_ops v3020_gpio_ops = { + .map_io = v3020_gpio_map, + .unmap_io = v3020_gpio_unmap, + .read_bit = v3020_gpio_read_bit, + .write_bit = v3020_gpio_write_bit, +}; + static void v3020_set_reg(struct v3020 *chip, unsigned char address, unsigned char data) { @@ -46,7 +191,7 @@ static void v3020_set_reg(struct v3020 *chip, unsigned char address, tmp = address; for (i = 0; i < 4; i++) { - writel((tmp & 1) << chip->leftshift, chip->ioaddress); + chip->ops->write_bit(chip, (tmp & 1)); tmp >>= 1; udelay(1); } @@ -54,7 +199,7 @@ static void v3020_set_reg(struct v3020 *chip, unsigned char address, /* Commands dont have data */ if (!V3020_IS_COMMAND(address)) { for (i = 
0; i < 8; i++) { - writel((data & 1) << chip->leftshift, chip->ioaddress); + chip->ops->write_bit(chip, (data & 1)); data >>= 1; udelay(1); } @@ -67,14 +212,14 @@ static unsigned char v3020_get_reg(struct v3020 *chip, unsigned char address) int i; for (i = 0; i < 4; i++) { - writel((address & 1) << chip->leftshift, chip->ioaddress); + chip->ops->write_bit(chip, (address & 1)); address >>= 1; udelay(1); } for (i = 0; i < 8; i++) { data >>= 1; - if (readl(chip->ioaddress) & (1 << chip->leftshift)) + if (chip->ops->read_bit(chip)) data |= 0x80; udelay(1); } @@ -164,25 +309,23 @@ static int rtc_probe(struct platform_device *pdev) int i; int temp; - if (pdev->num_resources != 1) - return -EBUSY; - - if (pdev->resource[0].flags != IORESOURCE_MEM) - return -EBUSY; - chip = kzalloc(sizeof *chip, GFP_KERNEL); if (!chip) return -ENOMEM; - chip->leftshift = pdata->leftshift; - chip->ioaddress = ioremap(pdev->resource[0].start, 1); - if (chip->ioaddress == NULL) + if (pdata->use_gpio) + chip->ops = &v3020_gpio_ops; + else + chip->ops = &v3020_mmio_ops; + + retval = chip->ops->map_io(chip, pdev, pdata); + if (retval) goto err_chip; /* Make sure the v3020 expects a communication cycle * by reading 8 times */ for (i = 0; i < 8; i++) - temp = readl(chip->ioaddress); + temp = chip->ops->read_bit(chip); /* Test chip by doing a write/read sequence * to the chip ram */ @@ -196,10 +339,17 @@ static int rtc_probe(struct platform_device *pdev) * are all disabled */ v3020_set_reg(chip, V3020_STATUS_0, 0x0); - dev_info(&pdev->dev, "Chip available at physical address 0x%llx," - "data connected to D%d\n", - (unsigned long long)pdev->resource[0].start, - chip->leftshift); + if (pdata->use_gpio) + dev_info(&pdev->dev, "Chip available at GPIOs " + "%d, %d, %d, %d\n", + chip->gpio[V3020_CS].gpio, chip->gpio[V3020_WR].gpio, + chip->gpio[V3020_RD].gpio, chip->gpio[V3020_IO].gpio); + else + dev_info(&pdev->dev, "Chip available at " + "physical address 0x%llx," + "data connected to D%d\n", + (unsigned long long)pdev->resource[0].start, + chip->leftshift); platform_set_drvdata(pdev, chip); @@ -214,7 +364,7 @@ static int rtc_probe(struct platform_device *pdev) return 0; err_io: - iounmap(chip->ioaddress); + chip->ops->unmap_io(chip); err_chip: kfree(chip); @@ -229,7 +379,7 @@ static int rtc_remove(struct platform_device *dev) if (rtc) rtc_device_unregister(rtc); - iounmap(chip->ioaddress); + chip->ops->unmap_io(chip); kfree(chip); return 0; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index aab8123c596..e8d032b9dfb 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -94,7 +94,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) static void zfcp_wka_port_offline(struct work_struct *work) { - struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct delayed_work *dw = to_delayed_work(work); struct zfcp_wka_port *wka_port = container_of(dw, struct zfcp_wka_port, work); diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index d2866c293de..26bd03e6185 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c @@ -178,8 +178,10 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active) if (is_active) setsck(spi, spi->mode & SPI_CPOL); - /* SPI is normally active-low */ - gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); + if (cs != SPI_GPIO_NO_CHIPSELECT) { + /* SPI is normally active-low */ + gpio_set_value(cs, (spi->mode & SPI_CS_HIGH) ? 
is_active : !is_active); + } } static int spi_gpio_setup(struct spi_device *spi) @@ -191,15 +193,17 @@ static int spi_gpio_setup(struct spi_device *spi) return -EINVAL; if (!spi->controller_state) { - status = gpio_request(cs, dev_name(&spi->dev)); - if (status) - return status; - status = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); + if (cs != SPI_GPIO_NO_CHIPSELECT) { + status = gpio_request(cs, dev_name(&spi->dev)); + if (status) + return status; + status = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); + } } if (!status) status = spi_bitbang_setup(spi); if (status) { - if (!spi->controller_state) + if (!spi->controller_state && cs != SPI_GPIO_NO_CHIPSELECT) gpio_free(cs); } return status; @@ -209,7 +213,8 @@ static void spi_gpio_cleanup(struct spi_device *spi) { unsigned long cs = (unsigned long) spi->controller_data; - gpio_free(cs); + if (cs != SPI_GPIO_NO_CHIPSELECT) + gpio_free(cs); spi_bitbang_cleanup(spi); } diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c index e5752f615e0..80f9cc7137c 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c @@ -719,7 +719,7 @@ void ieee80211_softmac_scan(struct ieee80211_device *ieee) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) void ieee80211_softmac_scan_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq); #else void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee) @@ -777,7 +777,7 @@ out: #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) void ieee80211_softmac_scan_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, softmac_scan_wq); #else void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee) @@ -2980,7 +2980,7 @@ void ieee80211_start_monitor_mode(struct ieee80211_device *ieee) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) void ieee80211_start_ibss_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq); #else void ieee80211_start_ibss_wq(struct ieee80211_device *ieee) @@ -3162,7 +3162,7 @@ void ieee80211_disassociate(struct ieee80211_device *ieee) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) void ieee80211_associate_retry_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq); #else void ieee80211_associate_retry_wq(struct ieee80211_device *ieee) diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c index 66de5cc8ddf..ff1f23f99f2 100644 --- a/drivers/staging/rtl8187se/r8180_core.c +++ b/drivers/staging/rtl8187se/r8180_core.c @@ -5438,7 +5438,7 @@ void rtl8180_hw_wakeup_wq (struct work_struct *work) // struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq); // struct ieee80211_device * ieee = (struct 
ieee80211_device*) // container_of(work, struct ieee80211_device, watch_dog_wq); - struct delayed_work *dwork = container_of(work,struct delayed_work,work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_wakeup_wq); struct net_device *dev = ieee->dev; #else @@ -5459,7 +5459,7 @@ void rtl8180_hw_sleep_wq (struct work_struct *work) // struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq); // struct ieee80211_device * ieee = (struct ieee80211_device*) // container_of(work, struct ieee80211_device, watch_dog_wq); - struct delayed_work *dwork = container_of(work,struct delayed_work,work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_sleep_wq); struct net_device *dev = ieee->dev; #else @@ -6407,7 +6407,7 @@ priv->txnpring)/8); void rtl8180_tx_irq_wq(struct work_struct *work) { //struct r8180_priv *priv = container_of(work, struct r8180_priv, reset_wq); - struct delayed_work *dwork = container_of(work,struct delayed_work,work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device * ieee = (struct ieee80211_device*) container_of(dwork, struct ieee80211_device, watch_dog_wq); struct net_device *dev = ieee->dev; @@ -6691,7 +6691,7 @@ lizhaoming--------------------------- RF power on/power off ----------------- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) void GPIOChangeRFWorkItemCallBack(struct work_struct *work) { - //struct delayed_work *dwork = container_of(work, struct delayed_work, work); + //struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, GPIOChangeRFWorkItem.work); struct net_device *dev = ieee->dev; struct r8180_priv *priv = ieee80211_priv(dev); diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index f0aac0cf315..386eaa22d21 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c @@ -471,7 +471,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) */ static void wusbhc_keep_alive_run(struct work_struct *ws) { - struct delayed_work *dw = container_of(ws, struct delayed_work, work); + struct delayed_work *dw = to_delayed_work(ws); struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); mutex_lock(&wusbhc->mutex); diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c index d9627b57eb4..135ae18bfce 100644 --- a/drivers/video/nvidia/nv_setup.c +++ b/drivers/video/nvidia/nv_setup.c @@ -362,6 +362,7 @@ int NVCommonSetup(struct fb_info *info) case 0x0186: case 0x0187: case 0x018D: + case 0x01D7: case 0x0228: case 0x0286: case 0x028C: diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 442bd8bbd4a..3ebe9726a9e 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c @@ -69,7 +69,7 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit) return w1_read_bit(dev); else { w1_write_bit(dev, 0); - return(0); + return 0; } } @@ -184,17 +184,17 @@ static u8 w1_read_bit(struct w1_master *dev) */ u8 w1_triplet(struct w1_master *dev, int bdir) { - if ( dev->bus_master->triplet ) - return(dev->bus_master->triplet(dev->bus_master->data, bdir)); + if (dev->bus_master->triplet) + return dev->bus_master->triplet(dev->bus_master->data, bdir); else { u8 id_bit = w1_touch_bit(dev, 1); u8 comp_bit = w1_touch_bit(dev, 1); u8 retval; - if ( id_bit && comp_bit ) - return(0x03); /* error */ + if (id_bit 
&& comp_bit) + return 0x03; /* error */ - if ( !id_bit && !comp_bit ) { + if (!id_bit && !comp_bit) { /* Both bits are valid, take the direction given */ retval = bdir ? 0x04 : 0; } else { @@ -203,11 +203,11 @@ u8 w1_triplet(struct w1_master *dev, int bdir) retval = id_bit ? 0x05 : 0x02; } - if ( dev->bus_master->touch_bit ) + if (dev->bus_master->touch_bit) w1_touch_bit(dev, bdir); else w1_write_bit(dev, bdir); - return(retval); + return retval; } }
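
To make the cleaned-up w1_triplet() above easier to follow: reading its branches, the return value packs the two sampled bus bits and the chosen search direction into the low three bits (bit 0 = id bit, bit 1 = complement bit, bit 2 = direction written), with 0x03 signalling that no device responded. Below is a small userspace sketch of that decoding; decode_triplet() is a name invented for this sketch, not a kernel API.

#include <stdio.h>

/*
 * Illustrative decoding of the w1_triplet() return value, read off the
 * branches above: bit 0 = id bit, bit 1 = complement bit, bit 2 =
 * direction written to the bus. 0x03 (both sampled bits 1) means no
 * device responded.
 */
static void decode_triplet(unsigned char ret)
{
	if ((ret & 0x03) == 0x03) {
		printf("0x%02x: error, no presence during search\n", ret);
		return;
	}
	printf("0x%02x: id=%u comp=%u, direction taken=%u\n",
	       ret, ret & 1u, (ret >> 1) & 1u, (ret >> 2) & 1u);
}

int main(void)
{
	const unsigned char samples[] = { 0x00, 0x02, 0x03, 0x04, 0x05 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		decode_triplet(samples[i]);
	return 0;
}

The encoding explains the magic constants in the function: 0x00 and 0x04 mark a discrepancy (devices with both bit values present) resolved by the caller-supplied bdir, while 0x02 and 0x05 mean every responding device agreed on that bit.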
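
Several hunks in this merge (zfcp_fc.c, the rtl8187se work queues, wusbcore's devconnect.c) swap an open-coded container_of() for to_delayed_work(). The helper in include/linux/workqueue.h is just a typed wrapper around that same container_of(); the userspace mock below, with deliberately simplified struct definitions, shows the pointer arithmetic it performs.

#include <stddef.h>
#include <stdio.h>

/* Userspace mock of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the kernel structures. */
struct work_struct { int pending; };
struct delayed_work {
	struct work_struct work;
	long expires;	/* stands in for the embedded timer */
};

/* Same shape as the helper in include/linux/workqueue.h. */
static struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

int main(void)
{
	struct delayed_work dw = { .expires = 42 };
	/* A work handler only ever receives &dw.work ... */
	struct work_struct *w = &dw.work;

	/* ... and recovers the enclosing delayed_work like this. */
	printf("expires=%ld\n", to_delayed_work(w)->expires);
	return 0;
}

Using the named helper documents the intent and keeps the struct/member pair in a single place, so every caller gets the same type-checked conversion.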
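
The spi_gpio.c change teaches the bitbang driver about devices that simply have no chip-select line: controller_data carries a sentinel, SPI_GPIO_NO_CHIPSELECT, and every request, drive, and free of the CS GPIO is guarded against it. A minimal sketch of that sentinel pattern, assuming the sentinel is the all-ones value from <linux/spi/spi_gpio.h>; gpio_request_mock() is invented for the sketch.

#include <stdio.h>

/* The sentinel meaning "this SPI device has no chip-select line";
 * mocked here as all-ones. */
#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1)

/* Invented stand-in for gpio_request(); always succeeds. */
static int gpio_request_mock(unsigned long gpio, const char *label)
{
	printf("claiming GPIO %lu for %s\n", gpio, label);
	return 0;
}

/* Mirror of the guarded setup path: only touch the GPIO if one exists. */
static int setup_chipselect(unsigned long cs, const char *label)
{
	if (cs == SPI_GPIO_NO_CHIPSELECT)
		return 0;	/* nothing to claim, drive, or free later */
	return gpio_request_mock(cs, label);
}

int main(void)
{
	setup_chipselect(5, "flash");
	setup_chipselect(SPI_GPIO_NO_CHIPSELECT, "adc");	/* no-op */
	return 0;
}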
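
The bulk of the rtc-v3020.c diff introduces a small ops vtable (struct v3020_chip_ops) so the serial protocol code, which shifts one bit per call, can run over either an ioremapped latch or four GPIO lines selected at probe time via pdata->use_gpio. The runnable userspace sketch below reduces the pattern to its shape; chip_ops, fake_read_bit and friends are invented stand-ins, and the real driver passes a struct v3020 * rather than a void * context.

#include <stdio.h>

/* Mirrors struct v3020_chip_ops, with a void * context instead of the
 * driver's struct v3020 *. */
struct chip_ops {
	unsigned char (*read_bit)(void *ctx);
	void (*write_bit)(void *ctx, unsigned char bit);
};

/* Invented backend standing in for the MMIO latch or the GPIO lines. */
static unsigned char fake_read_bit(void *ctx)
{
	return *(unsigned char *)ctx & 1;
}

static void fake_write_bit(void *ctx, unsigned char bit)
{
	*(unsigned char *)ctx = bit & 1;
}

static const struct chip_ops fake_ops = {
	.read_bit  = fake_read_bit,
	.write_bit = fake_write_bit,
};

/* The protocol is written once against the ops, like v3020_set_reg():
 * least-significant bit first, one call per bit. */
static void send_nibble(const struct chip_ops *ops, void *ctx, unsigned char v)
{
	for (int i = 0; i < 4; i++) {
		ops->write_bit(ctx, v & 1);
		v >>= 1;
	}
}

int main(void)
{
	unsigned char latch = 0;

	send_nibble(&fake_ops, &latch, 0x9);	/* sends 1,0,0,1 */
	printf("latch now holds %u\n", fake_ops.read_bit(&latch));
	return 0;
}

Everything above the ops (the register read/write loops, the probe-time sanity read) stays transport-agnostic, which is why the probe path only has to pick &v3020_gpio_ops or &v3020_mmio_ops once.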
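
Finally, the netmos_parallel_init hunk near the top of this section decodes the card layout from the PCI subsystem device ID: the high nibble gives the number of parallel ports and the low nibble the number of serial ports, and, as its comment notes, the parallel-port BAR is either BAR 0 or sits right after the serial BARs. A standalone sketch of that decoding follows; struct netmos_layout and decode_netmos_subdevice() are hypothetical names, and the parallel_first flag stands in for the addr[0].lo check the driver does against its card table.

#include <stdio.h>

/* Hypothetical holder for the decoded layout; the real code fills
 * struct parport_pc_pci directly. */
struct netmos_layout {
	unsigned int parallel_ports;
	unsigned int serial_ports;
	unsigned int parallel_bar;	/* BAR index of the parallel port */
};

static void decode_netmos_subdevice(unsigned short subdevice,
				    int parallel_first,
				    struct netmos_layout *l)
{
	l->parallel_ports = (subdevice & 0xf0) >> 4;
	l->serial_ports   = subdevice & 0x0f;
	/*
	 * Per the patch comment: the parallel BAR either precedes or
	 * follows the serial BARs, so it is BAR 0 or BAR <serial count>.
	 */
	l->parallel_bar = parallel_first ? 0 : l->serial_ports;
}

int main(void)
{
	struct netmos_layout l;

	/* 0x0022 is one of the new netmos_9855_2p subsystem IDs above. */
	decode_netmos_subdevice(0x0022, 0, &l);
	printf("%u parallel / %u serial, parallel port at BAR %u\n",
	       l.parallel_ports, l.serial_ports, l.parallel_bar);
	return 0;
}

The two new pci_device_id entries (subsystem IDs 0x0020 and 0x0022) fit this scheme: both carry 2 in the high nibble, matching the two-parallel-port netmos_9855_2p board they are routed to.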