From 7405f74badf46b5d023c5d2b670b4471525f6c91 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 11:10:43 -0700 Subject: dmaengine: refactor dmaengine around dma_async_tx_descriptor The current dmaengine interface defines mutliple routines per operation, i.e. dma_async_memcpy_buf_to_buf, dma_async_memcpy_buf_to_page etc. Adding more operation types (xor, crc, etc) to this model would result in an unmanageable number of method permutations. Are we really going to add a set of hooks for each DMA engine whizbang feature? - Jeff Garzik The descriptor creation process is refactored using the new common dma_async_tx_descriptor structure. Instead of per driver do___to_ methods, drivers integrate dma_async_tx_descriptor into their private software descriptor and then define a 'prep' routine per operation. The prep routine allocates a descriptor and ensures that the tx_set_src, tx_set_dest, tx_submit routines are valid. Descriptor creation and submission becomes: struct dma_device *dev; struct dma_chan *chan; struct dma_async_tx_descriptor *tx; tx = dev->device_prep_dma_(chan, len, int_flag) tx->tx_set_src(dma_addr_t, tx, index /* for multi-source ops */) tx->tx_set_dest(dma_addr_t, tx, index) tx->tx_submit(tx) In addition to the refactoring, dma_async_tx_descriptor also lays the groundwork for definining cross-channel-operation dependencies, and a callback facility for asynchronous notification of operation completion. Changelog: * drop dma mapping methods, suggested by Chris Leech * fix ioat_dma_dependency_added, also caught by Andrew Morton * fix dma_sync_wait, change from Andrew Morton * uninline large functions, change from Andrew Morton * add tx->callback = NULL to dmaengine calls to interoperate with async_tx calls * hookup ioat_tx_submit * convert channel capabilities to a 'cpumask_t like' bitmap * removed DMA_TX_ARRAY_INIT, no longer needed * checkpatch.pl fixes * make set_src, set_dest, and tx_submit descriptor specific methods * fixup git-ioat merge * move group_list and phys to dma_async_tx_descriptor Cc: Jeff Garzik Cc: Chris Leech Signed-off-by: Shannon Nelson Signed-off-by: Dan Williams Acked-by: David S. Miller --- include/linux/dmaengine.h | 237 +++++++++++++++++++++++++++++----------------- 1 file changed, 149 insertions(+), 88 deletions(-) (limited to 'include') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c94d8f1d62e..3de1cf71031 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -21,13 +21,12 @@ #ifndef DMAENGINE_H #define DMAENGINE_H -#ifdef CONFIG_DMA_ENGINE - #include #include #include #include #include +#include /** * enum dma_event - resource PNP/power managment events @@ -64,6 +63,31 @@ enum dma_status { DMA_ERROR, }; +/** + * enum dma_transaction_type - DMA transaction types/indexes + */ +enum dma_transaction_type { + DMA_MEMCPY, + DMA_XOR, + DMA_PQ_XOR, + DMA_DUAL_XOR, + DMA_PQ_UPDATE, + DMA_ZERO_SUM, + DMA_PQ_ZERO_SUM, + DMA_MEMSET, + DMA_MEMCPY_CRC32C, + DMA_INTERRUPT, +}; + +/* last transaction type for creation of the capabilities mask */ +#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) + +/** + * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. 
+ * See linux/cpumask.h + */ +typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; + /** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @refcount: local_t used for open-coded "bigref" counting @@ -157,48 +181,106 @@ struct dma_client { struct list_head global_node; }; +typedef void (*dma_async_tx_callback)(void *dma_async_param); +/** + * struct dma_async_tx_descriptor - async transaction descriptor + * ---dma generic offload fields--- + * @cookie: tracking cookie for this transaction, set to -EBUSY if + * this tx is sitting on a dependency list + * @ack: the descriptor can not be reused until the client acknowledges + * receipt, i.e. has has a chance to establish any dependency chains + * @phys: physical address of the descriptor + * @tx_list: driver common field for operations that require multiple + * descriptors + * @chan: target channel for this operation + * @tx_submit: set the prepared descriptor(s) to be executed by the engine + * @tx_set_dest: set a destination address in a hardware descriptor + * @tx_set_src: set a source address in a hardware descriptor + * @callback: routine to call after this operation is complete + * @callback_param: general parameter to pass to the callback routine + * ---async_tx api specific fields--- + * @depend_list: at completion this list of transactions are submitted + * @depend_node: allow this transaction to be executed after another + * transaction has completed, possibly on another channel + * @parent: pointer to the next level up in the dependency chain + * @lock: protect the dependency list + */ +struct dma_async_tx_descriptor { + dma_cookie_t cookie; + int ack; + dma_addr_t phys; + struct list_head tx_list; + struct dma_chan *chan; + dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); + void (*tx_set_dest)(dma_addr_t addr, + struct dma_async_tx_descriptor *tx, int index); + void (*tx_set_src)(dma_addr_t addr, + struct dma_async_tx_descriptor *tx, int index); + dma_async_tx_callback callback; + void *callback_param; + struct list_head depend_list; + struct list_head depend_node; + struct dma_async_tx_descriptor *parent; + spinlock_t lock; +}; + /** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list + * @cap_mask: one or more dma_capability flags + * @max_xor: maximum number of xor sources, 0 if no capability * @refcount: reference count * @done: IO completion struct * @dev_id: unique device ID + * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the * number of allocated descriptors * @device_free_chan_resources: release DMA channel's resources - * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer - * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page - * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset - * @device_memcpy_complete: poll the status of an IOAT DMA transaction - * @device_memcpy_issue_pending: push appended descriptors to hardware + * @device_prep_dma_memcpy: prepares a memcpy operation + * @device_prep_dma_xor: prepares a xor operation + * @device_prep_dma_zero_sum: prepares a zero_sum operation + * @device_prep_dma_memset: prepares a memset operation + * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_dependency_added: async_tx notifies the channel about new deps + * 
@device_issue_pending: push pending transactions to hardware */ struct dma_device { unsigned int chancnt; struct list_head channels; struct list_head global_node; + dma_cap_mask_t cap_mask; + int max_xor; struct kref refcount; struct completion done; int dev_id; + struct device *dev; int (*device_alloc_chan_resources)(struct dma_chan *chan); void (*device_free_chan_resources)(struct dma_chan *chan); - dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan, - void *dest, void *src, size_t len); - dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan, - struct page *page, unsigned int offset, void *kdata, - size_t len); - dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan, - struct page *dest_pg, unsigned int dest_off, - struct page *src_pg, unsigned int src_off, size_t len); - enum dma_status (*device_memcpy_complete)(struct dma_chan *chan, + + struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( + struct dma_chan *chan, size_t len, int int_en); + struct dma_async_tx_descriptor *(*device_prep_dma_xor)( + struct dma_chan *chan, unsigned int src_cnt, size_t len, + int int_en); + struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( + struct dma_chan *chan, unsigned int src_cnt, size_t len, + u32 *result, int int_en); + struct dma_async_tx_descriptor *(*device_prep_dma_memset)( + struct dma_chan *chan, int value, size_t len, int int_en); + struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( + struct dma_chan *chan); + + void (*device_dependency_added)(struct dma_chan *chan); + enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used); - void (*device_memcpy_issue_pending)(struct dma_chan *chan); + void (*device_issue_pending)(struct dma_chan *chan); }; /* --- public DMA engine API --- */ @@ -207,96 +289,72 @@ struct dma_client *dma_async_client_register(dma_event_callback event_callback); void dma_async_client_unregister(struct dma_client *client); void dma_async_client_chan_request(struct dma_client *client, unsigned int number); +dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, + void *dest, void *src, size_t len); +dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, + struct page *page, unsigned int offset, void *kdata, size_t len); +dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, + struct page *dest_pg, unsigned int dest_off, struct page *src_pg, + unsigned int src_off, size_t len); +void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan); -/** - * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses - * @chan: DMA channel to offload copy to - * @dest: destination address (virtual) - * @src: source address (virtual) - * @len: length - * - * Both @dest and @src must be mappable to a bus address according to the - * DMA mapping API rules for streaming mappings. - * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages). 
- */ -static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, - void *dest, void *src, size_t len) -{ - int cpu = get_cpu(); - per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; - per_cpu_ptr(chan->local, cpu)->memcpy_count++; - put_cpu(); - return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len); +static inline void +async_tx_ack(struct dma_async_tx_descriptor *tx) +{ + tx->ack = 1; } -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. - * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, - struct page *page, unsigned int offset, void *kdata, size_t len) +#define first_dma_cap(mask) __first_dma_cap(&(mask)) +static inline int __first_dma_cap(const dma_cap_mask_t *srcp) { - int cpu = get_cpu(); - per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; - per_cpu_ptr(chan->local, cpu)->memcpy_count++; - put_cpu(); + return min_t(int, DMA_TX_TYPE_END, + find_first_bit(srcp->bits, DMA_TX_TYPE_END)); +} - return chan->device->device_memcpy_buf_to_pg(chan, page, offset, - kdata, len); +#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) +static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) +{ + return min_t(int, DMA_TX_TYPE_END, + find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); } -/** - * dma_async_memcpy_pg_to_pg - offloaded copy from page to page - * @chan: DMA channel to offload copy to - * @dest_pg: destination page - * @dest_off: offset in page to copy to - * @src_pg: source page - * @src_off: offset in page to copy from - * @len: length - * - * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus - * address according to the DMA mapping API rules for streaming mappings. - * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident - * (kernel memory or locked user space pages). - */ -static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, - struct page *dest_pg, unsigned int dest_off, struct page *src_pg, - unsigned int src_off, size_t len) +#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) +static inline void +__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) { - int cpu = get_cpu(); - per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; - per_cpu_ptr(chan->local, cpu)->memcpy_count++; - put_cpu(); + set_bit(tx_type, dstp->bits); +} - return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off, - src_pg, src_off, len); +#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) +static inline int +__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) +{ + return test_bit(tx_type, srcp->bits); } +#define for_each_dma_cap_mask(cap, mask) \ + for ((cap) = first_dma_cap(mask); \ + (cap) < DMA_TX_TYPE_END; \ + (cap) = next_dma_cap((cap), (mask))) + /** - * dma_async_memcpy_issue_pending - flush pending copies to HW + * dma_async_issue_pending - flush pending transactions to HW * @chan: target DMA channel * * This allows drivers to push copies to HW in batches, * reducing MMIO writes where possible. 
*/ -static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan) +static inline void dma_async_issue_pending(struct dma_chan *chan) { - return chan->device->device_memcpy_issue_pending(chan); + return chan->device->device_issue_pending(chan); } +#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) + /** - * dma_async_memcpy_complete - poll for transaction completion + * dma_async_is_tx_complete - poll for transaction completion * @chan: DMA channel * @cookie: transaction identifier to check status of * @last: returns last completed cookie, can be NULL @@ -306,12 +364,15 @@ static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan) * internal state and can be used with dma_async_is_complete() to check * the status of multiple cookies without re-checking hardware state. */ -static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan, +static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) { - return chan->device->device_memcpy_complete(chan, cookie, last, used); + return chan->device->device_is_tx_complete(chan, cookie, last, used); } +#define dma_async_memcpy_complete(chan, cookie, last, used)\ + dma_async_is_tx_complete(chan, cookie, last, used) + /** * dma_async_is_complete - test a cookie against chan state * @cookie: transaction identifier to test status of @@ -334,6 +395,7 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, return DMA_IN_PROGRESS; } +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); /* --- DMA device --- */ @@ -362,5 +424,4 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, struct dma_pinned_list *pinned_list, struct page *page, unsigned int offset, size_t len); -#endif /* CONFIG_DMA_ENGINE */ #endif /* DMAENGINE_H */ -- cgit v1.2.3 From d379b01e9087a582d58f4b678208a4f8d8376fe7 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Jul 2007 11:56:42 -0700 Subject: dmaengine: make clients responsible for managing channels The current implementation assumes that a channel will only be used by one client at a time. In order to enable channel sharing the dmaengine core is changed to a model where clients subscribe to channel-available-events. Instead of tracking how many channels a client wants and how many it has received the core just broadcasts the available channels and lets the clients optionally take a reference. The core learns about the clients' needs at dma_event_callback time. In support of multiple operation types, clients can specify a capability mask to only be notified of channels that satisfy a certain set of capabilities. Changelog: * removed DMA_TX_ARRAY_INIT, no longer needed * dma_client_chan_free -> dma_chan_release: switch to global reference counting only at device unregistration time, before it was also happening at client unregistration time * clients now return dma_state_client to dmaengine (ack, dup, nak) * checkpatch.pl fixes * fixup merge with git-ioat Cc: Chris Leech Signed-off-by: Shannon Nelson Signed-off-by: Dan Williams Acked-by: David S. 
Miller --- include/linux/dmaengine.h | 58 ++++++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 3de1cf71031..a3b6035b6c8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -29,19 +29,31 @@ #include /** - * enum dma_event - resource PNP/power managment events + * enum dma_state - resource PNP/power managment state * @DMA_RESOURCE_SUSPEND: DMA device going into low power state * @DMA_RESOURCE_RESUME: DMA device returning to full power - * @DMA_RESOURCE_ADDED: DMA device added to the system + * @DMA_RESOURCE_AVAILABLE: DMA device available to the system * @DMA_RESOURCE_REMOVED: DMA device removed from the system */ -enum dma_event { +enum dma_state { DMA_RESOURCE_SUSPEND, DMA_RESOURCE_RESUME, - DMA_RESOURCE_ADDED, + DMA_RESOURCE_AVAILABLE, DMA_RESOURCE_REMOVED, }; +/** + * enum dma_state_client - state of the channel in the client + * @DMA_ACK: client would like to use, or was using this channel + * @DMA_DUP: client has already seen this channel, or is not using this channel + * @DMA_NAK: client does not want to see any more channels + */ +enum dma_state_client { + DMA_ACK, + DMA_DUP, + DMA_NAK, +}; + /** * typedef dma_cookie_t - an opaque DMA cookie * @@ -104,7 +116,6 @@ struct dma_chan_percpu { /** * struct dma_chan - devices supply DMA channels, clients use them - * @client: ptr to the client user of this chan, will be %NULL when unused * @device: ptr to the dma device who supplies this channel, always !%NULL * @cookie: last cookie value returned to client * @chan_id: channel ID for sysfs @@ -112,12 +123,10 @@ struct dma_chan_percpu { * @refcount: kref, used in "bigref" slow-mode * @slow_ref: indicates that the DMA channel is free * @rcu: the DMA channel's RCU head - * @client_node: used to add this to the client chan list * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu */ struct dma_chan { - struct dma_client *client; struct dma_device *device; dma_cookie_t cookie; @@ -129,11 +138,11 @@ struct dma_chan { int slow_ref; struct rcu_head rcu; - struct list_head client_node; struct list_head device_node; struct dma_chan_percpu *local; }; + void dma_chan_cleanup(struct kref *kref); static inline void dma_chan_get(struct dma_chan *chan) @@ -158,26 +167,31 @@ static inline void dma_chan_put(struct dma_chan *chan) /* * typedef dma_event_callback - function pointer to a DMA event callback + * For each channel added to the system this routine is called for each client. + * If the client would like to use the channel it returns '1' to signal (ack) + * the dmaengine core to take out a reference on the channel and its + * corresponding device. A client must not 'ack' an available channel more + * than once. When a channel is removed all clients are notified. If a client + * is using the channel it must 'ack' the removal. A client must not 'ack' a + * removed channel more than once. 
+ * @client - 'this' pointer for the client context + * @chan - channel to be acted upon + * @state - available or removed */ -typedef void (*dma_event_callback) (struct dma_client *client, - struct dma_chan *chan, enum dma_event event); +struct dma_client; +typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, + struct dma_chan *chan, enum dma_state state); /** * struct dma_client - info on the entity making use of DMA services * @event_callback: func ptr to call when something happens - * @chan_count: number of chans allocated - * @chans_desired: number of chans requested. Can be +/- chan_count - * @lock: protects access to the channels list - * @channels: the list of DMA channels allocated + * @cap_mask: only return channels that satisfy the requested capabilities + * a value of zero corresponds to any capability * @global_node: list_head for global dma_client_list */ struct dma_client { dma_event_callback event_callback; - unsigned int chan_count; - unsigned int chans_desired; - - spinlock_t lock; - struct list_head channels; + dma_cap_mask_t cap_mask; struct list_head global_node; }; @@ -285,10 +299,9 @@ struct dma_device { /* --- public DMA engine API --- */ -struct dma_client *dma_async_client_register(dma_event_callback event_callback); +void dma_async_client_register(struct dma_client *client); void dma_async_client_unregister(struct dma_client *client); -void dma_async_client_chan_request(struct dma_client *client, - unsigned int number); +void dma_async_client_chan_request(struct dma_client *client); dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, @@ -299,7 +312,6 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan); - static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) { -- cgit v1.2.3 From 685784aaf3cd0e3ff5e36c7ecf6f441cdbf57f73 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Jul 2007 11:56:42 -0700 Subject: xor: make 'xor_blocks' a library routine for use with async_tx The async_tx api tries to use a dma engine for an operation, but will fall back to an optimized software routine otherwise. Xor support is implemented using the raid5 xor routines. For organizational purposes this routine is moved to a common area. 
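A quick usage sketch (not from the patch, and assuming the raid5 convention that ptr[0] is both the destination and an implied source, with the count covering every pointer including the destination); the function name and buffer handling here are illustrative only:

#include <linux/raid/xor.h>

/* XOR src1..src3 into dest; dest (ptrs[0]) is also an implied source,
 * so zero it first if only the sources should be combined.
 */
static void example_xor_three(void *dest, void *src1, void *src2,
                              void *src3, unsigned int bytes)
{
        void *ptrs[4] = { dest, src1, src2, src3 };

        xor_blocks(4, bytes, ptrs);     /* count includes ptrs[0] */
}

The explicit-destination form of xor_blocks() only arrives with the async_tx commit below.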
The following fixes are also made: * rename xor_block => xor_blocks, suggested by Adrian Bunk * ensure that xor.o initializes before md.o in the built-in case * checkpatch.pl fixes * mark calibrate_xor_blocks __init, Adrian Bunk Cc: Adrian Bunk Cc: NeilBrown Cc: Herbert Xu Signed-off-by: Dan Williams --- include/linux/raid/xor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index f0d67cbdea4..7d6c20b654f 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -5,7 +5,7 @@ #define MAX_XOR_BLOCKS 5 -extern void xor_block(unsigned int count, unsigned int bytes, void **ptr); +extern void xor_blocks(unsigned int count, unsigned int bytes, void **ptr); struct xor_block_template { struct xor_block_template *next; -- cgit v1.2.3 From 9bc89cd82d6f88fb0ca39b30445c329a430fd66b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 11:10:44 -0700 Subject: async_tx: add the async_tx api The async_tx api provides methods for describing a chain of asynchronous bulk memory transfers/transforms with support for inter-transactional dependencies. It is implemented as a dmaengine client that smooths over the details of different hardware offload engine implementations. Code that is written to the api can optimize for asynchronous operation and the api will fit the chain of operations to the available offload resources. I imagine that any piece of ADMA hardware would register with the 'async_*' subsystem, and a call to async_X would be routed as appropriate, or be run in-line. - Neil Brown async_tx exploits the capabilities of struct dma_async_tx_descriptor to provide an api of the following general format: struct dma_async_tx_descriptor * async_(..., struct dma_async_tx_descriptor *depend_tx, dma_async_tx_callback cb_fn, void *cb_param) { struct dma_chan *chan = async_tx_find_channel(depend_tx, ); struct dma_device *device = chan ? chan->device : NULL; int int_en = cb_fn ? 1 : 0; struct dma_async_tx_descriptor *tx = device ? device->device_prep_dma_(chan, len, int_en) : NULL; if (tx) { /* run asynchronously */ ... tx->tx_set_dest(addr, tx, index); ... tx->tx_set_src(addr, tx, index); ... async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); } else { /* run synchronously */ ... ... async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); } return tx; } async_tx_find_channel() returns a capable channel from its pool. The channel pool is organized as a per-cpu array of channel pointers. The async_tx_rebalance() routine is tasked with managing these arrays. In the uniprocessor case async_tx_rebalance() tries to spread responsibility evenly over channels of similar capabilities. For example if there are two copy+xor channels, one will handle copy operations and the other will handle xor. In the SMP case async_tx_rebalance() attempts to spread the operations evenly over the cpus, e.g. cpu0 gets copy channel0 and xor channel0 while cpu1 gets copy channel 1 and xor channel 1. When a dependency is specified async_tx_find_channel defaults to keeping the operation on the same channel. A xor->copy->xor chain will stay on one channel if it supports both operation types, otherwise the transaction will transition between a copy and a xor resource. Currently the raid5 implementation in the MD raid456 driver has been converted to the async_tx api. A driver for the offload engines on the Intel Xscale series of I/O processors, iop-adma, is provided in a later commit. 
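A minimal client-side sketch of the prototypes this commit adds to include/linux/async_tx.h (not taken from the patch; the example_* names are made up, process context is assumed, and error handling is omitted):

#include <linux/async_tx.h>
#include <linux/completion.h>

static void example_copy_done(void *arg)
{
        complete(arg);  /* fires from a tasklet, or inline on the sync fallback */
}

/* Offload (or fall back to) a page copy and wait for it to finish. */
static void example_page_copy(struct page *dest, struct page *src, size_t len)
{
        DECLARE_COMPLETION_ONSTACK(done);

        async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK, NULL,
                     example_copy_done, &done);
        async_tx_issue_pending_all();   /* flush queued descriptors to the engines */
        wait_for_completion(&done);
}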
With the iop-adma driver and async_tx, raid456 is able to offload copy, xor, and xor-zero-sum operations to hardware engines. On iop342 tiobench showed higher throughput for sequential writes (20 - 30% improvement) and sequential reads to a degraded array (40 - 55% improvement). For the other cases performance was roughly equal, +/- a few percentage points. On a x86-smp platform the performance of the async_tx implementation (in synchronous mode) was also +/- a few percentage points of the original implementation. According to 'top' on iop342 CPU utilization drops from ~50% to ~15% during a 'resync' while the speed according to /proc/mdstat doubles from ~25 MB/s to ~50 MB/s. The tiobench command line used for testing was: tiobench --size 2048 --block 4096 --block 131072 --dir /mnt/raid --numruns 5 * iop342 had 1GB of memory available Details: * if CONFIG_DMA_ENGINE=n the asynchronous path is compiled away by making async_tx_find_channel a static inline routine that always returns NULL * when a callback is specified for a given transaction an interrupt will fire at operation completion time and the callback will occur in a tasklet. if the the channel does not support interrupts then a live polling wait will be performed * the api is written as a dmaengine client that requests all available channels * In support of dependencies the api implicitly schedules channel-switch interrupts. The interrupt triggers the cleanup tasklet which causes pending operations to be scheduled on the next channel * Xor engines treat an xor destination address differently than a software xor routine. To the software routine the destination address is an implied source, whereas engines treat it as a write-only destination. This patch modifies the xor_blocks routine to take a an explicit destination address to mirror the hardware. Changelog: * fixed a leftover debug print * don't allow callbacks in async_interrupt_cond * fixed xor_block changes * fixed usage of ASYNC_TX_XOR_DROP_DEST * drop dma mapping methods, suggested by Chris Leech * printk warning fixups from Andrew Morton * don't use inline in C files, Adrian Bunk * select the API when MD is enabled * BUG_ON xor source counts <= 1 * implicitly handle hardware concerns like channel switching and interrupts, Neil Brown * remove the per operation type list, and distribute operation capabilities evenly amongst the available channels * simplify async_tx_find_channel to optimize the fast path * introduce the channel_table_initialized flag to prevent early calls to the api * reorganize the code to mimic crypto * include mm.h as not all archs include it in dma-mapping.h * make the Kconfig options non-user visible, Adrian Bunk * move async_tx under crypto since it is meant as 'core' functionality, and the two may share algorithms in the future * move large inline functions into c files * checkpatch.pl fixes * gpl v2 only correction Cc: Herbert Xu Signed-off-by: Dan Williams Acked-By: NeilBrown --- include/linux/async_tx.h | 156 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/raid/xor.h | 5 +- 2 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 include/linux/async_tx.h (limited to 'include') diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h new file mode 100644 index 00000000000..ff1255079fa --- /dev/null +++ b/include/linux/async_tx.h @@ -0,0 +1,156 @@ +/* + * Copyright © 2006, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#ifndef _ASYNC_TX_H_ +#define _ASYNC_TX_H_ +#include +#include +#include + +/** + * dma_chan_ref - object used to manage dma channels received from the + * dmaengine core. + * @chan - the channel being tracked + * @node - node for the channel to be placed on async_tx_master_list + * @rcu - for list_del_rcu + * @count - number of times this channel is listed in the pool + * (for channels with multiple capabiities) + */ +struct dma_chan_ref { + struct dma_chan *chan; + struct list_head node; + struct rcu_head rcu; + atomic_t count; +}; + +/** + * async_tx_flags - modifiers for the async_* calls + * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the + * the destination address is not a source. The asynchronous case handles this + * implicitly, the synchronous case needs to zero the destination block. + * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is + * also one of the source addresses. In the synchronous case the destination + * address is an implied source, whereas the asynchronous case it must be listed + * as a source. The destination address must be the first address in the source + * array. + * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations + * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a + * dependency chain + * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining. 
+ * @ASYNC_TX_KMAP_SRC: if the transaction is to be performed synchronously + * take an atomic mapping (KM_USER0) on the source page(s) + * @ASYNC_TX_KMAP_DST: if the transaction is to be performed synchronously + * take an atomic mapping (KM_USER0) on the dest page(s) + */ +enum async_tx_flags { + ASYNC_TX_XOR_ZERO_DST = (1 << 0), + ASYNC_TX_XOR_DROP_DST = (1 << 1), + ASYNC_TX_ASSUME_COHERENT = (1 << 2), + ASYNC_TX_ACK = (1 << 3), + ASYNC_TX_DEP_ACK = (1 << 4), + ASYNC_TX_KMAP_SRC = (1 << 5), + ASYNC_TX_KMAP_DST = (1 << 6), +}; + +#ifdef CONFIG_DMA_ENGINE +void async_tx_issue_pending_all(void); +enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); +void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx); +struct dma_chan * +async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, + enum dma_transaction_type tx_type); +#else +static inline void async_tx_issue_pending_all(void) +{ + do { } while (0); +} + +static inline enum dma_status +dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) +{ + return DMA_SUCCESS; +} + +static inline void +async_tx_run_dependencies(struct dma_async_tx_descriptor *tx, + struct dma_chan *host_chan) +{ + do { } while (0); +} + +static inline struct dma_chan * +async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, + enum dma_transaction_type tx_type) +{ + return NULL; +} +#endif + +/** + * async_tx_sync_epilog - actions to take if an operation is run synchronously + * @flags: async_tx flags + * @depend_tx: transaction depends on depend_tx + * @cb_fn: function to call when the transaction completes + * @cb_fn_param: parameter to pass to the callback routine + */ +static inline void +async_tx_sync_epilog(unsigned long flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param) +{ + if (cb_fn) + cb_fn(cb_fn_param); + + if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) + async_tx_ack(depend_tx); +} + +void +async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, + enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param); + +struct dma_async_tx_descriptor * +async_xor(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param); + +struct dma_async_tx_descriptor * +async_xor_zero_sum(struct page *dest, struct page **src_list, + unsigned int offset, int src_cnt, size_t len, + u32 *result, enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param); + +struct dma_async_tx_descriptor * +async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, + unsigned int src_offset, size_t len, enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param); + +struct dma_async_tx_descriptor * +async_memset(struct page *dest, int val, unsigned int offset, + size_t len, enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param); + +struct dma_async_tx_descriptor * +async_trigger_callback(enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_fn_param); +#endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 7d6c20b654f..3e120587ead 100644 --- 
a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -3,9 +3,10 @@ #include -#define MAX_XOR_BLOCKS 5 +#define MAX_XOR_BLOCKS 4 -extern void xor_blocks(unsigned int count, unsigned int bytes, void **ptr); +extern void xor_blocks(unsigned int count, unsigned int bytes, + void *dest, void **srcs); struct xor_block_template { struct xor_block_template *next; -- cgit v1.2.3 From a445685647e825c713175d180ffc8dd54d90589b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Jul 2007 11:56:43 -0700 Subject: raid5: refactor handle_stripe5 and handle_stripe6 (v3) handle_stripe5 and handle_stripe6 have very deep logic paths handling the various states of a stripe_head. By introducing the 'stripe_head_state' and 'r6_state' objects, large portions of the logic can be moved to sub-routines. 'struct stripe_head_state' consumes all of the automatic variables that previously stood alone in handle_stripe5,6. 'struct r6_state' contains the handle_stripe6 specific variables like p_failed and q_failed. One of the nice side effects of the 'stripe_head_state' change is that it allows for further reductions in code duplication between raid5 and raid6. The following new routines are shared between raid5 and raid6: handle_completed_write_requests handle_requests_to_failed_array handle_stripe_expansion Changes: * v2: fixed 'conf->raid_disk-1' for the raid6 'handle_stripe_expansion' path * v3: removed the unused 'dirty' field from struct stripe_head_state * v3: coalesced open coded bi_end_io routines into return_io() Signed-off-by: Dan Williams Acked-By: NeilBrown --- include/linux/raid/raid5.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include') diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index d8286db60b9..b99d354f612 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -145,6 +145,22 @@ struct stripe_head { unsigned long flags; } dev[1]; /* allocated with extra space depending of RAID geometry */ }; + +/* stripe_head_state - collects and tracks the dynamic state of a stripe_head + * for handle_stripe. It is only valid under spin_lock(sh->lock); + */ +struct stripe_head_state { + int syncing, expanding, expanded; + int locked, uptodate, to_read, to_write, failed, written; + int non_overwrite; + int failed_num; +}; + +/* r6_state - extra state data only relevant to r6 */ +struct r6_state { + int p_failed, q_failed, qd_idx, failed_num[2]; +}; + /* Flags */ #define R5_UPTODATE 0 /* page contains current data */ #define R5_LOCKED 1 /* IO has been submitted on "req" */ -- cgit v1.2.3 From 91c00924846a0034020451c280c76baa4299f9dc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 13:52:30 -0700 Subject: md: raid5_run_ops - run stripe operations outside sh->lock When the raid acceleration work was proposed, Neil laid out the following attack plan: 1/ move the xor and copy operations outside spin_lock(&sh->lock) 2/ find/implement an asynchronous offload api The raid5_run_ops routine uses the asynchronous offload api (async_tx) and the stripe_operations member of a stripe_head to carry out xor+copy operations asynchronously, outside the lock. To perform operations outside the lock a new set of state flags is needed to track new requests, in-flight requests, and completed requests. In this new model handle_stripe is tasked with scanning the stripe_head for work, updating the stripe_operations structure, and finally dropping the lock and calling raid5_run_ops for processing. 
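The shape of that hand-off, as a sketch only (not taken from the patch): it assumes a helper living in drivers/md/raid5.c next to raid5_run_ops(), uses the sh->ops fields added below, and picks one of the STRIPE_OP_* flags listed next purely for illustration; handle_stripe's real decision logic is elided.

static void example_request_postxor(struct stripe_head *sh)
{
        unsigned long pending;
        int count;

        spin_lock(&sh->lock);
        if (!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
                set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
                sh->ops.count++;                /* record the request */
        }
        pending = sh->ops.pending;
        count = sh->ops.count;
        spin_unlock(&sh->lock);

        if (count)
                raid5_run_ops(sh, pending);     /* sets ops.ack, submits via async_tx */
}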
The following flags outline the requests that handle_stripe can make of raid5_run_ops: STRIPE_OP_BIOFILL - copy data into request buffers to satisfy a read request STRIPE_OP_COMPUTE_BLK - generate a missing block in the cache from the other blocks STRIPE_OP_PREXOR - subtract existing data as part of the read-modify-write process STRIPE_OP_BIODRAIN - copy data out of request buffers to satisfy a write request STRIPE_OP_POSTXOR - recalculate parity for new data that has entered the cache STRIPE_OP_CHECK - verify that the parity is correct STRIPE_OP_IO - submit i/o to the member disks (note this was already performed outside the stripe lock, but it made sense to add it as an operation type The flow is: 1/ handle_stripe sets STRIPE_OP_* in sh->ops.pending 2/ raid5_run_ops reads sh->ops.pending, sets sh->ops.ack, and submits the operation to the async_tx api 3/ async_tx triggers the completion callback routine to set sh->ops.complete and release the stripe 4/ handle_stripe runs again to finish the operation and optionally submit new operations that were previously blocked Note this patch just defines raid5_run_ops, subsequent commits (one per major operation type) modify handle_stripe to take advantage of this routine. Changelog: * removed ops_complete_biodrain in favor of ops_complete_postxor and ops_complete_write. * removed the raid5_run_ops workqueue * call bi_end_io for reads in ops_complete_biofill, saves a call to handle_stripe * explicitly handle the 2-disk raid5 case (xor becomes memcpy), Neil Brown * fix race between async engines and bi_end_io call for reads, Neil Brown * remove unnecessary spin_lock from ops_complete_biofill * remove test_and_set/test_and_clear BUG_ONs, Neil Brown * remove explicit interrupt handling for channel switching, this feature was absorbed (i.e. it is now implicit) by the async_tx api * use return_io in ops_complete_biofill Signed-off-by: Dan Williams Acked-By: NeilBrown --- include/linux/raid/raid5.h | 81 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 78 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index b99d354f612..6fb9d94e6f2 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -116,13 +116,46 @@ * attach a request to an active stripe (add_stripe_bh()) * lockdev attach-buffer unlockdev * handle a stripe (handle_stripe()) - * lockstripe clrSTRIPE_HANDLE ... (lockdev check-buffers unlockdev) .. change-state .. record io needed unlockstripe schedule io + * lockstripe clrSTRIPE_HANDLE ... + * (lockdev check-buffers unlockdev) .. + * change-state .. + * record io/ops needed unlockstripe schedule io/ops * release an active stripe (release_stripe()) * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev * * The refcount counts each thread that have activated the stripe, * plus raid5d if it is handling it, plus one for each active request - * on a cached buffer. + * on a cached buffer, and plus one if the stripe is undergoing stripe + * operations. 
+ * + * Stripe operations are performed outside the stripe lock, + * the stripe operations are: + * -copying data between the stripe cache and user application buffers + * -computing blocks to save a disk access, or to recover a missing block + * -updating the parity on a write operation (reconstruct write and + * read-modify-write) + * -checking parity correctness + * -running i/o to disk + * These operations are carried out by raid5_run_ops which uses the async_tx + * api to (optionally) offload operations to dedicated hardware engines. + * When requesting an operation handle_stripe sets the pending bit for the + * operation and increments the count. raid5_run_ops is then run whenever + * the count is non-zero. + * There are some critical dependencies between the operations that prevent some + * from being requested while another is in flight. + * 1/ Parity check operations destroy the in cache version of the parity block, + * so we prevent parity dependent operations like writes and compute_blocks + * from starting while a check is in progress. Some dma engines can perform + * the check without damaging the parity block, in these cases the parity + * block is re-marked up to date (assuming the check was successful) and is + * not re-read from disk. + * 2/ When a write operation is requested we immediately lock the affected + * blocks, and mark them as not up to date. This causes new read requests + * to be held off, as well as parity checks and compute block operations. + * 3/ Once a compute block operation has been requested handle_stripe treats + * that block as if it is up to date. raid5_run_ops guaruntees that any + * operation that is dependent on the compute block result is initiated after + * the compute block completes. */ struct stripe_head { @@ -136,11 +169,26 @@ struct stripe_head { spinlock_t lock; int bm_seq; /* sequence number for bitmap flushes */ int disks; /* disks in stripe */ + /* stripe_operations + * @pending - pending ops flags (set for request->issue->complete) + * @ack - submitted ops flags (set for issue->complete) + * @complete - completed ops flags (set for complete) + * @target - STRIPE_OP_COMPUTE_BLK target + * @count - raid5_runs_ops is set to run when this is non-zero + */ + struct stripe_operations { + unsigned long pending; + unsigned long ack; + unsigned long complete; + int target; + int count; + u32 zero_sum_result; + } ops; struct r5dev { struct bio req; struct bio_vec vec; struct page *page; - struct bio *toread, *towrite, *written; + struct bio *toread, *read, *towrite, *written; sector_t sector; /* sector of this page */ unsigned long flags; } dev[1]; /* allocated with extra space depending of RAID geometry */ @@ -174,6 +222,15 @@ struct r6_state { #define R5_ReWrite 9 /* have tried to over-write the readerror */ #define R5_Expanded 10 /* This block now has post-expand data */ +#define R5_Wantcompute 11 /* compute_block in progress treat as + * uptodate + */ +#define R5_Wantfill 12 /* dev->toread contains a bio that needs + * filling + */ +#define R5_Wantprexor 13 /* distinguish blocks ready for rmw from + * other "towrites" + */ /* * Write method */ @@ -195,6 +252,24 @@ struct r6_state { #define STRIPE_EXPANDING 9 #define STRIPE_EXPAND_SOURCE 10 #define STRIPE_EXPAND_READY 11 +/* + * Operations flags (in issue order) + */ +#define STRIPE_OP_BIOFILL 0 +#define STRIPE_OP_COMPUTE_BLK 1 +#define STRIPE_OP_PREXOR 2 +#define STRIPE_OP_BIODRAIN 3 +#define STRIPE_OP_POSTXOR 4 +#define STRIPE_OP_CHECK 5 +#define STRIPE_OP_IO 6 + +/* modifiers to the base 
operations + * STRIPE_OP_MOD_REPAIR_PD - compute the parity block and write it back + * STRIPE_OP_MOD_DMA_CHECK - parity is not corrupted by the check + */ +#define STRIPE_OP_MOD_REPAIR_PD 7 +#define STRIPE_OP_MOD_DMA_CHECK 8 + /* * Plugging: * -- cgit v1.2.3 From f38e12199a94ca458e4f03c5a2c984fb80adadc5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 13:52:30 -0700 Subject: md: handle_stripe5 - add request/completion logic for async compute ops handle_stripe will compute a block when a backing disk has failed, or when it determines it can save a disk read by computing the block from all the other up-to-date blocks. Previously a block would be computed under the lock and subsequent logic in handle_stripe could use the newly up-to-date block. With the raid5_run_ops implementation the compute operation is carried out a later time outside the lock. To preserve the old functionality we take advantage of the dependency chain feature of async_tx to flag the block as R5_Wantcompute and then let other parts of handle_stripe operate on the block as if it were up-to-date. raid5_run_ops guarantees that the block will be ready before it is used in another operation. However, this only works in cases where the compute and the dependent operation are scheduled at the same time. If a previous call to handle_stripe sets the R5_Wantcompute flag there is no facility to pass the async_tx dependency chain across successive calls to raid5_run_ops. The req_compute variable protects against this case. Changelog: * remove the req_compute BUG_ON Signed-off-by: Dan Williams Acked-By: NeilBrown --- include/linux/raid/raid5.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 6fb9d94e6f2..2293015de1d 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -200,7 +200,7 @@ struct stripe_head { struct stripe_head_state { int syncing, expanding, expanded; int locked, uptodate, to_read, to_write, failed, written; - int non_overwrite; + int compute, req_compute, non_overwrite; int failed_num; }; -- cgit v1.2.3 From b5e98d65d34a1c11a2135ea8a9b2619dbc7216c8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 13:52:31 -0700 Subject: md: handle_stripe5 - add request/completion logic for async read ops When a read bio is attached to the stripe and the corresponding block is marked R5_UPTODATE, then a read (biofill) operation is scheduled to copy the data from the stripe cache to the bio buffer. handle_stripe flags the blocks to be operated on with the R5_Wantfill flag. If new read requests arrive while raid5_run_ops is running they will not be handled until handle_stripe is scheduled to run again. 
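As a rough sketch of that flagging step (not from the patch; the helper name is made up, and the real code folds this into handle_stripe5's existing per-device loop):

static void example_schedule_biofill(struct stripe_head *sh,
                                     struct stripe_head_state *s)
{
        int i;

        /* mark every block whose data already sits in the stripe cache */
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                if (dev->toread && test_bit(R5_UPTODATE, &dev->flags)) {
                        set_bit(R5_Wantfill, &dev->flags);
                        s->to_fill++;
                }
        }

        /* a single biofill request covers all flagged blocks */
        if (s->to_fill && !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) {
                set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
                sh->ops.count++;
        }
}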
Changelog: * cleanup to_read and to_fill accounting * do not fail reads that have reached the cache Signed-off-by: Dan Williams Acked-By: NeilBrown --- include/linux/raid/raid5.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 2293015de1d..93678f57ccb 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -200,7 +200,7 @@ struct stripe_head { struct stripe_head_state { int syncing, expanding, expanded; int locked, uptodate, to_read, to_write, failed, written; - int compute, req_compute, non_overwrite; + int to_fill, compute, req_compute, non_overwrite; int failed_num; }; -- cgit v1.2.3 From c211092313b90f898dec61f35207fc282d1eadc3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 13:52:26 -0700 Subject: dmaengine: driver for the iop32x, iop33x, and iop13xx raid engines The Intel(R) IOP series of i/o processors integrate an Xscale core with raid acceleration engines. The capabilities per platform are: iop219: (2) copy engines iop321: (2) copy engines (1) xor and block fill engine iop33x: (2) copy and crc32c engines (1) xor, xor zero sum, pq, pq zero sum, and block fill engine iop34x (iop13xx): (2) copy, crc32c, xor, xor zero sum, and block fill engines (1) copy, crc32c, xor, xor zero sum, pq, pq zero sum, and block fill engine The driver supports the features of the async_tx api: * asynchronous notification of operation completion * implicit (interupt triggered) handling of inter-channel transaction dependencies The driver adapts to the platform it is running by two methods. 1/ #include which defines the hardware specific iop_chan_* and iop_desc_* routines as a series of static inline functions 2/ The private platform data attached to the platform_device defines the capabilities of the channels 20070626: Callbacks are run in a tasklet. Given the recent discussion on LKML about killing tasklets in favor of workqueues I did a quick conversion of the driver. Raid5 resync performance dropped from 50MB/s to 30MB/s, so the tasklet implementation remains until a generic softirq interface is available. Changelog: * fixed a slot allocation bug in do_iop13xx_adma_xor that caused too few slots to be requested eventually leading to data corruption * enabled the slot allocation routine to attempt to free slots before returning -ENOMEM * switched the cleanup routine to solely use the software chain and the status register to determine if a descriptor is complete. This is necessary to support other IOP engines that do not have status writeback capability * make the driver iop generic * modified the allocation routines to understand allocating a group of slots for a single operation * added a null xor initialization operation for the xor only channel on iop3xx * support xor operations on buffers larger than the hardware maximum * split the do_* routines into separate prep, src/dest set, submit stages * added async_tx support (dependent operations initiation at cleanup time) * simplified group handling * added interrupt support (callbacks via tasklets) * brought the pending depth inline with ioat (i.e. 
4 descriptors) * drop dma mapping methods, suggested by Chris Leech * don't use inline in C files, Adrian Bunk * remove static tasklet declarations * make iop_adma_alloc_slots easier to read and remove chances for a corrupted descriptor chain * fix locking bug in iop_adma_alloc_chan_resources, Benjamin Herrenschmidt * convert capabilities over to dma_cap_mask_t * fixup sparse warnings * add descriptor flush before iop_chan_enable * checkpatch.pl fixes * gpl v2 only correction * move set_src, set_dest, submit to async_tx methods * move group_list and phys to async_tx Cc: Russell King Signed-off-by: Dan Williams --- include/asm-arm/hardware/iop_adma.h | 118 ++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) create mode 100644 include/asm-arm/hardware/iop_adma.h (limited to 'include') diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h new file mode 100644 index 00000000000..ca8e71f4434 --- /dev/null +++ b/include/asm-arm/hardware/iop_adma.h @@ -0,0 +1,118 @@ +/* + * Copyright © 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#ifndef IOP_ADMA_H +#define IOP_ADMA_H +#include +#include +#include + +#define IOP_ADMA_SLOT_SIZE 32 +#define IOP_ADMA_THRESHOLD 4 + +/** + * struct iop_adma_device - internal representation of an ADMA device + * @pdev: Platform device + * @id: HW ADMA Device selector + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @common: embedded struct dma_device + */ +struct iop_adma_device { + struct platform_device *pdev; + int id; + dma_addr_t dma_desc_pool; + void *dma_desc_pool_virt; + struct dma_device common; +}; + +/** + * struct iop_adma_chan - internal representation of an ADMA device + * @pending: allows batching of hardware operations + * @completed_cookie: identifier for the most recently completed operation + * @lock: serializes enqueue/dequeue operations to the slot pool + * @mmr_base: memory mapped register base + * @chain: device chain view of the descriptors + * @device: parent device + * @common: common dmaengine channel object members + * @last_used: place holder for allocation to continue from where it left off + * @all_slots: complete domain of slots usable by the channel + * @cleanup_watchdog: workaround missed interrupts on iop3xx + * @slots_allocated: records the actual size of the descriptor slot pool + * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs + */ +struct iop_adma_chan { + int pending; + dma_cookie_t completed_cookie; + spinlock_t lock; /* protects the descriptor slot pool */ + void __iomem *mmr_base; + struct list_head chain; + struct iop_adma_device *device; + struct dma_chan common; + struct iop_adma_desc_slot *last_used; + struct list_head all_slots; + struct timer_list cleanup_watchdog; + int slots_allocated; + struct tasklet_struct 
irq_tasklet; +}; + +/** + * struct iop_adma_desc_slot - IOP-ADMA software descriptor + * @slot_node: node on the iop_adma_chan.all_slots list + * @chain_node: node on the op_adma_chan.chain list + * @hw_desc: virtual address of the hardware descriptor chain + * @phys: hardware address of the hardware descriptor chain + * @group_head: first operation in a transaction + * @slot_cnt: total slots used in an transaction (group of operations) + * @slots_per_op: number of slots per operation + * @idx: pool index + * @unmap_src_cnt: number of xor sources + * @unmap_len: transaction bytecount + * @async_tx: support for the async_tx api + * @group_list: list of slots that make up a multi-descriptor transaction + * for example transfer lengths larger than the supported hw max + * @xor_check_result: result of zero sum + * @crc32_result: result crc calculation + */ +struct iop_adma_desc_slot { + struct list_head slot_node; + struct list_head chain_node; + void *hw_desc; + struct iop_adma_desc_slot *group_head; + u16 slot_cnt; + u16 slots_per_op; + u16 idx; + u16 unmap_src_cnt; + size_t unmap_len; + struct dma_async_tx_descriptor async_tx; + union { + u32 *xor_check_result; + u32 *crc32_result; + }; +}; + +struct iop_adma_platform_data { + int hw_id; + dma_cap_mask_t cap_mask; + size_t pool_size; +}; + +#define to_iop_sw_desc(addr_hw_desc) \ + container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc) +#define iop_hw_desc_slot_idx(hw_desc, idx) \ + ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) ) +#endif -- cgit v1.2.3 From 39a8d7d13c113e4a98bfdfc45c7233188e4d715f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 13:52:31 -0700 Subject: iop13xx: surface the iop13xx adma units to the iop-adma driver Adds the platform device definitions and the architecture specific support routines (i.e. register initialization and descriptor formats) for the iop-adma driver. Changelog: * added 'descriptor pool size' to the platform data * add base support for buffer sizes larger than 16MB (hw max) * build error fix from Kirill A. Shutemov * rebase for async_tx changes * add interrupt support * do not call platform register macros in driver code * remove unnecessary ARM assembly statement * checkpatch.pl fixes * gpl v2 only correction Cc: Russell King Signed-off-by: Dan Williams --- include/asm-arm/arch-iop13xx/adma.h | 544 +++++++++++++++++++++++++++++++++ include/asm-arm/arch-iop13xx/iop13xx.h | 38 +-- 2 files changed, 560 insertions(+), 22 deletions(-) create mode 100644 include/asm-arm/arch-iop13xx/adma.h (limited to 'include') diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h new file mode 100644 index 00000000000..04006c1c5fd --- /dev/null +++ b/include/asm-arm/arch-iop13xx/adma.h @@ -0,0 +1,544 @@ +/* + * Copyright(c) 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ +#ifndef _ADMA_H +#define _ADMA_H +#include +#include +#include +#include + +#define ADMA_ACCR(chan) (chan->mmr_base + 0x0) +#define ADMA_ACSR(chan) (chan->mmr_base + 0x4) +#define ADMA_ADAR(chan) (chan->mmr_base + 0x8) +#define ADMA_IIPCR(chan) (chan->mmr_base + 0x18) +#define ADMA_IIPAR(chan) (chan->mmr_base + 0x1c) +#define ADMA_IIPUAR(chan) (chan->mmr_base + 0x20) +#define ADMA_ANDAR(chan) (chan->mmr_base + 0x24) +#define ADMA_ADCR(chan) (chan->mmr_base + 0x28) +#define ADMA_CARMD(chan) (chan->mmr_base + 0x2c) +#define ADMA_ABCR(chan) (chan->mmr_base + 0x30) +#define ADMA_DLADR(chan) (chan->mmr_base + 0x34) +#define ADMA_DUADR(chan) (chan->mmr_base + 0x38) +#define ADMA_SLAR(src, chan) (chan->mmr_base + (0x3c + (src << 3))) +#define ADMA_SUAR(src, chan) (chan->mmr_base + (0x40 + (src << 3))) + +struct iop13xx_adma_src { + u32 src_addr; + union { + u32 upper_src_addr; + struct { + unsigned int pq_upper_src_addr:24; + unsigned int pq_dmlt:8; + }; + }; +}; + +struct iop13xx_adma_desc_ctrl { + unsigned int int_en:1; + unsigned int xfer_dir:2; + unsigned int src_select:4; + unsigned int zero_result:1; + unsigned int block_fill_en:1; + unsigned int crc_gen_en:1; + unsigned int crc_xfer_dis:1; + unsigned int crc_seed_fetch_dis:1; + unsigned int status_write_back_en:1; + unsigned int endian_swap_en:1; + unsigned int reserved0:2; + unsigned int pq_update_xfer_en:1; + unsigned int dual_xor_en:1; + unsigned int pq_xfer_en:1; + unsigned int p_xfer_dis:1; + unsigned int reserved1:10; + unsigned int relax_order_en:1; + unsigned int no_snoop_en:1; +}; + +struct iop13xx_adma_byte_count { + unsigned int byte_count:24; + unsigned int host_if:3; + unsigned int reserved:2; + unsigned int zero_result_err_q:1; + unsigned int zero_result_err:1; + unsigned int tx_complete:1; +}; + +struct iop13xx_adma_desc_hw { + u32 next_desc; + union { + u32 desc_ctrl; + struct iop13xx_adma_desc_ctrl desc_ctrl_field; + }; + union { + u32 crc_addr; + u32 block_fill_data; + u32 q_dest_addr; + }; + union { + u32 byte_count; + struct iop13xx_adma_byte_count byte_count_field; + }; + union { + u32 dest_addr; + u32 p_dest_addr; + }; + union { + u32 upper_dest_addr; + u32 pq_upper_dest_addr; + }; + struct iop13xx_adma_src src[1]; +}; + +struct iop13xx_adma_desc_dual_xor { + u32 next_desc; + u32 desc_ctrl; + u32 reserved; + u32 byte_count; + u32 h_dest_addr; + u32 h_upper_dest_addr; + u32 src0_addr; + u32 upper_src0_addr; + u32 src1_addr; + u32 upper_src1_addr; + u32 h_src_addr; + u32 h_upper_src_addr; + u32 d_src_addr; + u32 d_upper_src_addr; + u32 d_dest_addr; + u32 d_upper_dest_addr; +}; + +struct iop13xx_adma_desc_pq_update { + u32 next_desc; + u32 desc_ctrl; + u32 reserved; + u32 byte_count; + u32 p_dest_addr; + u32 p_upper_dest_addr; + u32 src0_addr; + u32 upper_src0_addr; + u32 src1_addr; + u32 upper_src1_addr; + u32 p_src_addr; + u32 p_upper_src_addr; + u32 q_src_addr; + struct { + unsigned int q_upper_src_addr:24; + unsigned int q_dmlt:8; + }; + u32 q_dest_addr; + u32 q_upper_dest_addr; +}; + +static inline int iop_adma_get_max_xor(void) +{ + return 16; +} + +static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) +{ + return __raw_readl(ADMA_ADAR(chan)); +} + +static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan, + u32 next_desc_addr) +{ + __raw_writel(next_desc_addr, ADMA_ANDAR(chan)); +} + +#define ADMA_STATUS_BUSY (1 << 13) + +static inline char iop_chan_is_busy(struct iop_adma_chan *chan) +{ + if (__raw_readl(ADMA_ACSR(chan)) & + ADMA_STATUS_BUSY) + return 1; + else 
+ return 0; +} + +static inline int +iop_chan_get_desc_align(struct iop_adma_chan *chan, int num_slots) +{ + return 1; +} +#define iop_desc_is_aligned(x, y) 1 + +static inline int +iop_chan_memcpy_slot_count(size_t len, int *slots_per_op) +{ + *slots_per_op = 1; + return 1; +} + +#define iop_chan_interrupt_slot_count(s, c) iop_chan_memcpy_slot_count(0, s) + +static inline int +iop_chan_memset_slot_count(size_t len, int *slots_per_op) +{ + *slots_per_op = 1; + return 1; +} + +static inline int +iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) +{ + int num_slots; + /* slots_to_find = 1 for basic descriptor + 1 per 4 sources above 1 + * (1 source => 8 bytes) (1 slot => 32 bytes) + */ + num_slots = 1 + (((src_cnt - 1) << 3) >> 5); + if (((src_cnt - 1) << 3) & 0x1f) + num_slots++; + + *slots_per_op = num_slots; + + return num_slots; +} + +#define ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) +#define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT +#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT +#define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT +#define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o) + +static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + return hw_desc->dest_addr; +} + +static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + return hw_desc->byte_count_field.byte_count; +} + +static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan, + int src_idx) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + return hw_desc->src[src_idx].src_addr; +} + +static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + return hw_desc->desc_ctrl_field.src_select + 1; +} + +static inline void +iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */ + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; + hw_desc->crc_addr = 0; +} + +static inline void +iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */ + u_desc_ctrl.field.block_fill_en = 1; + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; + hw_desc->crc_addr = 0; +} + +/* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */ +static inline void +iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.src_select = src_cnt - 1; + u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */ + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; + hw_desc->crc_addr = 0; + +} +#define iop_desc_init_null_xor(d, s, i) iop_desc_init_xor(d, s, i) + +/* to do: support buffers larger 
than ADMA_MAX_BYTE_COUNT */ +static inline int +iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.src_select = src_cnt - 1; + u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */ + u_desc_ctrl.field.zero_result = 1; + u_desc_ctrl.field.status_write_back_en = 1; + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; + hw_desc->crc_addr = 0; + + return 1; +} + +static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan, + u32 byte_count) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + hw_desc->byte_count = byte_count; +} + +static inline void +iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) +{ + int slots_per_op = desc->slots_per_op; + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; + int i = 0; + + if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { + hw_desc->byte_count = len; + } else { + do { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; + len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; + i += slots_per_op; + } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); + + if (len) { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = len; + } + } +} + + +static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan, + dma_addr_t addr) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + hw_desc->dest_addr = addr; + hw_desc->upper_dest_addr = 0; +} + +static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, + dma_addr_t addr) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + hw_desc->src[0].src_addr = addr; + hw_desc->src[0].upper_src_addr = 0; +} + +static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, + int src_idx, dma_addr_t addr) +{ + int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; + int i = 0; + + do { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->src[src_idx].src_addr = addr; + iter->src[src_idx].upper_src_addr = 0; + slot_cnt -= slots_per_op; + if (slot_cnt) { + i += slots_per_op; + addr += IOP_ADMA_XOR_MAX_BYTE_COUNT; + } + } while (slot_cnt); +} + +static inline void +iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + iop_desc_init_memcpy(desc, 1); + iop_desc_set_byte_count(desc, chan, 0); + iop_desc_set_dest_addr(desc, chan, 0); + iop_desc_set_memcpy_src_addr(desc, 0); +} + +#define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr + +static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, + u32 next_desc_addr) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + BUG_ON(hw_desc->next_desc); + hw_desc->next_desc = next_desc_addr; +} + +static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + return hw_desc->next_desc; +} + +static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + hw_desc->next_desc = 0; +} + +static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, + u32 val) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + 
hw_desc->block_fill_data = val; +} + +static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; + struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field; + + BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result)); + + if (desc_ctrl.pq_xfer_en) + return byte_count.zero_result_err_q; + else + return byte_count.zero_result_err; +} + +static inline void iop_chan_append(struct iop_adma_chan *chan) +{ + u32 adma_accr; + + adma_accr = __raw_readl(ADMA_ACCR(chan)); + adma_accr |= 0x2; + __raw_writel(adma_accr, ADMA_ACCR(chan)); +} + +static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan) +{ + do { } while (0); +} + +static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) +{ + return __raw_readl(ADMA_ACSR(chan)); +} + +static inline void iop_chan_disable(struct iop_adma_chan *chan) +{ + u32 adma_chan_ctrl = __raw_readl(ADMA_ACCR(chan)); + adma_chan_ctrl &= ~0x1; + __raw_writel(adma_chan_ctrl, ADMA_ACCR(chan)); +} + +static inline void iop_chan_enable(struct iop_adma_chan *chan) +{ + u32 adma_chan_ctrl; + + adma_chan_ctrl = __raw_readl(ADMA_ACCR(chan)); + adma_chan_ctrl |= 0x1; + __raw_writel(adma_chan_ctrl, ADMA_ACCR(chan)); +} + +static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(ADMA_ACSR(chan)); + status &= (1 << 12); + __raw_writel(status, ADMA_ACSR(chan)); +} + +static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(ADMA_ACSR(chan)); + status &= (1 << 11); + __raw_writel(status, ADMA_ACSR(chan)); +} + +static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(ADMA_ACSR(chan)); + status &= (1 << 9) | (1 << 5) | (1 << 4) | (1 << 3); + __raw_writel(status, ADMA_ACSR(chan)); +} + +static inline int +iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan) +{ + return test_bit(9, &status); +} + +static inline int +iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan) +{ + return test_bit(5, &status); +} + +static inline int +iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan) +{ + return test_bit(4, &status); +} + +static inline int +iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan) +{ + return test_bit(3, &status); +} + +static inline int +iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan) +{ + return 0; +} + +static inline int +iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan) +{ + return 0; +} + +static inline int +iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan) +{ + return 0; +} + +#endif /* _ADMA_H */ diff --git a/include/asm-arm/arch-iop13xx/iop13xx.h b/include/asm-arm/arch-iop13xx/iop13xx.h index e6736c3d1f7..d4e4f828577 100644 --- a/include/asm-arm/arch-iop13xx/iop13xx.h +++ b/include/asm-arm/arch-iop13xx/iop13xx.h @@ -166,12 +166,22 @@ static inline int iop13xx_cpu_id(void) #define IOP13XX_INIT_I2C_1 (1 << 1) #define IOP13XX_INIT_I2C_2 (1 << 2) -#define IQ81340_NUM_UART 2 -#define IQ81340_NUM_I2C 3 -#define IQ81340_NUM_PHYS_MAP_FLASH 1 -#define IQ81340_MAX_PLAT_DEVICES (IQ81340_NUM_UART +\ - IQ81340_NUM_I2C +\ - IQ81340_NUM_PHYS_MAP_FLASH) +/* ADMA selection flags */ +/* INIT_ADMA_DEFAULT = Rely on CONFIG_IOP13XX_ADMA* */ +#define IOP13XX_INIT_ADMA_DEFAULT (0) +#define IOP13XX_INIT_ADMA_0 (1 << 0) 
+#define IOP13XX_INIT_ADMA_1 (1 << 1) +#define IOP13XX_INIT_ADMA_2 (1 << 2) + +/* Platform devices */ +#define IQ81340_NUM_UART 2 +#define IQ81340_NUM_I2C 3 +#define IQ81340_NUM_PHYS_MAP_FLASH 1 +#define IQ81340_NUM_ADMA 3 +#define IQ81340_MAX_PLAT_DEVICES (IQ81340_NUM_UART + \ + IQ81340_NUM_I2C + \ + IQ81340_NUM_PHYS_MAP_FLASH + \ + IQ81340_NUM_ADMA) /*========================== PMMR offsets for key registers ============*/ #define IOP13XX_ATU0_PMMR_OFFSET 0x00048000 @@ -444,22 +454,6 @@ static inline int iop13xx_cpu_id(void) /*==============================ADMA UNITS===============================*/ #define IOP13XX_ADMA_PHYS_BASE(chan) IOP13XX_REG_ADDR32_PHYS((chan << 9)) #define IOP13XX_ADMA_UPPER_PA(chan) (IOP13XX_ADMA_PHYS_BASE(chan) + 0xc0) -#define IOP13XX_ADMA_OFFSET(chan, ofs) IOP13XX_REG_ADDR32((chan << 9) + (ofs)) - -#define IOP13XX_ADMA_ACCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x0) -#define IOP13XX_ADMA_ACSR(chan) IOP13XX_ADMA_OFFSET(chan, 0x4) -#define IOP13XX_ADMA_ADAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x8) -#define IOP13XX_ADMA_IIPCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x18) -#define IOP13XX_ADMA_IIPAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x1c) -#define IOP13XX_ADMA_IIPUAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x20) -#define IOP13XX_ADMA_ANDAR(chan) IOP13XX_ADMA_OFFSET(chan, 0x24) -#define IOP13XX_ADMA_ADCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x28) -#define IOP13XX_ADMA_CARMD(chan) IOP13XX_ADMA_OFFSET(chan, 0x2c) -#define IOP13XX_ADMA_ABCR(chan) IOP13XX_ADMA_OFFSET(chan, 0x30) -#define IOP13XX_ADMA_DLADR(chan) IOP13XX_ADMA_OFFSET(chan, 0x34) -#define IOP13XX_ADMA_DUADR(chan) IOP13XX_ADMA_OFFSET(chan, 0x38) -#define IOP13XX_ADMA_SLAR(src, chan) IOP13XX_ADMA_OFFSET(chan, 0x3c + (src <<3)) -#define IOP13XX_ADMA_SUAR(src, chan) IOP13XX_ADMA_OFFSET(chan, 0x40 + (src <<3)) /*==============================XSI BRIDGE===============================*/ #define IOP13XX_XBG_BECSR IOP13XX_REG_ADDR32(0x178c) -- cgit v1.2.3 From 2492c845189a961a92d8537a44d233e8e1e45c6d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 2 Jan 2007 13:52:31 -0700 Subject: iop3xx: surface the iop3xx DMA and AAU units to the iop-adma driver Adds the platform device definitions and the architecture specific support routines (i.e. register initialization and descriptor formats) for the iop-adma driver. 
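The headers added below only describe register layouts and descriptor formats;
the platform devices themselves are declared extern in iop3xx.h at the end of
this patch and are defined in the arch setup code, which lies outside this
include-only excerpt. For orientation, here is a rough sketch of how one such
channel could be described to the driver using the iop_adma_platform_data
structure from the iop_adma.h patch above. The IRQ number, pool size, initcall,
and the "iop-adma" device name are illustrative assumptions, not text from this
series.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/dmaengine.h>
	#include <linux/platform_device.h>
	#include <asm/hardware/iop3xx.h>	/* IOP3XX_DMA_PHYS_BASE(), added by this patch */
	#include <asm/hardware/iop_adma.h>	/* struct iop_adma_platform_data */

	/* Hypothetical board-support sketch, not part of this patch. */
	static struct resource iop3xx_dma_0_resources[] = {
		{
			.start	= IOP3XX_DMA_PHYS_BASE(0),	/* MMR window for DMA channel 0 */
			.end	= IOP3XX_DMA_UPPER_PA(0),
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 13,	/* placeholder end-of-transfer IRQ */
			.end	= 13,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct iop_adma_platform_data iop3xx_dma_0_data = {
		.hw_id		= 0,		/* DMA0_ID register layout selector */
		.pool_size	= PAGE_SIZE,	/* descriptor pool handed to the driver */
	};

	/* name matches the extern added to iop3xx.h by this patch */
	struct platform_device iop3xx_dma_0_channel = {
		.name		= "iop-adma",
		.id		= 0,
		.num_resources	= ARRAY_SIZE(iop3xx_dma_0_resources),
		.resource	= iop3xx_dma_0_resources,
		.dev		= {
			.platform_data	= &iop3xx_dma_0_data,
		},
	};

	static int __init iop3xx_dma_0_cap_init(void)
	{
		/* advertise the engine's capabilities to the dmaengine core */
		dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
		dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);
		return 0;
	}
	arch_initcall(iop3xx_dma_0_cap_init);

The driver presumably keys off hw_id to pick the register layout (the
DMA0_ID/DMA1_ID/AAU_ID values below) and, judging by IOP_ADMA_SLOT_SIZE and
iop_hw_desc_slot_idx() in iop_adma.h, carves pool_size bytes into 32 byte
descriptor slots.
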
Changelog:
 * add support for > 1k zero sum buffer sizes
 * added dma/aau platform devices to iq80321 and iq80332 setup
 * fixed the calculation in iop_desc_is_aligned
 * support xor buffer sizes larger than 16MB
 * fix places where software descriptors are assumed to be contiguous, only
   hardware descriptors are contiguous for up to a PAGE_SIZE buffer size
 * convert to async_tx
 * add interrupt support
 * add platform devices for 80219 boards
 * do not call platform register macros in driver code
 * remove switch() statements for compatible register offsets/layouts
 * change over to bitmap based capabilities
 * remove unnecessary ARM assembly statement
 * checkpatch.pl fixes
 * gpl v2 only correction
 * phys move to dma_async_tx_descriptor

Cc: Russell King
Signed-off-by: Dan Williams
---
 include/asm-arm/arch-iop32x/adma.h     |   5 +
 include/asm-arm/arch-iop33x/adma.h     |   5 +
 include/asm-arm/hardware/iop3xx-adma.h | 892 +++++++++++++++++++++++++++++++++
 include/asm-arm/hardware/iop3xx.h      |  68 +--
 4 files changed, 910 insertions(+), 60 deletions(-)
 create mode 100644 include/asm-arm/arch-iop32x/adma.h
 create mode 100644 include/asm-arm/arch-iop33x/adma.h
 create mode 100644 include/asm-arm/hardware/iop3xx-adma.h

(limited to 'include')

diff --git a/include/asm-arm/arch-iop32x/adma.h b/include/asm-arm/arch-iop32x/adma.h
new file mode 100644
index 00000000000..5ed92037dd1
--- /dev/null
+++ b/include/asm-arm/arch-iop32x/adma.h
@@ -0,0 +1,5 @@
+#ifndef IOP32X_ADMA_H
+#define IOP32X_ADMA_H
+#include
+#endif
+
diff --git a/include/asm-arm/arch-iop33x/adma.h b/include/asm-arm/arch-iop33x/adma.h
new file mode 100644
index 00000000000..4b92f795f90
--- /dev/null
+++ b/include/asm-arm/arch-iop33x/adma.h
@@ -0,0 +1,5 @@
+#ifndef IOP33X_ADMA_H
+#define IOP33X_ADMA_H
+#include
+#endif
+
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
new file mode 100644
index 00000000000..10834b54f68
--- /dev/null
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -0,0 +1,892 @@
+/*
+ * Copyright © 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * + */ +#ifndef _ADMA_H +#define _ADMA_H +#include +#include +#include +#include + +/* Memory copy units */ +#define DMA_CCR(chan) (chan->mmr_base + 0x0) +#define DMA_CSR(chan) (chan->mmr_base + 0x4) +#define DMA_DAR(chan) (chan->mmr_base + 0xc) +#define DMA_NDAR(chan) (chan->mmr_base + 0x10) +#define DMA_PADR(chan) (chan->mmr_base + 0x14) +#define DMA_PUADR(chan) (chan->mmr_base + 0x18) +#define DMA_LADR(chan) (chan->mmr_base + 0x1c) +#define DMA_BCR(chan) (chan->mmr_base + 0x20) +#define DMA_DCR(chan) (chan->mmr_base + 0x24) + +/* Application accelerator unit */ +#define AAU_ACR(chan) (chan->mmr_base + 0x0) +#define AAU_ASR(chan) (chan->mmr_base + 0x4) +#define AAU_ADAR(chan) (chan->mmr_base + 0x8) +#define AAU_ANDAR(chan) (chan->mmr_base + 0xc) +#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2))) +#define AAU_DAR(chan) (chan->mmr_base + 0x20) +#define AAU_ABCR(chan) (chan->mmr_base + 0x24) +#define AAU_ADCR(chan) (chan->mmr_base + 0x28) +#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2))) +#define AAU_EDCR0_IDX 8 +#define AAU_EDCR1_IDX 17 +#define AAU_EDCR2_IDX 26 + +#define DMA0_ID 0 +#define DMA1_ID 1 +#define AAU_ID 2 + +struct iop3xx_aau_desc_ctrl { + unsigned int int_en:1; + unsigned int blk1_cmd_ctrl:3; + unsigned int blk2_cmd_ctrl:3; + unsigned int blk3_cmd_ctrl:3; + unsigned int blk4_cmd_ctrl:3; + unsigned int blk5_cmd_ctrl:3; + unsigned int blk6_cmd_ctrl:3; + unsigned int blk7_cmd_ctrl:3; + unsigned int blk8_cmd_ctrl:3; + unsigned int blk_ctrl:2; + unsigned int dual_xor_en:1; + unsigned int tx_complete:1; + unsigned int zero_result_err:1; + unsigned int zero_result_en:1; + unsigned int dest_write_en:1; +}; + +struct iop3xx_aau_e_desc_ctrl { + unsigned int reserved:1; + unsigned int blk1_cmd_ctrl:3; + unsigned int blk2_cmd_ctrl:3; + unsigned int blk3_cmd_ctrl:3; + unsigned int blk4_cmd_ctrl:3; + unsigned int blk5_cmd_ctrl:3; + unsigned int blk6_cmd_ctrl:3; + unsigned int blk7_cmd_ctrl:3; + unsigned int blk8_cmd_ctrl:3; + unsigned int reserved2:7; +}; + +struct iop3xx_dma_desc_ctrl { + unsigned int pci_transaction:4; + unsigned int int_en:1; + unsigned int dac_cycle_en:1; + unsigned int mem_to_mem_en:1; + unsigned int crc_data_tx_en:1; + unsigned int crc_gen_en:1; + unsigned int crc_seed_dis:1; + unsigned int reserved:21; + unsigned int crc_tx_complete:1; +}; + +struct iop3xx_desc_dma { + u32 next_desc; + union { + u32 pci_src_addr; + u32 pci_dest_addr; + u32 src_addr; + }; + union { + u32 upper_pci_src_addr; + u32 upper_pci_dest_addr; + }; + union { + u32 local_pci_src_addr; + u32 local_pci_dest_addr; + u32 dest_addr; + }; + u32 byte_count; + union { + u32 desc_ctrl; + struct iop3xx_dma_desc_ctrl desc_ctrl_field; + }; + u32 crc_addr; +}; + +struct iop3xx_desc_aau { + u32 next_desc; + u32 src[4]; + u32 dest_addr; + u32 byte_count; + union { + u32 desc_ctrl; + struct iop3xx_aau_desc_ctrl desc_ctrl_field; + }; + union { + u32 src_addr; + u32 e_desc_ctrl; + struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field; + } src_edc[31]; +}; + +struct iop3xx_aau_gfmr { + unsigned int gfmr1:8; + unsigned int gfmr2:8; + unsigned int gfmr3:8; + unsigned int gfmr4:8; +}; + +struct iop3xx_desc_pq_xor { + u32 next_desc; + u32 src[3]; + union { + u32 data_mult1; + struct iop3xx_aau_gfmr data_mult1_field; + }; + u32 dest_addr; + u32 byte_count; + union { + u32 desc_ctrl; + struct iop3xx_aau_desc_ctrl desc_ctrl_field; + }; + union { + u32 src_addr; + u32 e_desc_ctrl; + struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field; + u32 data_multiplier; + struct iop3xx_aau_gfmr 
data_mult_field; + u32 reserved; + } src_edc_gfmr[19]; +}; + +struct iop3xx_desc_dual_xor { + u32 next_desc; + u32 src0_addr; + u32 src1_addr; + u32 h_src_addr; + u32 d_src_addr; + u32 h_dest_addr; + u32 byte_count; + union { + u32 desc_ctrl; + struct iop3xx_aau_desc_ctrl desc_ctrl_field; + }; + u32 d_dest_addr; +}; + +union iop3xx_desc { + struct iop3xx_desc_aau *aau; + struct iop3xx_desc_dma *dma; + struct iop3xx_desc_pq_xor *pq_xor; + struct iop3xx_desc_dual_xor *dual_xor; + void *ptr; +}; + +static inline int iop_adma_get_max_xor(void) +{ + return 32; +} + +static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) +{ + int id = chan->device->id; + + switch (id) { + case DMA0_ID: + case DMA1_ID: + return __raw_readl(DMA_DAR(chan)); + case AAU_ID: + return __raw_readl(AAU_ADAR(chan)); + default: + BUG(); + } + return 0; +} + +static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan, + u32 next_desc_addr) +{ + int id = chan->device->id; + + switch (id) { + case DMA0_ID: + case DMA1_ID: + __raw_writel(next_desc_addr, DMA_NDAR(chan)); + break; + case AAU_ID: + __raw_writel(next_desc_addr, AAU_ANDAR(chan)); + break; + } + +} + +#define IOP_ADMA_STATUS_BUSY (1 << 10) +#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024) +#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024) +#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024) + +static inline int iop_chan_is_busy(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(DMA_CSR(chan)); + return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0; +} + +static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc, + int num_slots) +{ + /* num_slots will only ever be 1, 2, 4, or 8 */ + return (desc->idx & (num_slots - 1)) ? 0 : 1; +} + +/* to do: support large (i.e. > hw max) buffer sizes */ +static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op) +{ + *slots_per_op = 1; + return 1; +} + +/* to do: support large (i.e. 
> hw max) buffer sizes */ +static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op) +{ + *slots_per_op = 1; + return 1; +} + +static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt, + int *slots_per_op) +{ + const static int slot_count_table[] = { 0, + 1, 1, 1, 1, /* 01 - 04 */ + 2, 2, 2, 2, /* 05 - 08 */ + 4, 4, 4, 4, /* 09 - 12 */ + 4, 4, 4, 4, /* 13 - 16 */ + 8, 8, 8, 8, /* 17 - 20 */ + 8, 8, 8, 8, /* 21 - 24 */ + 8, 8, 8, 8, /* 25 - 28 */ + 8, 8, 8, 8, /* 29 - 32 */ + }; + *slots_per_op = slot_count_table[src_cnt]; + return *slots_per_op; +} + +static inline int +iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan) +{ + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return iop_chan_memcpy_slot_count(0, slots_per_op); + case AAU_ID: + return iop3xx_aau_xor_slot_count(0, 2, slots_per_op); + default: + BUG(); + } + return 0; +} + +static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, + int *slots_per_op) +{ + int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op); + + if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT) + return slot_cnt; + + len -= IOP_ADMA_XOR_MAX_BYTE_COUNT; + while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) { + len -= IOP_ADMA_XOR_MAX_BYTE_COUNT; + slot_cnt += *slots_per_op; + } + + if (len) + slot_cnt += *slots_per_op; + + return slot_cnt; +} + +/* zero sum on iop3xx is limited to 1k at a time so it requires multiple + * descriptors + */ +static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, + int *slots_per_op) +{ + int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op); + + if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) + return slot_cnt; + + len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; + while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { + len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; + slot_cnt += *slots_per_op; + } + + if (len) + slot_cnt += *slots_per_op; + + return slot_cnt; +} + +static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return hw_desc.dma->dest_addr; + case AAU_ID: + return hw_desc.aau->dest_addr; + default: + BUG(); + } + return 0; +} + +static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return hw_desc.dma->byte_count; + case AAU_ID: + return hw_desc.aau->byte_count; + default: + BUG(); + } + return 0; +} + +/* translate the src_idx to a descriptor word index */ +static inline int __desc_idx(int src_idx) +{ + const static int desc_idx_table[] = { 0, 0, 0, 0, + 0, 1, 2, 3, + 5, 6, 7, 8, + 9, 10, 11, 12, + 14, 15, 16, 17, + 18, 19, 20, 21, + 23, 24, 25, 26, + 27, 28, 29, 30, + }; + + return desc_idx_table[src_idx]; +} + +static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan, + int src_idx) +{ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return hw_desc.dma->src_addr; + case AAU_ID: + break; + default: + BUG(); + } + + if (src_idx < 4) + return hw_desc.aau->src[src_idx]; + else + return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr; +} + +static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc, + int src_idx, dma_addr_t addr) +{ + if (src_idx < 4) + 
hw_desc->src[src_idx] = addr; + else + hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr; +} + +static inline void +iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en) +{ + struct iop3xx_desc_dma *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop3xx_dma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.mem_to_mem_en = 1; + u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */ + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; + hw_desc->upper_pci_src_addr = 0; + hw_desc->crc_addr = 0; +} + +static inline void +iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en) +{ + struct iop3xx_desc_aau *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop3xx_aau_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */ + u_desc_ctrl.field.dest_write_en = 1; + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; +} + +static inline u32 +iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en) +{ + int i, shift; + u32 edcr; + union { + u32 value; + struct iop3xx_aau_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + switch (src_cnt) { + case 25 ... 32: + u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ + edcr = 0; + shift = 1; + for (i = 24; i < src_cnt; i++) { + edcr |= (1 << shift); + shift += 3; + } + hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr; + src_cnt = 24; + /* fall through */ + case 17 ... 24: + if (!u_desc_ctrl.field.blk_ctrl) { + hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; + u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ + } + edcr = 0; + shift = 1; + for (i = 16; i < src_cnt; i++) { + edcr |= (1 << shift); + shift += 3; + } + hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr; + src_cnt = 16; + /* fall through */ + case 9 ... 16: + if (!u_desc_ctrl.field.blk_ctrl) + u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ + edcr = 0; + shift = 1; + for (i = 8; i < src_cnt; i++) { + edcr |= (1 << shift); + shift += 3; + } + hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr; + src_cnt = 8; + /* fall through */ + case 2 ... 
8: + shift = 1; + for (i = 0; i < src_cnt; i++) { + u_desc_ctrl.value |= (1 << shift); + shift += 3; + } + + if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4) + u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ + } + + u_desc_ctrl.field.dest_write_en = 1; + u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */ + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; + + return u_desc_ctrl.value; +} + +static inline void +iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) +{ + iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en); +} + +/* return the number of operations */ +static inline int +iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) +{ + int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; + struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter; + union { + u32 value; + struct iop3xx_aau_desc_ctrl field; + } u_desc_ctrl; + int i, j; + + hw_desc = desc->hw_desc; + + for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0; + i += slots_per_op, j++) { + iter = iop_hw_desc_slot_idx(hw_desc, i); + u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en); + u_desc_ctrl.field.dest_write_en = 0; + u_desc_ctrl.field.zero_result_en = 1; + u_desc_ctrl.field.int_en = int_en; + iter->desc_ctrl = u_desc_ctrl.value; + + /* for the subsequent descriptors preserve the store queue + * and chain them together + */ + if (i) { + prev_hw_desc = + iop_hw_desc_slot_idx(hw_desc, i - slots_per_op); + prev_hw_desc->next_desc = + (u32) (desc->async_tx.phys + (i << 5)); + } + } + + return j; +} + +static inline void +iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en) +{ + struct iop3xx_desc_aau *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop3xx_aau_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + switch (src_cnt) { + case 25 ... 32: + u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ + hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; + /* fall through */ + case 17 ... 24: + if (!u_desc_ctrl.field.blk_ctrl) { + hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; + u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ + } + hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0; + /* fall through */ + case 9 ... 16: + if (!u_desc_ctrl.field.blk_ctrl) + u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ + hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0; + /* fall through */ + case 1 ... 
8: + if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4) + u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ + } + + u_desc_ctrl.field.dest_write_en = 0; + u_desc_ctrl.field.int_en = int_en; + hw_desc->desc_ctrl = u_desc_ctrl.value; +} + +static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan, + u32 byte_count) +{ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + hw_desc.dma->byte_count = byte_count; + break; + case AAU_ID: + hw_desc.aau->byte_count = byte_count; + break; + default: + BUG(); + } +} + +static inline void +iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + iop_desc_init_memcpy(desc, 1); + hw_desc.dma->byte_count = 0; + hw_desc.dma->dest_addr = 0; + hw_desc.dma->src_addr = 0; + break; + case AAU_ID: + iop_desc_init_null_xor(desc, 2, 1); + hw_desc.aau->byte_count = 0; + hw_desc.aau->dest_addr = 0; + hw_desc.aau->src[0] = 0; + hw_desc.aau->src[1] = 0; + break; + default: + BUG(); + } +} + +static inline void +iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) +{ + int slots_per_op = desc->slots_per_op; + struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; + int i = 0; + + if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { + hw_desc->byte_count = len; + } else { + do { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; + len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; + i += slots_per_op; + } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); + + if (len) { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = len; + } + } +} + +static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan, + dma_addr_t addr) +{ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + hw_desc.dma->dest_addr = addr; + break; + case AAU_ID: + hw_desc.aau->dest_addr = addr; + break; + default: + BUG(); + } +} + +static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, + dma_addr_t addr) +{ + struct iop3xx_desc_dma *hw_desc = desc->hw_desc; + hw_desc->src_addr = addr; +} + +static inline void +iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx, + dma_addr_t addr) +{ + + struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; + int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; + int i; + + for (i = 0; (slot_cnt -= slots_per_op) >= 0; + i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iop3xx_aau_desc_set_src_addr(iter, src_idx, addr); + } +} + +static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, + int src_idx, dma_addr_t addr) +{ + + struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; + int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; + int i; + + for (i = 0; (slot_cnt -= slots_per_op) >= 0; + i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iop3xx_aau_desc_set_src_addr(iter, src_idx, addr); + } +} + +static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, + u32 next_desc_addr) +{ + /* hw_desc->next_desc is the same location for all channels */ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + 
BUG_ON(hw_desc.dma->next_desc); + hw_desc.dma->next_desc = next_desc_addr; +} + +static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) +{ + /* hw_desc->next_desc is the same location for all channels */ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + return hw_desc.dma->next_desc; +} + +static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) +{ + /* hw_desc->next_desc is the same location for all channels */ + union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; + hw_desc.dma->next_desc = 0; +} + +static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, + u32 val) +{ + struct iop3xx_desc_aau *hw_desc = desc->hw_desc; + hw_desc->src[0] = val; +} + +static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +{ + struct iop3xx_desc_aau *hw_desc = desc->hw_desc; + struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; + + BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); + return desc_ctrl.zero_result_err; +} + +static inline void iop_chan_append(struct iop_adma_chan *chan) +{ + u32 dma_chan_ctrl; + /* workaround dropped interrupts on 3xx */ + mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3)); + + dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); + dma_chan_ctrl |= 0x2; + __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); +} + +static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan) +{ + if (!busy) + del_timer(&chan->cleanup_watchdog); +} + +static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) +{ + return __raw_readl(DMA_CSR(chan)); +} + +static inline void iop_chan_disable(struct iop_adma_chan *chan) +{ + u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); + dma_chan_ctrl &= ~1; + __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); +} + +static inline void iop_chan_enable(struct iop_adma_chan *chan) +{ + u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); + + dma_chan_ctrl |= 1; + __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); +} + +static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(DMA_CSR(chan)); + status &= (1 << 9); + __raw_writel(status, DMA_CSR(chan)); +} + +static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(DMA_CSR(chan)); + status &= (1 << 8); + __raw_writel(status, DMA_CSR(chan)); +} + +static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan) +{ + u32 status = __raw_readl(DMA_CSR(chan)); + + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1); + break; + case AAU_ID: + status &= (1 << 5); + break; + default: + BUG(); + } + + __raw_writel(status, DMA_CSR(chan)); +} + +static inline int +iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan) +{ + return 0; +} + +static inline int +iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan) +{ + return 0; +} + +static inline int +iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan) +{ + return 0; +} + +static inline int +iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan) +{ + return test_bit(5, &status); +} + +static inline int +iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan) +{ + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return test_bit(2, &status); + default: + return 0; + } +} + +static inline int +iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan) +{ + switch 
(chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return test_bit(3, &status); + default: + return 0; + } +} + +static inline int +iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan) +{ + switch (chan->device->id) { + case DMA0_ID: + case DMA1_ID: + return test_bit(1, &status); + default: + return 0; + } +} +#endif /* _ADMA_H */ diff --git a/include/asm-arm/hardware/iop3xx.h b/include/asm-arm/hardware/iop3xx.h index 63feceb7ede..81ca5d3e2bf 100644 --- a/include/asm-arm/hardware/iop3xx.h +++ b/include/asm-arm/hardware/iop3xx.h @@ -144,24 +144,9 @@ extern int init_atu; #define IOP3XX_IAR (volatile u32 *)IOP3XX_REG_ADDR(0x0380) /* DMA Controller */ -#define IOP3XX_DMA0_CCR (volatile u32 *)IOP3XX_REG_ADDR(0x0400) -#define IOP3XX_DMA0_CSR (volatile u32 *)IOP3XX_REG_ADDR(0x0404) -#define IOP3XX_DMA0_DAR (volatile u32 *)IOP3XX_REG_ADDR(0x040c) -#define IOP3XX_DMA0_NDAR (volatile u32 *)IOP3XX_REG_ADDR(0x0410) -#define IOP3XX_DMA0_PADR (volatile u32 *)IOP3XX_REG_ADDR(0x0414) -#define IOP3XX_DMA0_PUADR (volatile u32 *)IOP3XX_REG_ADDR(0x0418) -#define IOP3XX_DMA0_LADR (volatile u32 *)IOP3XX_REG_ADDR(0x041c) -#define IOP3XX_DMA0_BCR (volatile u32 *)IOP3XX_REG_ADDR(0x0420) -#define IOP3XX_DMA0_DCR (volatile u32 *)IOP3XX_REG_ADDR(0x0424) -#define IOP3XX_DMA1_CCR (volatile u32 *)IOP3XX_REG_ADDR(0x0440) -#define IOP3XX_DMA1_CSR (volatile u32 *)IOP3XX_REG_ADDR(0x0444) -#define IOP3XX_DMA1_DAR (volatile u32 *)IOP3XX_REG_ADDR(0x044c) -#define IOP3XX_DMA1_NDAR (volatile u32 *)IOP3XX_REG_ADDR(0x0450) -#define IOP3XX_DMA1_PADR (volatile u32 *)IOP3XX_REG_ADDR(0x0454) -#define IOP3XX_DMA1_PUADR (volatile u32 *)IOP3XX_REG_ADDR(0x0458) -#define IOP3XX_DMA1_LADR (volatile u32 *)IOP3XX_REG_ADDR(0x045c) -#define IOP3XX_DMA1_BCR (volatile u32 *)IOP3XX_REG_ADDR(0x0460) -#define IOP3XX_DMA1_DCR (volatile u32 *)IOP3XX_REG_ADDR(0x0464) +#define IOP3XX_DMA_PHYS_BASE(chan) (IOP3XX_PERIPHERAL_PHYS_BASE + \ + (0x400 + (chan << 6))) +#define IOP3XX_DMA_UPPER_PA(chan) (IOP3XX_DMA_PHYS_BASE(chan) + 0x27) /* Peripheral bus interface */ #define IOP3XX_PBCR (volatile u32 *)IOP3XX_REG_ADDR(0x0680) @@ -210,48 +195,8 @@ extern int init_atu; #define IOP_TMR_RATIO_1_1 0x00 /* Application accelerator unit */ -#define IOP3XX_AAU_ACR (volatile u32 *)IOP3XX_REG_ADDR(0x0800) -#define IOP3XX_AAU_ASR (volatile u32 *)IOP3XX_REG_ADDR(0x0804) -#define IOP3XX_AAU_ADAR (volatile u32 *)IOP3XX_REG_ADDR(0x0808) -#define IOP3XX_AAU_ANDAR (volatile u32 *)IOP3XX_REG_ADDR(0x080c) -#define IOP3XX_AAU_SAR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0810) -#define IOP3XX_AAU_SAR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0814) -#define IOP3XX_AAU_SAR3 (volatile u32 *)IOP3XX_REG_ADDR(0x0818) -#define IOP3XX_AAU_SAR4 (volatile u32 *)IOP3XX_REG_ADDR(0x081c) -#define IOP3XX_AAU_DAR (volatile u32 *)IOP3XX_REG_ADDR(0x0820) -#define IOP3XX_AAU_ABCR (volatile u32 *)IOP3XX_REG_ADDR(0x0824) -#define IOP3XX_AAU_ADCR (volatile u32 *)IOP3XX_REG_ADDR(0x0828) -#define IOP3XX_AAU_SAR5 (volatile u32 *)IOP3XX_REG_ADDR(0x082c) -#define IOP3XX_AAU_SAR6 (volatile u32 *)IOP3XX_REG_ADDR(0x0830) -#define IOP3XX_AAU_SAR7 (volatile u32 *)IOP3XX_REG_ADDR(0x0834) -#define IOP3XX_AAU_SAR8 (volatile u32 *)IOP3XX_REG_ADDR(0x0838) -#define IOP3XX_AAU_EDCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x083c) -#define IOP3XX_AAU_SAR9 (volatile u32 *)IOP3XX_REG_ADDR(0x0840) -#define IOP3XX_AAU_SAR10 (volatile u32 *)IOP3XX_REG_ADDR(0x0844) -#define IOP3XX_AAU_SAR11 (volatile u32 *)IOP3XX_REG_ADDR(0x0848) -#define IOP3XX_AAU_SAR12 (volatile u32 *)IOP3XX_REG_ADDR(0x084c) -#define IOP3XX_AAU_SAR13 
(volatile u32 *)IOP3XX_REG_ADDR(0x0850) -#define IOP3XX_AAU_SAR14 (volatile u32 *)IOP3XX_REG_ADDR(0x0854) -#define IOP3XX_AAU_SAR15 (volatile u32 *)IOP3XX_REG_ADDR(0x0858) -#define IOP3XX_AAU_SAR16 (volatile u32 *)IOP3XX_REG_ADDR(0x085c) -#define IOP3XX_AAU_EDCR1 (volatile u32 *)IOP3XX_REG_ADDR(0x0860) -#define IOP3XX_AAU_SAR17 (volatile u32 *)IOP3XX_REG_ADDR(0x0864) -#define IOP3XX_AAU_SAR18 (volatile u32 *)IOP3XX_REG_ADDR(0x0868) -#define IOP3XX_AAU_SAR19 (volatile u32 *)IOP3XX_REG_ADDR(0x086c) -#define IOP3XX_AAU_SAR20 (volatile u32 *)IOP3XX_REG_ADDR(0x0870) -#define IOP3XX_AAU_SAR21 (volatile u32 *)IOP3XX_REG_ADDR(0x0874) -#define IOP3XX_AAU_SAR22 (volatile u32 *)IOP3XX_REG_ADDR(0x0878) -#define IOP3XX_AAU_SAR23 (volatile u32 *)IOP3XX_REG_ADDR(0x087c) -#define IOP3XX_AAU_SAR24 (volatile u32 *)IOP3XX_REG_ADDR(0x0880) -#define IOP3XX_AAU_EDCR2 (volatile u32 *)IOP3XX_REG_ADDR(0x0884) -#define IOP3XX_AAU_SAR25 (volatile u32 *)IOP3XX_REG_ADDR(0x0888) -#define IOP3XX_AAU_SAR26 (volatile u32 *)IOP3XX_REG_ADDR(0x088c) -#define IOP3XX_AAU_SAR27 (volatile u32 *)IOP3XX_REG_ADDR(0x0890) -#define IOP3XX_AAU_SAR28 (volatile u32 *)IOP3XX_REG_ADDR(0x0894) -#define IOP3XX_AAU_SAR29 (volatile u32 *)IOP3XX_REG_ADDR(0x0898) -#define IOP3XX_AAU_SAR30 (volatile u32 *)IOP3XX_REG_ADDR(0x089c) -#define IOP3XX_AAU_SAR31 (volatile u32 *)IOP3XX_REG_ADDR(0x08a0) -#define IOP3XX_AAU_SAR32 (volatile u32 *)IOP3XX_REG_ADDR(0x08a4) +#define IOP3XX_AAU_PHYS_BASE (IOP3XX_PERIPHERAL_PHYS_BASE + 0x800) +#define IOP3XX_AAU_UPPER_PA (IOP3XX_AAU_PHYS_BASE + 0xa7) /* I2C bus interface unit */ #define IOP3XX_ICR0 (volatile u32 *)IOP3XX_REG_ADDR(0x1680) @@ -329,6 +274,9 @@ static inline void write_tisr(u32 val) asm volatile("mcr p6, 0, %0, c6, c1, 0" : : "r" (val)); } +extern struct platform_device iop3xx_dma_0_channel; +extern struct platform_device iop3xx_dma_1_channel; +extern struct platform_device iop3xx_aau_channel; extern struct platform_device iop3xx_i2c0_device; extern struct platform_device iop3xx_i2c1_device; -- cgit v1.2.3 From 3039f0735a280b54c7364fbfe6a9287f7f0b510a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 13 Jul 2007 08:06:19 -0700 Subject: ioatdma: add the unisys "i/oat" pci vendor/device id Cc: John Magolan Signed-off-by: Shannon Nelson Signed-off-by: Dan Williams --- include/linux/pci_ids.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 5b1c9994f89..0275f6917c8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -475,6 +475,9 @@ #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 +#define PCI_VENDOR_ID_UNISYS 0x1018 +#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C + #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ #define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 -- cgit v1.2.3
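
The pci_ids.h patch above only reserves the vendor/device numbers. As a purely
illustrative sketch (the table name and driver wiring are placeholders, not the
actual ioatdma source), a driver supporting this part would typically pick the
new ID up in its PCI match table:

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Hypothetical match table for a driver claiming the Unisys DMA Director. */
	static struct pci_device_id dma_director_pci_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, dma_director_pci_tbl);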