/* Initial buffer wait/IRQ stuff — kernel.git: drivers/mfd/glamo/glamo-cmdq.c */
1 /*
2  * SMedia Glamo 336x/337x command queue handling
3  *
4  * Copyright (C) 2008-2009 Thomas White <taw@bitwiz.org.uk>
5  * Copyright (C) 2009 Andreas Pokorny <andreas.pokorny@gmail.com>
6  * Based on xf86-video-glamo (see below for details)
7  *
8  * All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License as
12  * published by the Free Software Foundation; either version 2 of
13  * the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
23  * MA 02111-1307 USA
24  *
25  * Command queue handling functions based on those from xf86-video-glamo, to
26  * which the following licence applies:
27  *
28  * Copyright  2007 OpenMoko, Inc.
29  * Copyright © 2009 Lars-Peter Clausen <lars@metafoo.de>
30  *
31  * This driver is based on Xati,
32  * Copyright  2004 Eric Anholt
33  *
34  * Permission to use, copy, modify, distribute, and sell this software and its
35  * documentation for any purpose is hereby granted without fee, provided that
36  * the above copyright notice appear in all copies and that both that copyright
37  * notice and this permission notice appear in supporting documentation, and
38  * that the name of the copyright holders not be used in advertising or
39  * publicity pertaining to distribution of the software without specific,
40  * written prior permission.  The copyright holders make no representations
41  * about the suitability of this software for any purpose.  It is provided "as
42  * is" without express or implied warranty.
43  *
44  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
45  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
46  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
47  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
48  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
49  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
50  * OF THIS SOFTWARE.
51  */
52
53
54 #include <linux/irq.h>
55 #include <linux/interrupt.h>
56 #include <drm/drmP.h>
57 #include <drm/glamo_drm.h>
58
59 #include "glamo-core.h"
60 #include "glamo-drm-private.h"
61 #include "glamo-regs.h"
62
63
64 static inline void reg_write(struct glamodrm_handle *gdrm,
65                              u_int16_t reg, u_int16_t val)
66 {
67         iowrite16(val, gdrm->reg_base + reg);
68 }
69
70
71 static inline u16 reg_read(struct glamodrm_handle *gdrm, u_int16_t reg)
72 {
73         return ioread16(gdrm->reg_base + reg);
74 }
75
76
77 static u32 glamo_get_read(struct glamodrm_handle *gdrm)
78 {
79         /* we could turn off clock here */
80         u32 ring_read = reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRL);
81         ring_read |= (reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRH) & 0x7) << 16;
82
83         return ring_read;
84 }
85
86
87 static u32 glamo_get_write(struct glamodrm_handle *gdrm)
88 {
89         u32 ring_write = reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL);
90         ring_write |= (reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH) & 0x7) << 16;
91
92         return ring_write;
93 }
94
95
96 static void glamo_enable_cmdq_irq(struct glamodrm_handle *gdrm)
97 {
98         uint16_t irq_status = reg_read(gdrm, GLAMO_REG_IRQ_ENABLE);
99         irq_status |= GLAMO_IRQ_CMDQUEUE;
100         reg_write(gdrm, GLAMO_REG_IRQ_ENABLE, irq_status);
101 }
102
103
104 static void glamo_set_cmdq_irq(struct glamodrm_handle *gdrm)
105 {
106         uint16_t irq_status = reg_read(gdrm, GLAMO_REG_IRQ_SET);
107         irq_status |= GLAMO_IRQ_CMDQUEUE;
108         reg_write(gdrm, GLAMO_REG_IRQ_SET, irq_status);
109 }
110
111
/* Flow handler for the command-queue interrupt (legacy genirq API:
 * installed via set_irq_handler(), with the driver handle attached
 * through set_irq_data() — see glamo_cmdq_init()).
 * All it does is acknowledge the interrupt in the hardware; the
 * handle may legitimately be NULL before init completes, in which
 * case the IRQ is silently ignored. */
static void glamo_cmdq_irq(unsigned int irq, struct irq_desc *desc)
{
        struct glamodrm_handle *gdrm = desc->handler_data;

        if (!gdrm) return;
        reg_write(gdrm, GLAMO_REG_IRQ_CLEAR, GLAMO_IRQ_CMDQUEUE);
}
119
120
/* Add commands to the ring buffer.
 *
 * 'addr' points to 'count' bytes of 16-bit command words, which are
 * copied into the hardware command queue at the current write position,
 * wrapping around the end of the queue if necessary.  The hardware
 * write pointer is then advanced so the command queue engine starts
 * consuming the new commands.  Serialised by the add_to_ring semaphore.
 *
 * NOTE(review): the space check below busy-waits (no cpu_relax()/sleep)
 * until the hardware read pointer leaves the region we are about to
 * overwrite — acceptable only if the engine always makes progress.
 *
 * Always returns 0. */
static int glamo_add_to_ring(struct glamodrm_handle *gdrm, u16 *addr,
                             unsigned int count)
{
        size_t ring_write, ring_read;
        size_t new_ring_write;

        down(&gdrm->add_to_ring);

        ring_write = glamo_get_write(gdrm);

        /* Calculate where we'll end up */
        new_ring_write = (ring_write + count) % GLAMO_CMDQ_SIZE;

        /* Wait until there is enough space to queue the cmd buffer */
        if (new_ring_write > ring_write) {
                /* Loop while the read pointer is between the old and new
                 * positions */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write && ring_read < new_ring_write);
        } else {
                /* Same, but kind of inside-out */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write || ring_read < new_ring_write);
        }

        /* Are we about to wrap around? */
        if (ring_write >= new_ring_write) {

                u32 rest_size;

                /* Wrap around */
                rest_size = GLAMO_CMDQ_SIZE - ring_write; /* Space left */

                /* Write from current position to end */
                memcpy_toio(gdrm->cmdq_base+ring_write, addr, rest_size);

                /* Write from start (rest_size>>1 converts bytes to u16
                 * words to index 'addr') */
                memcpy_toio(gdrm->cmdq_base, addr+(rest_size>>1),
                            count - rest_size);

                /* ring_write being 0 will result in a deadlock because the
                 * cmdq read will never stop. To avoid such a behaviour insert
                 * an empty instruction. */
                if (new_ring_write == 0) {
                        iowrite16(0x0000, gdrm->cmdq_base);
                        iowrite16(0x0000, gdrm->cmdq_base + 2);
                        new_ring_write = 4;
                }

                /* Suppose we just filled the WHOLE ring buffer, and so the
                 * write position ends up in the same place as it started.
                 * No change in pointer means no activity from the command
                 * queue engine.  So, insert a no-op */
                if (ring_write == new_ring_write) {
                        iowrite16(0x0000, gdrm->cmdq_base + new_ring_write);
                        iowrite16(0x0000, gdrm->cmdq_base + new_ring_write + 2);
                        new_ring_write += 4;
                }

        } else {

                memcpy_toio(gdrm->cmdq_base+ring_write, addr, count);

        }

        /* Publish the new write position; the engine starts consuming
         * commands once the pointer moves. */
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH,
                        (new_ring_write >> 16) & 0x7f);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL,
                        new_ring_write & 0xffff);

        up(&gdrm->add_to_ring);

        return 0;
}
198
199
200 /* Return true for a legal sequence of commands, otherwise false */
201 static int glamo_sanitize_buffer(u16 *cmds, unsigned int count)
202 {
203         /* XXX FIXME TODO: Implementation... */
204         return 1;
205 }
206
207
208 /* Substitute the real addresses in VRAM for any required buffer objects */
209 static int glamo_do_relocation(struct glamodrm_handle *gdrm,
210                                drm_glamo_cmd_buffer_t *cbuf, u16 *cmds,
211                                struct drm_device *dev,
212                                struct drm_file *file_priv)
213 {
214         u32 *handles;
215         int *offsets;
216         int nobjs =  cbuf->nobjs;
217         int i;
218
219         if ( nobjs > 32 ) return -EINVAL;       /* Get real... */
220
221         handles = drm_alloc(nobjs*sizeof(u32), DRM_MEM_DRIVER);
222         if ( handles == NULL ) return -1;
223         if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) )
224                 return -1;
225
226         offsets = drm_alloc(nobjs*sizeof(int), DRM_MEM_DRIVER);
227         if ( offsets == NULL ) return -1;
228         if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) )
229                 return -1;
230
231         for ( i=0; i<nobjs; i++ ) {
232
233                 u32 handle = handles[i];
234                 int offset = offsets[i];
235                 struct drm_gem_object *obj;
236                 struct drm_glamo_gem_object *gobj;
237                 u32 addr;
238                 u16 addr_low, addr_high;
239
240                 if ( offset > cbuf->bufsz ) {
241                         printk(KERN_WARNING "[glamo-drm] Offset out of range"
242                                             " for this relocation!\n");
243                         goto fail;
244                 }
245
246                 obj = drm_gem_object_lookup(dev, file_priv, handle);
247                 if ( obj == NULL ) return -1;
248
249                 /* Unref the object now, or it'll never get freed.
250                  * This should really happen after the GPU has finished
251                  * the commands which are about to be submitted. */
252                 drm_gem_object_unreference(obj);
253
254                 gobj = obj->driver_private;
255                 if ( gobj == NULL ) {
256                         printk(KERN_WARNING "[glamo-drm] This object has no"
257                                             " private data!\n");
258                         goto fail;
259                 }
260
261                 addr = GLAMO_OFFSET_FB + gobj->block->start;
262                 addr_low = addr & 0xffff;
263                 addr_high = (addr >> 16) & 0x7f;
264
265                 /* FIXME: Should really check that the register is a
266                  * valid one for this relocation. */
267
268                 *(cmds+(offset/2)+1) = addr_low;
269                 *(cmds+(offset/2)+3) = addr_high;
270
271         }
272
273         drm_free(handles, 1, DRM_MEM_DRIVER);
274         drm_free(offsets, 1, DRM_MEM_DRIVER);
275         return 0;
276
277 fail:
278         drm_free(handles, 1, DRM_MEM_DRIVER);
279         drm_free(offsets, 1, DRM_MEM_DRIVER);
280         return -1;
281 }
282
283
284 /* This is DRM_IOCTL_GLAMO_CMDBUF */
285 int glamo_ioctl_cmdbuf(struct drm_device *dev, void *data,
286                        struct drm_file *file_priv)
287 {
288         int ret = 0;
289         struct glamodrm_handle *gdrm;
290         unsigned int count;
291         drm_glamo_cmd_buffer_t *cbuf = data;
292         u16 *cmds;
293
294         gdrm = dev->dev_private;
295
296         count = cbuf->bufsz;
297
298         if ( count > PAGE_SIZE ) return -EINVAL;
299
300         cmds = drm_alloc(count, DRM_MEM_DRIVER);
301         if ( cmds == NULL ) return -ENOMEM;
302         if ( copy_from_user(cmds, cbuf->buf, count) )   {
303                 printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
304                 ret = -EINVAL;
305                 goto cleanup;
306         }
307
308         /* Check the buffer isn't going to tell Glamo to enact naughtiness */
309         if ( !glamo_sanitize_buffer(cmds, count) ) {
310                 printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
311                 ret = -EINVAL;
312                 goto cleanup;
313         }
314
315         /* Perform relocation, if necessary */
316         if ( cbuf->nobjs ) {
317                 if ( glamo_do_relocation(gdrm, cbuf, cmds, dev, file_priv) )
318                 {
319                         printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
320                         ret = -EINVAL;
321                         goto cleanup;
322                 }
323         }
324
325         glamo_add_to_ring(gdrm, cmds, count);
326
327         glamo_set_cmdq_irq(gdrm);
328
329 cleanup:
330         drm_free(cmds, 1, DRM_MEM_DRIVER);
331
332         return ret;
333 }
334
335
336 /* TODO: Banish this to the nether regions of Hades */
337 static void glamo_cmdq_wait(struct glamodrm_handle *gdrm,
338                             enum glamo_engine engine)
339 {
340         u16 mask, val, status;
341         int i;
342
343         switch (engine)
344         {
345                 case GLAMO_ENGINE_ALL:
346                         mask = 1 << 2;
347                         val  = mask;
348                         break;
349                 default:
350                         return;
351         }
352
353         for ( i=0; i<1000; i++ ) {
354                 status = reg_read(gdrm, GLAMO_REG_CMDQ_STATUS);
355                 if ((status & mask) == val) break;
356                 mdelay(1);
357         }
358         if ( i == 1000 ) {
359                 size_t ring_read;
360                 printk(KERN_WARNING "[glamo-drm] CmdQ timeout!\n");
361                 printk(KERN_WARNING "[glamo-drm] status = %x\n", status);
362                 ring_read = reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRL);
363                 ring_read |= ((reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRH)
364                                 & 0x7) << 16);
365                 printk(KERN_INFO "[glamo-drm] ring_read now 0x%x\n",
366                                  ring_read);
367         }
368 }
369
370
371 int glamo_ioctl_gem_wait_rendering(struct drm_device *dev, void *data,
372                                    struct drm_file *file_priv)
373 {
374         struct glamodrm_handle *gdrm;
375
376         gdrm = dev->dev_private;
377         glamo_cmdq_wait(gdrm, GLAMO_ENGINE_ALL);
378
379         return 0;
380 }
381
382
/* Bring up the command queue engine: enable and reset the 2D and CMDQ
 * engines, zero the queue memory, program the queue's location and
 * length, reset the write pointer, configure queue behaviour, and
 * install/enable the command-queue interrupt.  Always returns 0.
 * NOTE(review): the order here (engine enable/reset before register
 * setup) appears deliberate — preserve it. */
int glamo_cmdq_init(struct glamodrm_handle *gdrm)
{
        unsigned int i;

        /* Semaphore serialising glamo_add_to_ring() */
        init_MUTEX(&gdrm->add_to_ring);

        /* Enable 2D and 3D */
        glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_2D);
        glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_2D);

        /* Start by zeroing the command queue memory */
        for ( i=0; i<GLAMO_CMDQ_SIZE; i+=2 ) {
                iowrite16(0x0000, gdrm->cmdq_base+i);
        }

        glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);
        glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);

        /* Set up command queue location */
        reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRL,
                                        GLAMO_OFFSET_CMDQ & 0xffff);
        reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRH,
                                        (GLAMO_OFFSET_CMDQ >> 16) & 0x7f);

        /* Length of command queue in 1k blocks, minus one */
        reg_write(gdrm, GLAMO_REG_CMDQ_LEN, (GLAMO_CMDQ_SIZE >> 10)-1);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH, 0);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL, 0);
        reg_write(gdrm, GLAMO_REG_CMDQ_CONTROL,
                                         1 << 12 |   /* Turbo flip (?) */
                                         5 << 8  |   /* no interrupt */
                                         8 << 4);    /* HQ threshold */

        /* Set up IRQ: legacy genirq API — attach our flow handler and
         * hand it the driver handle via the irq_desc handler data. */
        set_irq_handler(IRQ_GLAMO(GLAMO_IRQIDX_CMDQUEUE), glamo_cmdq_irq);
        set_irq_data(IRQ_GLAMO(GLAMO_IRQIDX_CMDQUEUE), gdrm);

        glamo_enable_cmdq_irq(gdrm);

        return 0;
}
424
425
426 int glamo_cmdq_shutdown(struct glamodrm_handle *gdrm)
427 {
428         set_irq_handler(IRQ_GLAMO(GLAMO_IRQIDX_CMDQUEUE), handle_level_irq);
429         return 0;
430 }
431
432
/* Suspend hook — nothing to save yet; state is rebuilt on resume. */
void glamo_cmdq_suspend(struct glamodrm_handle *gdrm)
{
        /* Placeholder... */
}
437
438
/* Resume hook: a full re-initialisation restores all queue state. */
void glamo_cmdq_resume(struct glamodrm_handle *gdrm)
{
        glamo_cmdq_init(gdrm);
}