/*
 * SMedia Glamo 336x/337x command queue handling
 *
 * Copyright (C) 2008-2009 Thomas White <taw@bitwiz.org.uk>
 * Copyright (C) 2009 Andreas Pokorny <andreas.pokorny@gmail.com>
 * Based on xf86-video-glamo (see below for details)
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 *
 * Command queue handling functions based on those from xf86-video-glamo, to
 * which the following licence applies:
 *
 * Copyright © 2007 OpenMoko, Inc.
 * Copyright © 2009 Lars-Peter Clausen <lars@metafoo.de>
 *
 * This driver is based on Xati,
 * Copyright © 2004 Eric Anholt
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */


#include <drm/drmP.h>
#include <drm/glamo_drm.h>

#include "glamo-core.h"
#include "glamo-drm-private.h"
#include "glamo-regs.h"


#define GLAMO_CMDQ_SIZE (128 * 1024)    /* 128k ring buffer */
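/* The hardware takes the queue length as "number of 1k blocks, minus one"
 * (see glamo_cmdq_setup() below), so this must be a multiple of 1k. */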


static inline void reg_write(struct glamodrm_handle *gdrm,
                             u_int16_t reg, u_int16_t val)
{
        iowrite16(val, gdrm->reg_base + reg);
}


static inline u16 reg_read(struct glamodrm_handle *gdrm, u_int16_t reg)
{
        return ioread16(gdrm->reg_base + reg);
}


/* The CMDQ read and write pointers are each split across two registers:
 * the low 16 bits in ..._ADDRL and the high bits in ..._ADDRH.  Three
 * high bits are enough to address a 128k queue. */
static u32 glamo_get_read(struct glamodrm_handle *gdrm)
{
        /* we could turn off clock here */
        u32 ring_read = reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRL);
        ring_read |= (reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRH) & 0x7) << 16;

        return ring_read;
}


static u32 glamo_get_write(struct glamodrm_handle *gdrm)
{
        u32 ring_write = reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL);
        ring_write |= (reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH) & 0x7) << 16;

        return ring_write;
}
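/* The command queue lives in VRAM and is consumed by the Glamo itself:
 * the driver copies command words in at the write pointer and then advances
 * GLAMO_REG_CMDQ_WRITE_ADDR{L,H}; the hardware fetches from the read
 * pointer and advances it as commands complete.  Space is reclaimed only
 * when the read pointer moves, so the submission path below busy-waits
 * until the read pointer has left the region it is about to overwrite. */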
/* Add commands to the ring buffer */
int glamo_add_to_ring(struct glamodrm_handle *gdrm, u16 *addr,
                      unsigned int count)
{
        size_t ring_write, ring_read;
        size_t new_ring_write;

        if ( count >= GLAMO_CMDQ_SIZE ) {
                printk(KERN_WARNING "[glamo-drm] CmdQ submission too large\n");
                return -EINVAL;
        }

        down(&gdrm->add_to_ring);

        ring_write = glamo_get_write(gdrm);

        /* Calculate where we'll end up */
        new_ring_write = (ring_write + count) % GLAMO_CMDQ_SIZE;

        /* Wait until there is enough space to queue the cmd buffer */
        if (new_ring_write > ring_write) {
                /* Loop while the read pointer is between the old and new
                 * positions */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write && ring_read < new_ring_write);
        } else {
                /* Same, but kind of inside-out */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write || ring_read < new_ring_write);
        }

        /* Are we about to wrap around? */
        if (ring_write >= new_ring_write) {

                u32 rest_size;

                /* Wrap around */
                rest_size = GLAMO_CMDQ_SIZE - ring_write; /* Space left */

                /* Write from current position to end */
                memcpy_toio(gdrm->cmdq_base+ring_write, addr, rest_size);

                /* Write from start */
                memcpy_toio(gdrm->cmdq_base, addr+(rest_size>>1),
                            count - rest_size);

                /* new_ring_write being 0 would result in a deadlock, because
                 * the cmdq read would never stop.  To avoid such behaviour,
                 * insert an empty instruction and advance past it. */
                if (new_ring_write == 0) {
                        iowrite16(0x0000, gdrm->cmdq_base);
                        iowrite16(0x0000, gdrm->cmdq_base + 2);
                        new_ring_write = 4;
                }

        } else {

                memcpy_toio(gdrm->cmdq_base+ring_write, addr, count);

        }

        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH,
                        (new_ring_write >> 16) & 0x7f);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL,
                        new_ring_write & 0xffff);

        if ( !(reg_read(gdrm, GLAMO_REG_CMDQ_STATUS) & 1<<3) ) {
                printk(KERN_ERR "[glamo-drm] CmdQ decode failure.\n");
        }

        up(&gdrm->add_to_ring);

        return 0;
}
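/*
 * Illustrative only: a caller submits a block of 16-bit command words,
 * e.g. (assuming the (register, value) pair encoding that
 * glamo_do_relocation() below relies on, and a hypothetical register REG):
 *
 *     u16 cmds[2] = { REG, 0x1234 };
 *     glamo_add_to_ring(gdrm, cmds, sizeof(cmds));
 *
 * Note that "count" is a byte count, not a word count, and should be even.
 */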


/* Return true for a legal sequence of commands, otherwise false */
static int glamo_sanitize_buffer(u16 *cmds, unsigned int count)
{
        /* XXX FIXME TODO: Implementation... */
        return 1;
}


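/* Each relocation gives a buffer-object handle and a byte offset into the
 * command buffer.  The offset points at a (register, value-low, register,
 * value-high) run of 16-bit words; the words at offset+2 and offset+6 are
 * overwritten with the low and high halves of the object's VRAM address. */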
/* Substitute the real addresses in VRAM for any required buffer objects */
static int glamo_do_relocation(struct glamodrm_handle *gdrm,
                               drm_glamo_cmd_buffer_t *cbuf, u16 *cmds,
                               struct drm_device *dev,
                               struct drm_file *file_priv)
{
        u32 *handles;
        int *offsets;
        int nobjs =  cbuf->nobjs;
        int i;

        if ( nobjs > 32 ) return -EINVAL;       /* Get real... */

        handles = kmalloc(nobjs*sizeof(u32), GFP_KERNEL);
        if ( handles == NULL ) return -ENOMEM;
        if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) ) {
                kfree(handles);
                return -EFAULT;
        }

        offsets = kmalloc(nobjs*sizeof(int), GFP_KERNEL);
        if ( offsets == NULL ) {
                kfree(handles);
                return -ENOMEM;
        }
        if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) )
                goto fail;

        for ( i=0; i<nobjs; i++ ) {

                u32 handle = handles[i];
                int offset = offsets[i];
                struct drm_gem_object *obj;
                struct drm_glamo_gem_object *gobj;
                u32 addr;
                u16 addr_low, addr_high;

                /* The words at offset+2 and offset+6 get patched, so the
                 * whole run must lie within the buffer */
                if ( offset < 0 || offset+8 > cbuf->bufsz ) {
                        printk(KERN_WARNING "[glamo-drm] Offset out of range"
                                            " for this relocation!\n");
                        goto fail;
                }

                obj = drm_gem_object_lookup(dev, file_priv, handle);
                if ( obj == NULL ) goto fail;

                /* Unref the object now, or it'll never get freed.
                 * This should really happen after the GPU has finished
                 * the commands which are about to be submitted. */
                drm_gem_object_unreference(obj);

                gobj = obj->driver_private;
                if ( gobj == NULL ) {
                        printk(KERN_WARNING "[glamo-drm] This object has no"
                                            " private data!\n");
                        goto fail;
                }

                addr = GLAMO_OFFSET_FB + gobj->block->start;
                addr_low = addr & 0xffff;
                addr_high = (addr >> 16) & 0x7f;

                /* FIXME: Should really check that the register is a
                 * valid one for this relocation. */

                *(cmds+(offset/2)+1) = addr_low;
                *(cmds+(offset/2)+3) = addr_high;

        }

        kfree(handles);
        kfree(offsets);
        return 0;

fail:
        kfree(handles);
        kfree(offsets);
        return -EINVAL;
}


/* This is DRM_IOCTL_GLAMO_CMDBUF */
int glamo_ioctl_cmdbuf(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        int ret = 0;
        struct glamodrm_handle *gdrm;
        unsigned int count;
        drm_glamo_cmd_buffer_t *cbuf = data;
        u16 *cmds;

        gdrm = dev->dev_private;

        count = cbuf->bufsz;

        if ( count > PAGE_SIZE ) return -EINVAL;

        cmds = kmalloc(count, GFP_KERNEL);
        if ( cmds == NULL ) return -ENOMEM;
        if ( copy_from_user(cmds, cbuf->buf, count) ) {
                printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Check the buffer isn't going to tell Glamo to enact naughtiness */
        if ( !glamo_sanitize_buffer(cmds, count) ) {
                printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Perform relocation, if necessary */
        if ( cbuf->nobjs ) {
                if ( glamo_do_relocation(gdrm, cbuf, cmds, dev, file_priv) )
                {
                        printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

        ret = glamo_add_to_ring(gdrm, cmds, count);

cleanup:
        kfree(cmds);

        return ret;
}
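/*
 * Sketch of the corresponding userspace call via libdrm (illustrative only;
 * "fd" is an open DRM device, "cmds"/"len" are as in the example above, and
 * DRM_GLAMO_CMDBUF is assumed to be the command index from glamo_drm.h):
 *
 *     drm_glamo_cmd_buffer_t cbuf = {
 *             .buf = (char *)cmds,
 *             .bufsz = len,
 *             .nobjs = 0,
 *     };
 *     drmCommandWrite(fd, DRM_GLAMO_CMDBUF, &cbuf, sizeof(cbuf));
 */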


/* Return true for a legal sequence of commands, otherwise false */
static int glamo_sanitize_burst(u16 base, u16 *cmds, unsigned int count)
{
        /* XXX FIXME TODO: Implementation... */
        return 1;
}


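/* Burst relocation differs from glamo_do_relocation() above: burst data
 * carries no register tokens, so the address halves land at the offset
 * itself (the words at offset and offset+2), not interleaved after it. */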
static int glamo_relocate_burst(struct glamodrm_handle *gdrm,
                                drm_glamo_cmd_burst_t *cbuf, u16 *data,
                                struct drm_device *dev,
                                struct drm_file *file_priv)
{
        u32 *handles;
        int *offsets;
        int nobjs =  cbuf->nobjs;
        int i;

        if ( nobjs > 32 ) return -EINVAL;       /* Get real... */

        handles = kmalloc(nobjs*sizeof(u32), GFP_KERNEL);
        if ( handles == NULL ) return -ENOMEM;
        if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) ) {
                kfree(handles);
                return -EFAULT;
        }

        offsets = kmalloc(nobjs*sizeof(int), GFP_KERNEL);
        if ( offsets == NULL ) {
                kfree(handles);
                return -ENOMEM;
        }
        if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) )
                goto fail;

        for ( i=0; i<nobjs; i++ ) {

                u32 handle = handles[i];
                int offset = offsets[i];
                struct drm_gem_object *obj;
                struct drm_glamo_gem_object *gobj;
                u32 addr;
                u16 addr_low, addr_high;

                /* Both address words must lie within the buffer */
                if ( offset < 0 || offset+4 > cbuf->bufsz ) {
                        printk(KERN_WARNING "[glamo-drm] Offset out of range"
                                            " for this relocation!\n");
                        goto fail;
                }

                obj = drm_gem_object_lookup(dev, file_priv, handle);
                if ( obj == NULL ) goto fail;

                /* Unref the object now, or it'll never get freed.
                 * FIXME: This should really happen after the GPU has
                 * finished executing these commands. */
                drm_gem_object_unreference(obj);

                gobj = obj->driver_private;
                if ( gobj == NULL ) {
                        printk(KERN_WARNING "[glamo-drm] This object has no"
                                            " private data!\n");
                        goto fail;
                }

                addr = GLAMO_OFFSET_FB + gobj->block->start;
                addr_low = addr & 0xffff;
                addr_high = (addr >> 16) & 0x7f;

                /* FIXME: Should really check that the register is a
                 * valid one for this relocation. */

                *(data+(offset/2)+0) = addr_low;
                *(data+(offset/2)+1) = addr_high;

        }

        kfree(handles);
        kfree(offsets);
        return 0;

fail:
        kfree(handles);
        kfree(offsets);
        return -EINVAL;
}


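/* Wire format of a burst packet, as assembled below:
 *
 *   word 0:     1<<15 | base register address
 *   word 1:     count of data words which follow
 *   words 2...: the data itself, zero-padded to a multiple of four bytes
 *
 * i.e. one header selects the destination ("base") for the whole block,
 * presumably auto-incrementing inside the hardware, instead of carrying a
 * register token for every value as a plain command buffer does. */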
/* This is DRM_IOCTL_GLAMO_CMDBURST */
int glamo_ioctl_cmdburst(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        int ret = 0;
        struct glamodrm_handle *gdrm;
        drm_glamo_cmd_burst_t *cbuf = data;
        u16 *burst;
        size_t burst_size;
        size_t data_size;

        gdrm = dev->dev_private;

        /* Round the data size up to a multiple of four bytes; anything
         * which still isn't a multiple of four wasn't even word-aligned
         * to begin with */
        data_size = cbuf->bufsz;
        if ( data_size % 4 ) data_size += 2;
        if ( data_size % 4 ) return -EINVAL;
        burst_size = data_size + 4;  /* Add space for header */
        if ( burst_size > PAGE_SIZE ) return -EINVAL;

        burst = kmalloc(burst_size, GFP_KERNEL);
        if ( burst == NULL ) return -ENOMEM;

        /* Get data from userspace */
        if ( copy_from_user(burst+2, cbuf->data, cbuf->bufsz) ) {
                printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Sanitise */
        if ( !glamo_sanitize_burst(cbuf->base, burst+2, cbuf->bufsz) ) {
                printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Relocate */
        if ( cbuf->nobjs ) {
                if ( glamo_relocate_burst(gdrm, cbuf, burst+2, dev, file_priv) )
                {
                        printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

        /* Add burst header */
        burst[0] = 1<<15 | cbuf->base;
        burst[1] = data_size / 2;  /* -> 2-byte words */
        if ( burst[1] & 0x01 ) {
                printk(KERN_WARNING "[glamo-drm] Burst not aligned!\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Zero-pad the final word if the user's data didn't fill it */
        if ( data_size != cbuf->bufsz ) {
                burst[(burst_size/2)-1] = 0x0000;
        }

        /* Add to command queue */
        ret = glamo_add_to_ring(gdrm, burst, burst_size);

cleanup:
        kfree(burst);

        return ret;
}


int glamo_cmdq_setup(struct glamodrm_handle *gdrm)
{
        unsigned int i;

        sema_init(&gdrm->add_to_ring, 1);

        /* Enable the 2D engine */
        glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_2D);
        glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_2D);

        /* Start by zeroing the command queue memory */
        for ( i=0; i<GLAMO_CMDQ_SIZE; i+=2 ) {
                iowrite16(0x0000, gdrm->cmdq_base+i);
        }

        glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);
        glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);

        /* Set up command queue location */
        reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRL,
                                        gdrm->cmdq_offs & 0xffff);
        reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRH,
                                        (gdrm->cmdq_offs >> 16) & 0x7f);

        /* Length of command queue in 1k blocks, minus one */
        reg_write(gdrm, GLAMO_REG_CMDQ_LEN, (GLAMO_CMDQ_SIZE >> 10)-1);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH, 0);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL, 0);
        reg_write(gdrm, GLAMO_REG_CMDQ_CONTROL,
                                         1 << 12 |   /* Turbo flip (?) */
                                         5 << 8  |   /* no interrupt */
                                         8 << 4);    /* HQ threshold */

        return 0;
}


int glamo_cmdq_init(struct drm_device *dev)
{
        struct glamodrm_handle *gdrm = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_glamo_gem_object *gobj;
        int ret = 0;

        /* The command queue itself lives in a GEM object in VRAM */
        obj = glamo_gem_object_alloc(dev, GLAMO_CMDQ_SIZE, 4);
        if ( !obj ) {
                printk(KERN_ERR "[glamo-drm] Failed to allocate CmdQ\n");
                ret = -ENOMEM;
                goto out;
        }
        gobj = obj->driver_private;
        gdrm->cmdq_offs = GLAMO_OFFSET_FB + gobj->block->start;
        gdrm->cmdq_base = ioremap(gdrm->vram->start + gdrm->cmdq_offs,
                                  GLAMO_CMDQ_SIZE);

        /* Set up registers */
        glamo_cmdq_setup(gdrm);

out:
        return ret;
}


int glamo_cmdq_shutdown(struct glamodrm_handle *gdrm)
{
        return 0;
}


void glamo_cmdq_suspend(struct glamodrm_handle *gdrm)
{
        /* Placeholder... */
}


void glamo_cmdq_resume(struct glamodrm_handle *gdrm)
{
        glamo_cmdq_setup(gdrm);
}


/* Initialise an object's contents to zero.
 * This is in glamo-cmdq.c in the hope that we can accelerate it later. */
void glamo_cmdq_blank(struct glamodrm_handle *gdrm, struct drm_gem_object *obj)
{
        char __iomem *cookie;
        struct drm_glamo_gem_object *gobj;
        int i;

        gobj = obj->driver_private;

        cookie = ioremap(gdrm->vram->start + gobj->block->start, obj->size);
        for ( i=0; i<obj->size; i+=2 ) {
                iowrite16(0, cookie+i);
        }
        iounmap(cookie);
}