Fix dynamic command queue allocation
drivers/mfd/glamo/glamo-cmdq.c
/*
 * SMedia Glamo 336x/337x command queue handling
 *
 * Copyright (C) 2008-2009 Thomas White <taw@bitwiz.org.uk>
 * Copyright (C) 2009 Andreas Pokorny <andreas.pokorny@gmail.com>
 * Based on xf86-video-glamo (see below for details)
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 *
 * Command queue handling functions based on those from xf86-video-glamo, to
 * which the following licence applies:
 *
 * Copyright © 2007 OpenMoko, Inc.
 * Copyright © 2009 Lars-Peter Clausen <lars@metafoo.de>
 *
 * This driver is based on Xati,
 * Copyright © 2004 Eric Anholt
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */


#include <drm/drmP.h>
#include <drm/glamo_drm.h>

#include "glamo-core.h"
#include "glamo-drm-private.h"
#include "glamo-regs.h"
#include "glamo-buffer.h"


#define GLAMO_CMDQ_SIZE (128 * 1024)    /* 128k ring buffer */
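/* Note: glamo_cmdq_setup() programs GLAMO_REG_CMDQ_LEN in 1k blocks, so
 * this size must remain a multiple of 1024. */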


static inline void reg_write(struct glamodrm_handle *gdrm,
                             u16 reg, u16 val)
{
        iowrite16(val, gdrm->reg_base + reg);
}


static inline u16 reg_read(struct glamodrm_handle *gdrm, u16 reg)
{
        return ioread16(gdrm->reg_base + reg);
}

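/* The CmdQ read and write pointers are byte offsets into the ring,
 * split across two registers: the low 16 bits in CMDQ_*_ADDRL and the
 * remaining high bits in CMDQ_*_ADDRH. */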
static u32 glamo_get_read(struct glamodrm_handle *gdrm)
{
        /* we could turn off clock here */
        u32 ring_read = reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRL);
        ring_read |= (reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRH) & 0x7) << 16;

        return ring_read;
}


static u32 glamo_get_write(struct glamodrm_handle *gdrm)
{
        u32 ring_write = reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL);
        ring_write |= (reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH) & 0x7) << 16;

        return ring_write;
}

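/* Add commands to the ring buffer.
 *
 * 'count' is in bytes; commands are 16-bit words, so it should be even.
 * Submissions are serialised by the add_to_ring semaphore, and the loops
 * below busy-wait on the hardware read pointer until the new commands
 * will fit.
 *
 * Worked example of the wrap-around case: with the 131072-byte queue,
 * ring_write = 130556 and count = 1000 give
 * new_ring_write = 131556 % 131072 = 484, so rest_size = 516 bytes are
 * copied up to the end of the queue and the remaining 484 bytes to the
 * start. */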
int glamo_add_to_ring(struct glamodrm_handle *gdrm, u16 *addr,
                      unsigned int count)
{
        size_t ring_write, ring_read;
        size_t new_ring_write;

        if ( count >= GLAMO_CMDQ_SIZE ) {
                printk(KERN_WARNING "[glamo-drm] CmdQ submission too large\n");
                return -EINVAL;
        }

        down(&gdrm->add_to_ring);

        ring_write = glamo_get_write(gdrm);

        /* Calculate where we'll end up */
        new_ring_write = (ring_write + count) % GLAMO_CMDQ_SIZE;

        /* Wait until there is enough space to queue the cmd buffer */
        if (new_ring_write > ring_write) {
                /* Loop while the read pointer is between the old and new
                 * positions */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write && ring_read < new_ring_write);
        } else {
                /* Same, but kind of inside-out */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write || ring_read < new_ring_write);
        }

        /* Are we about to wrap around? */
        if (ring_write >= new_ring_write) {

                u32 rest_size;

                /* Wrap around */
                rest_size = GLAMO_CMDQ_SIZE - ring_write; /* Space left */

                /* Write from current position to end */
                memcpy_toio(gdrm->cmdq_base+ring_write, addr, rest_size);

                /* Write from start (addr is a u16*, hence rest_size>>1) */
                memcpy_toio(gdrm->cmdq_base, addr+(rest_size>>1),
                            count - rest_size);

                /* A new_ring_write of 0 would deadlock, because the CmdQ
                 * read would never stop.  To avoid this, insert an empty
                 * instruction and advance the write pointer past it. */
                if (new_ring_write == 0) {
                        iowrite16(0x0000, gdrm->cmdq_base);
                        iowrite16(0x0000, gdrm->cmdq_base + 2);
                        new_ring_write = 4;
                }

        } else {

                memcpy_toio(gdrm->cmdq_base+ring_write, addr, count);

        }

        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH,
                        (new_ring_write >> 16) & 0x7f);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL,
                        new_ring_write & 0xffff);

        if ( !(reg_read(gdrm, GLAMO_REG_CMDQ_STATUS) & 1<<3) ) {
                printk(KERN_ERR "[glamo-drm] CmdQ decode failure.\n");
        }

        up(&gdrm->add_to_ring);

        return 0;
}


/* Return true for a legal sequence of commands, otherwise false */
static int glamo_sanitize_buffer(u16 *cmds, unsigned int count)
{
        /* XXX FIXME TODO: Implementation...  For now, accept everything. */
        return 1;
}


/* Substitute the real addresses in VRAM for any required buffer objects */
static int glamo_do_relocation(struct glamodrm_handle *gdrm,
                               drm_glamo_cmd_buffer_t *cbuf, u16 *cmds,
                               struct drm_device *dev,
                               struct drm_file *file_priv)
{
        u32 *handles;
        int *offsets;
        int nobjs = cbuf->nobjs;
        int i;

        if ( nobjs < 0 || nobjs > 32 ) return -EINVAL;  /* Get real... */

        handles = kmalloc(nobjs*sizeof(u32), GFP_KERNEL);
        if ( handles == NULL ) return -ENOMEM;
        if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) ) {
                kfree(handles);
                return -EFAULT;
        }

        offsets = kmalloc(nobjs*sizeof(int), GFP_KERNEL);
        if ( offsets == NULL ) {
                kfree(handles);
                return -ENOMEM;
        }
        if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) ) {
                kfree(handles);
                kfree(offsets);
                return -EFAULT;
        }

        for ( i=0; i<nobjs; i++ ) {

                u32 handle = handles[i];
                int offset = offsets[i];
                struct drm_gem_object *obj;
                struct drm_glamo_gem_object *gobj;
                u32 addr;
                u16 addr_low, addr_high;

                /* The relocation writes up to 8 bytes at 'offset' */
                if ( offset < 0 || offset+8 > cbuf->bufsz ) {
                        printk(KERN_WARNING "[glamo-drm] Offset out of range"
                                            " for this relocation!\n");
                        goto fail;
                }

                obj = drm_gem_object_lookup(dev, file_priv, handle);
                if ( obj == NULL ) goto fail;

                /* Unref the object now, or it'll never get freed.
                 * This should really happen after the GPU has finished
                 * the commands which are about to be submitted. */
                drm_gem_object_unreference(obj);

                gobj = obj->driver_private;
                if ( gobj == NULL ) {
                        printk(KERN_WARNING "[glamo-drm] This object has no"
                                            " private data!\n");
                        goto fail;
                }

                addr = GLAMO_OFFSET_FB + gobj->block->start;
                addr_low = addr & 0xffff;
                addr_high = (addr >> 16) & 0x7f;

                /* FIXME: Should really check that the register is a
                 * valid one for this relocation. */

                *(cmds+(offset/2)+1) = addr_low;
                *(cmds+(offset/2)+3) = addr_high;

        }

        kfree(handles);
        kfree(offsets);
        return 0;

fail:
        kfree(handles);
        kfree(offsets);
        return -EINVAL;
}


/* This is DRM_IOCTL_GLAMO_CMDBUF */
int glamo_ioctl_cmdbuf(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        int ret = 0;
        struct glamodrm_handle *gdrm;
        unsigned int count;
        drm_glamo_cmd_buffer_t *cbuf = data;
        u16 *cmds;

        gdrm = dev->dev_private;

        count = cbuf->bufsz;

        if ( count > PAGE_SIZE ) return -EINVAL;

        cmds = kmalloc(count, GFP_KERNEL);
        if ( cmds == NULL ) return -ENOMEM;
        if ( copy_from_user(cmds, cbuf->buf, count) ) {
                printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
                ret = -EFAULT;
                goto cleanup;
        }

        /* Check the buffer isn't going to tell Glamo to enact naughtiness */
        if ( !glamo_sanitize_buffer(cmds, count) ) {
                printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Perform relocation, if necessary */
        if ( cbuf->nobjs ) {
                if ( glamo_do_relocation(gdrm, cbuf, cmds, dev, file_priv) ) {
                        printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

        ret = glamo_add_to_ring(gdrm, cmds, count);

cleanup:
        kfree(cmds);

        return ret;
}
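

/* For illustration, a minimal sketch of how userspace might drive this
 * ioctl, assuming libdrm's drmCommandWrite() and a DRM_GLAMO_CMDBUF
 * command index from glamo_drm.h (names beyond those used in this file
 * are hypothetical):
 *
 *      u16 cmds[] = { ... };                   -- command words
 *      drm_glamo_cmd_buffer_t cbuf = {
 *              .buf   = (char *)cmds,
 *              .bufsz = sizeof(cmds),          -- must be <= PAGE_SIZE
 *              .nobjs = 0,                     -- no relocations
 *      };
 *      drmCommandWrite(fd, DRM_GLAMO_CMDBUF, &cbuf, sizeof(cbuf));
 */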


/* Return true for a legal sequence of commands, otherwise false */
static int glamo_sanitize_burst(u16 base, u16 *cmds, unsigned int count)
{
        /* XXX FIXME TODO: Implementation...  For now, accept everything. */
        return 1;
}


static int glamo_relocate_burst(struct glamodrm_handle *gdrm,
                                drm_glamo_cmd_burst_t *cbuf, u16 *data,
                                struct drm_device *dev,
                                struct drm_file *file_priv)
{
        u32 *handles;
        int *offsets;
        int nobjs = cbuf->nobjs;
        int i;

        if ( nobjs < 0 || nobjs > 32 ) return -EINVAL;  /* Get real... */

        handles = kmalloc(nobjs*sizeof(u32), GFP_KERNEL);
        if ( handles == NULL ) return -ENOMEM;
        if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) ) {
                kfree(handles);
                return -EFAULT;
        }

        offsets = kmalloc(nobjs*sizeof(int), GFP_KERNEL);
        if ( offsets == NULL ) {
                kfree(handles);
                return -ENOMEM;
        }
        if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) ) {
                kfree(handles);
                kfree(offsets);
                return -EFAULT;
        }

        for ( i=0; i<nobjs; i++ ) {

                u32 handle = handles[i];
                int offset = offsets[i];
                struct drm_gem_object *obj;
                struct drm_glamo_gem_object *gobj;
                u32 addr;
                u16 addr_low, addr_high;

                /* The relocation writes 4 bytes at 'offset' */
                if ( offset < 0 || offset+4 > cbuf->bufsz ) {
                        printk(KERN_WARNING "[glamo-drm] Offset out of range"
                                            " for this relocation!\n");
                        goto fail;
                }

                obj = drm_gem_object_lookup(dev, file_priv, handle);
                if ( obj == NULL ) goto fail;

                /* Unref the object now, or it'll never get freed.
                 * FIXME: This should really happen after the GPU has
                 * finished executing these commands. */
                drm_gem_object_unreference(obj);

                gobj = obj->driver_private;
                if ( gobj == NULL ) {
                        printk(KERN_WARNING "[glamo-drm] This object has no"
                                            " private data!\n");
                        goto fail;
                }

                addr = GLAMO_OFFSET_FB + gobj->block->start;
                addr_low = addr & 0xffff;
                addr_high = (addr >> 16) & 0x7f;

                /* FIXME: Should really check that the register is a
                 * valid one for this relocation. */

                *(data+(offset/2)+0) = addr_low;
                *(data+(offset/2)+1) = addr_high;

        }

        kfree(handles);
        kfree(offsets);
        return 0;

fail:
        kfree(handles);
        kfree(offsets);
        return -EINVAL;
}
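

/* Note that the burst relocation above writes the two address halves to
 * adjacent words ((offset/2)+0 and +1), whereas glamo_do_relocation()
 * places them two words apart, presumably because the cmdbuf format
 * interleaves register addresses with values. */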


/* This is DRM_IOCTL_GLAMO_CMDBURST */
int glamo_ioctl_cmdburst(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        int ret = 0;
        struct glamodrm_handle *gdrm;
        drm_glamo_cmd_burst_t *cbuf = data;
        u16 *burst;
        size_t burst_size;
        size_t data_size;

        gdrm = dev->dev_private;

        /* Round the data size up to a multiple of 4 bytes;
         * odd sizes are rejected */
        data_size = cbuf->bufsz;
        if ( data_size % 4 ) data_size += 2;
        if ( data_size % 4 ) return -EINVAL;
        burst_size = data_size + 4;  /* Add space for header */
        if ( burst_size > PAGE_SIZE ) return -EINVAL;

        burst = kmalloc(burst_size, GFP_KERNEL);
        if ( burst == NULL ) return -ENOMEM;

        /* Get data from userspace */
        if ( copy_from_user(burst+2, cbuf->data, cbuf->bufsz) ) {
                printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
                ret = -EFAULT;
                goto cleanup;
        }

        /* Sanitise */
        if ( !glamo_sanitize_burst(cbuf->base, burst+2, cbuf->bufsz) ) {
                printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Relocate */
        if ( cbuf->nobjs ) {
                if ( glamo_relocate_burst(gdrm, cbuf, burst+2, dev, file_priv) ) {
                        printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

        /* Add burst header: bit 15 marks a burst, the low bits give the
         * base register, and the second word is the payload length in
         * 16-bit words (which must be even) */
        burst[0] = 1<<15 | cbuf->base;
        burst[1] = data_size / 2;
        if ( burst[1] & 0x01 ) {
                printk(KERN_WARNING "[glamo-drm] Burst not aligned!\n");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Zero-pad the trailing word if the data size was rounded up */
        if ( data_size != cbuf->bufsz ) {
                burst[(burst_size/2)-1] = 0x0000;
        }

        /* Add to command queue */
        ret = glamo_add_to_ring(gdrm, burst, burst_size);

cleanup:
        kfree(burst);

        return ret;
}
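

/* The complete burst, as queued, therefore looks like:
 *
 *      u16[0]: 1<<15 | base    -- burst marker plus base register
 *      u16[1]: n               -- number of 16-bit data words (even)
 *      u16[2..n+1]: data       -- register values, zero-padded
 */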


int glamo_cmdq_setup(struct glamodrm_handle *gdrm)
{
        unsigned int i;

        init_MUTEX(&gdrm->add_to_ring);

        /* Enable the 2D engine */
        glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_2D);
        glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_2D);

        /* Start by zeroing the command queue memory */
        for ( i=0; i<GLAMO_CMDQ_SIZE; i+=2 ) {
                iowrite16(0x0000, gdrm->cmdq_base+i);
        }

        glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);
        glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);

        /* Set up command queue location */
        reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRL,
                                        gdrm->cmdq_offs & 0xffff);
        reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRH,
                                        (gdrm->cmdq_offs >> 16) & 0x7f);

        /* Length of command queue in 1k blocks, minus one */
        reg_write(gdrm, GLAMO_REG_CMDQ_LEN, (GLAMO_CMDQ_SIZE >> 10)-1);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH, 0);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL, 0);
        reg_write(gdrm, GLAMO_REG_CMDQ_CONTROL,
                                         1 << 12 |   /* Turbo flip (?) */
                                         5 << 8  |   /* no interrupt */
                                         8 << 4);    /* HQ threshold */

        return 0;
}


int glamo_cmdq_init(struct drm_device *dev)
{
        struct drm_gem_object *obj;
        struct drm_glamo_gem_object *gobj;
        struct glamodrm_handle *gdrm = dev->dev_private;
        int ret = 0;

        /* Reserve space for the command queue in VRAM by allocating a
         * GEM object, then map it for CPU access */
        obj = glamo_gem_object_alloc(dev, GLAMO_CMDQ_SIZE, 4);
        if ( !obj ) {
                printk(KERN_ERR "[glamo-drm] Failed to allocate CmdQ\n");
                ret = -ENOMEM;
                goto out;
        }
        gobj = obj->driver_private;
        gdrm->cmdq_offs = GLAMO_OFFSET_FB + gobj->block->start;
        gdrm->cmdq_base = ioremap(gdrm->vram->start + gdrm->cmdq_offs,
                                  GLAMO_CMDQ_SIZE);
        if ( gdrm->cmdq_base == NULL ) {
                printk(KERN_ERR "[glamo-drm] Failed to map CmdQ\n");
                ret = -ENOMEM;
                goto out;
        }

        /* Set up registers */
        glamo_cmdq_setup(gdrm);

out:
        return ret;
}


int glamo_cmdq_shutdown(struct glamodrm_handle *gdrm)
{
        iounmap(gdrm->cmdq_base);
        return 0;
}


void glamo_cmdq_suspend(struct glamodrm_handle *gdrm)
{
        /* Placeholder... */
}


void glamo_cmdq_resume(struct glamodrm_handle *gdrm)
{
        glamo_cmdq_setup(gdrm);
}


/* Initialise an object's contents to zero.
 * This is in glamo-cmdq.c in the hope that we can accelerate it later. */
void glamo_cmdq_blank(struct glamodrm_handle *gdrm, struct drm_gem_object *obj)
{
        char __iomem *cookie;
        struct drm_glamo_gem_object *gobj;
        int i;

        gobj = obj->driver_private;

        cookie = ioremap(gdrm->vram->start + gobj->block->start, obj->size);
        for ( i=0; i<obj->size; i+=2 ) {
                iowrite16(0, cookie+i);
        }
        iounmap(cookie);
}