Implement enable/reset of 3D engine
[kernel.git] / drivers / mfd / glamo / glamo-cmdq.c
1 /*
2  * SMedia Glamo 336x/337x command queue handling
3  *
4  * Copyright (C) 2008-2009 Thomas White <taw@bitwiz.org.uk>
5  * Copyright (C) 2009 Andreas Pokorny <andreas.pokorny@gmail.com>
6  * Based on xf86-video-glamo (see below for details)
7  *
8  * All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License as
12  * published by the Free Software Foundation; either version 2 of
13  * the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
23  * MA 02111-1307 USA
24  *
25  * Command queue handling functions based on those from xf86-video-glamo, to
26  * which the following licence applies:
27  *
28  * Copyright  2007 OpenMoko, Inc.
29  * Copyright © 2009 Lars-Peter Clausen <lars@metafoo.de>
30  *
31  * This driver is based on Xati,
32  * Copyright  2004 Eric Anholt
33  *
34  * Permission to use, copy, modify, distribute, and sell this software and its
35  * documentation for any purpose is hereby granted without fee, provided that
36  * the above copyright notice appear in all copies and that both that copyright
37  * notice and this permission notice appear in supporting documentation, and
38  * that the name of the copyright holders not be used in advertising or
39  * publicity pertaining to distribution of the software without specific,
40  * written prior permission.  The copyright holders make no representations
41  * about the suitability of this software for any purpose.  It is provided "as
42  * is" without express or implied warranty.
43  *
44  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
45  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
46  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
47  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
48  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
49  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
50  * OF THIS SOFTWARE.
51  */
52
53
54 #include <drm/drmP.h>
55 #include <drm/glamo_drm.h>
56
57 #include "glamo-core.h"
58 #include "glamo-drm-private.h"
59 #include "glamo-regs.h"
60
61
62 static inline void reg_write(struct glamodrm_handle *gdrm,
63                              u_int16_t reg, u_int16_t val)
64 {
65         iowrite16(val, gdrm->reg_base + reg);
66 }
67
68
69 static inline u16 reg_read(struct glamodrm_handle *gdrm, u_int16_t reg)
70 {
71         return ioread16(gdrm->reg_base + reg);
72 }
73
74
75 static u32 glamo_get_read(struct glamodrm_handle *gdrm)
76 {
77         /* we could turn off clock here */
78         u32 ring_read = reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRL);
79         ring_read |= (reg_read(gdrm, GLAMO_REG_CMDQ_READ_ADDRH) & 0x7) << 16;
80
81         return ring_read;
82 }
83
84
85 static u32 glamo_get_write(struct glamodrm_handle *gdrm)
86 {
87         u32 ring_write = reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL);
88         ring_write |= (reg_read(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH) & 0x7) << 16;
89
90         return ring_write;
91 }
92
93
/* Add commands to the ring buffer.
 *
 * Copies "count" bytes of command words from "addr" into the hardware
 * command queue and advances the CMDQ write pointer so the engine will
 * execute them.  Returns 0 on success, -EINVAL if the submission is too
 * large to ever fit.
 *
 * NOTE(review): waiting for free space busy-polls the hardware read
 * pointer with no timeout — a wedged engine would spin here forever.
 * NOTE(review): a count of 0 would take the wrap-around branch and copy
 * GLAMO_CMDQ_SIZE - ring_write bytes from a zero-length buffer; callers
 * are presumed never to pass count == 0 — verify at the call sites. */
int glamo_add_to_ring(struct glamodrm_handle *gdrm, u16 *addr,
                      unsigned int count)
{
        size_t ring_write, ring_read;
        size_t new_ring_write;

        /* A single submission can never be as large as the whole queue */
        if ( count >= GLAMO_CMDQ_SIZE ) {
                printk(KERN_WARNING "[glamo-drm] CmdQ submission too large\n");
                return -EINVAL;
        }

        /* Serialise ring access between concurrent submitters */
        down(&gdrm->add_to_ring);

        ring_write = glamo_get_write(gdrm);

        /* Calculate where we'll end up */
        new_ring_write = (ring_write + count) % GLAMO_CMDQ_SIZE;

        /* Wait until there is enough space to queue the cmd buffer */
        if (new_ring_write > ring_write) {
                /* Loop while the read pointer is between the old and new
                 * positions */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write && ring_read < new_ring_write);
        } else {
                /* Same, but kind of inside-out: the target region wraps,
                 * so the unsafe zone is everything outside
                 * (new_ring_write, ring_write] */
                do {
                        ring_read = glamo_get_read(gdrm);
                } while (ring_read > ring_write || ring_read < new_ring_write);
        }

        /* Are we about to wrap around? */
        if (ring_write >= new_ring_write) {

                u32 rest_size;

                /* Wrap around */
                rest_size = GLAMO_CMDQ_SIZE - ring_write; /* Space left */

                /* Write from current position to end */
                memcpy_toio(gdrm->cmdq_base+ring_write, addr, rest_size);

                /* Write from start; addr is a u16 pointer, so the byte
                 * offset rest_size is halved to get the element offset */
                memcpy_toio(gdrm->cmdq_base, addr+(rest_size>>1),
                            count - rest_size);

                /* ring_write being 0 will result in a deadlock because the
                 * cmdq read will never stop. To avoid such an behaviour insert
                 * an empty instruction. */
                if (new_ring_write == 0) {
                        iowrite16(0x0000, gdrm->cmdq_base);
                        iowrite16(0x0000, gdrm->cmdq_base + 2);
                        new_ring_write = 4;
                }

        } else {

                /* No wrap: one straight copy */
                memcpy_toio(gdrm->cmdq_base+ring_write, addr, count);

        }

        /* Publish the new write pointer; the engine starts consuming
         * commands up to this position */
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH,
                        (new_ring_write >> 16) & 0x7f);
        reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL,
                        new_ring_write & 0xffff);

        /* Bit 3 clear in the status register is reported as a decode
         * failure; the submission is not rolled back, only logged */
        if ( !(reg_read(gdrm, GLAMO_REG_CMDQ_STATUS) & 1<<3)  ) {
                printk(KERN_ERR "[glamo-drm] CmdQ decode failure.\n");
        }

        up(&gdrm->add_to_ring);

        return 0;
}
170
171
172 /* Return true for a legal sequence of commands, otherwise false */
173 static int glamo_sanitize_buffer(u16 *cmds, unsigned int count)
174 {
175         /* XXX FIXME TODO: Implementation... */
176         return 1;
177 }
178
179
180 /* Substitute the real addresses in VRAM for any required buffer objects */
181 static int glamo_do_relocation(struct glamodrm_handle *gdrm,
182                                drm_glamo_cmd_buffer_t *cbuf, u16 *cmds,
183                                struct drm_device *dev,
184                                struct drm_file *file_priv)
185 {
186         u32 *handles;
187         int *offsets;
188         int nobjs =  cbuf->nobjs;
189         int i;
190
191         if ( nobjs > 32 ) return -EINVAL;       /* Get real... */
192
193         handles = drm_alloc(nobjs*sizeof(u32), DRM_MEM_DRIVER);
194         if ( handles == NULL ) return -1;
195         if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) )
196                 return -1;
197
198         offsets = drm_alloc(nobjs*sizeof(int), DRM_MEM_DRIVER);
199         if ( offsets == NULL ) return -1;
200         if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) )
201                 return -1;
202
203         for ( i=0; i<nobjs; i++ ) {
204
205                 u32 handle = handles[i];
206                 int offset = offsets[i];
207                 struct drm_gem_object *obj;
208                 struct drm_glamo_gem_object *gobj;
209                 u32 addr;
210                 u16 addr_low, addr_high;
211
212                 if ( offset > cbuf->bufsz ) {
213                         printk(KERN_WARNING "[glamo-drm] Offset out of range"
214                                             " for this relocation!\n");
215                         goto fail;
216                 }
217
218                 obj = drm_gem_object_lookup(dev, file_priv, handle);
219                 if ( obj == NULL ) return -1;
220
221                 /* Unref the object now, or it'll never get freed.
222                  * This should really happen after the GPU has finished
223                  * the commands which are about to be submitted. */
224                 drm_gem_object_unreference(obj);
225
226                 gobj = obj->driver_private;
227                 if ( gobj == NULL ) {
228                         printk(KERN_WARNING "[glamo-drm] This object has no"
229                                             " private data!\n");
230                         goto fail;
231                 }
232
233                 addr = GLAMO_OFFSET_FB + gobj->block->start;
234                 addr_low = addr & 0xffff;
235                 addr_high = (addr >> 16) & 0x7f;
236
237                 /* FIXME: Should really check that the register is a
238                  * valid one for this relocation. */
239
240                 *(cmds+(offset/2)+1) = addr_low;
241                 *(cmds+(offset/2)+3) = addr_high;
242
243         }
244
245         drm_free(handles, 1, DRM_MEM_DRIVER);
246         drm_free(offsets, 1, DRM_MEM_DRIVER);
247         return 0;
248
249 fail:
250         drm_free(handles, 1, DRM_MEM_DRIVER);
251         drm_free(offsets, 1, DRM_MEM_DRIVER);
252         return -1;
253 }
254
255
256 /* This is DRM_IOCTL_GLAMO_CMDBUF */
257 int glamo_ioctl_cmdbuf(struct drm_device *dev, void *data,
258                        struct drm_file *file_priv)
259 {
260         int ret = 0;
261         struct glamodrm_handle *gdrm;
262         unsigned int count;
263         drm_glamo_cmd_buffer_t *cbuf = data;
264         u16 *cmds;
265
266         gdrm = dev->dev_private;
267
268         count = cbuf->bufsz;
269
270         if ( count > PAGE_SIZE ) return -EINVAL;
271
272         cmds = drm_alloc(count, DRM_MEM_DRIVER);
273         if ( cmds == NULL ) return -ENOMEM;
274         if ( copy_from_user(cmds, cbuf->buf, count) )   {
275                 printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
276                 ret = -EINVAL;
277                 goto cleanup;
278         }
279
280         /* Check the buffer isn't going to tell Glamo to enact naughtiness */
281         if ( !glamo_sanitize_buffer(cmds, count) ) {
282                 printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
283                 ret = -EINVAL;
284                 goto cleanup;
285         }
286
287         /* Perform relocation, if necessary */
288         if ( cbuf->nobjs ) {
289                 if ( glamo_do_relocation(gdrm, cbuf, cmds, dev, file_priv) )
290                 {
291                         printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
292                         ret = -EINVAL;
293                         goto cleanup;
294                 }
295         }
296
297         glamo_add_to_ring(gdrm, cmds, count);
298
299 cleanup:
300         drm_free(cmds, 1, DRM_MEM_DRIVER);
301
302         return ret;
303 }
304
305
306 /* Return true for a legal sequence of commands, otherwise false */
307 static int glamo_sanitize_burst(u16 base, u16 *cmds, unsigned int count)
308 {
309         /* XXX FIXME TODO: Implementation... */
310         return 1;
311 }
312
313
314 static int glamo_relocate_burst(struct glamodrm_handle *gdrm,
315                                 drm_glamo_cmd_burst_t *cbuf, u16 *data,
316                                 struct drm_device *dev,
317                                 struct drm_file *file_priv)
318 {
319         u32 *handles;
320         int *offsets;
321         int nobjs =  cbuf->nobjs;
322         int i;
323
324         if ( nobjs > 32 ) return -EINVAL;       /* Get real... */
325
326         handles = drm_alloc(nobjs*sizeof(u32), DRM_MEM_DRIVER);
327         if ( handles == NULL ) return -1;
328         if ( copy_from_user(handles, cbuf->objs, nobjs*sizeof(u32)) )
329                 return -1;
330
331         offsets = drm_alloc(nobjs*sizeof(int), DRM_MEM_DRIVER);
332         if ( offsets == NULL ) return -1;
333         if ( copy_from_user(offsets, cbuf->obj_pos, nobjs*sizeof(int)) )
334                 return -1;
335
336         for ( i=0; i<nobjs; i++ ) {
337
338                 u32 handle = handles[i];
339                 int offset = offsets[i];
340                 struct drm_gem_object *obj;
341                 struct drm_glamo_gem_object *gobj;
342                 u32 addr;
343                 u16 addr_low, addr_high;
344
345                 if ( offset > cbuf->bufsz ) {
346                         printk(KERN_WARNING "[glamo-drm] Offset out of range"
347                                             " for this relocation!\n");
348                         goto fail;
349                 }
350
351                 obj = drm_gem_object_lookup(dev, file_priv, handle);
352                 if ( obj == NULL ) return -1;
353
354                 /* Unref the object now, or it'll never get freed.
355                  * FIXME: This should really happen after the GPU has
356                  * finished executing these commands. */
357                 drm_gem_object_unreference(obj);
358
359                 gobj = obj->driver_private;
360                 if ( gobj == NULL ) {
361                         printk(KERN_WARNING "[glamo-drm] This object has no"
362                                             " private data!\n");
363                         goto fail;
364                 }
365
366                 addr = GLAMO_OFFSET_FB + gobj->block->start;
367                 addr_low = addr & 0xffff;
368                 addr_high = (addr >> 16) & 0x7f;
369
370                 /* FIXME: Should really check that the register is a
371                  * valid one for this relocation. */
372
373                 *(data+(offset/2)+0) = addr_low;
374                 *(data+(offset/2)+1) = addr_high;
375
376         }
377
378         drm_free(handles, 1, DRM_MEM_DRIVER);
379         drm_free(offsets, 1, DRM_MEM_DRIVER);
380         return 0;
381
382 fail:
383         drm_free(handles, 1, DRM_MEM_DRIVER);
384         drm_free(offsets, 1, DRM_MEM_DRIVER);
385         return -1;
386 }
387
388
389 /* This is DRM_IOCTL_GLAMO_CMDBURST */
390 int glamo_ioctl_cmdburst(struct drm_device *dev, void *data,
391                          struct drm_file *file_priv)
392 {
393         int ret = 0;
394         struct glamodrm_handle *gdrm;
395         drm_glamo_cmd_burst_t *cbuf = data;
396         u16 *burst;
397         size_t burst_size;
398         size_t data_size;
399
400         gdrm = dev->dev_private;
401
402         data_size = cbuf->bufsz;
403         burst_size = data_size + 4;  /* Add space for header */
404         if ( data_size % 4 ) burst_size += 2;
405         if ( burst_size > PAGE_SIZE ) return -EINVAL;
406         if ( burst_size % 4 ) return -EINVAL;
407
408         burst = drm_alloc(burst_size, DRM_MEM_DRIVER);
409         if ( burst == NULL ) return -ENOMEM;
410
411         /* Get data from userspace */
412         if ( copy_from_user(burst+2, cbuf->data, cbuf->bufsz) )         {
413                 printk(KERN_WARNING "[glamo-drm] copy from user failed\n");
414                 ret = -EINVAL;
415                 goto cleanup;
416         }
417
418         /* Sanitise */
419         if ( !glamo_sanitize_burst(cbuf->base, burst+2, cbuf->bufsz) ) {
420                 printk(KERN_WARNING "[glamo-drm] sanitize buffer failed\n");
421                 ret = -EINVAL;
422                 goto cleanup;
423         }
424
425         /* Relocate */
426         if ( cbuf->nobjs ) {
427                 if ( glamo_relocate_burst(gdrm, cbuf, burst+2, dev, file_priv) )
428                 {
429                         printk(KERN_WARNING "[glamo-drm] Relocation failed\n");
430                         ret = -EINVAL;
431                         goto cleanup;
432                 }
433         }
434
435         /* Add burst header */
436         burst[0] = 1<<15 | cbuf->base;
437         burst[1] = data_size / 2;  /* -> 2-byte words */
438
439         /* Zero-pad if necessary */
440         if ( burst[1] & 0x01 ) {
441                 burst[(burst_size/2)-1] = 0x0000;
442         }
443
444         /* Add to command queue */
445         glamo_add_to_ring(gdrm, burst, burst_size);
446
447 cleanup:
448         drm_free(burst, 1, DRM_MEM_DRIVER);
449
450         return ret;
451 }
452
453
454 int glamo_cmdq_init(struct glamodrm_handle *gdrm)
455 {
456         unsigned int i;
457
458         init_MUTEX(&gdrm->add_to_ring);
459
460         /* Enable 2D and 3D */
461         glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_2D);
462         glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_2D);
463         glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_3D);
464         glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_3D);
465
466         /* Start by zeroing the command queue memory */
467         for ( i=0; i<GLAMO_CMDQ_SIZE; i+=2 ) {
468                 iowrite16(0x0000, gdrm->cmdq_base+i);
469         }
470
471         glamo_engine_enable(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);
472         glamo_engine_reset(gdrm->glamo_core, GLAMO_ENGINE_CMDQ);
473
474         /* Set up command queue location */
475         reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRL,
476                                         GLAMO_OFFSET_CMDQ & 0xffff);
477         reg_write(gdrm, GLAMO_REG_CMDQ_BASE_ADDRH,
478                                         (GLAMO_OFFSET_CMDQ >> 16) & 0x7f);
479
480         /* Length of command queue in 1k blocks, minus one */
481         reg_write(gdrm, GLAMO_REG_CMDQ_LEN, (GLAMO_CMDQ_SIZE >> 10)-1);
482         reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRH, 0);
483         reg_write(gdrm, GLAMO_REG_CMDQ_WRITE_ADDRL, 0);
484         reg_write(gdrm, GLAMO_REG_CMDQ_CONTROL,
485                                          1 << 12 |   /* Turbo flip (?) */
486                                          5 << 8  |   /* no interrupt */
487                                          8 << 4);    /* HQ threshold */
488
489         return 0;
490 }
491
492
/* Tear down the command queue.  Nothing to release yet; kept so the
 * driver core has a symmetric counterpart to glamo_cmdq_init(). */
int glamo_cmdq_shutdown(struct glamodrm_handle *gdrm)
{
        return 0;
}
497
498
/* Prepare the command queue for system suspend.
 * Placeholder: no state needs saving yet — resume simply re-runs the
 * full initialisation. */
void glamo_cmdq_suspend(struct glamodrm_handle *gdrm)
{
}
503
504
/* Restore the command queue after system resume by re-running the full
 * engine and register initialisation. */
void glamo_cmdq_resume(struct glamodrm_handle *gdrm)
{
        glamo_cmdq_init(gdrm);
}
509
510
511 /* Initialise an object's contents to zero.
512  * This is in glamo-cmdq.c in the hope that we can accelerate it later. */
513 void glamo_cmdq_blank(struct glamodrm_handle *gdrm, struct drm_gem_object *obj)
514 {
515         char __iomem *cookie;
516         struct drm_glamo_gem_object *gobj;
517         int i;
518
519         gobj = obj->driver_private;
520
521         cookie = ioremap(gdrm->vram->start + gobj->block->start, obj->size);
522         for ( i=0; i<obj->size; i+=2 ) {
523                 iowrite16(0, cookie+i);
524         }
525         iounmap(cookie);
526 }