Remove debug watchpoints
[kernel.git] / drivers / mfd / glamo / glamo-core.c
1 /* Smedia Glamo 336x/337x driver
2  *
3  * (C) 2007 by Openmoko, Inc.
4  * Author: Harald Welte <laforge@openmoko.org>
5  * All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
20  * MA 02111-1307 USA
21  */
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/errno.h>
26 #include <linux/string.h>
27 #include <linux/mm.h>
28 #include <linux/tty.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/fb.h>
32 #include <linux/init.h>
33 #include <linux/irq.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/wait.h>
37 #include <linux/platform_device.h>
38 #include <linux/kernel_stat.h>
39 #include <linux/spinlock.h>
40 #include <linux/glamofb.h>
41 #include <linux/mmc/mmc.h>
42 #include <linux/mmc/host.h>
43
44 #include <asm/io.h>
45 #include <asm/uaccess.h>
46 #include <asm/div64.h>
47
48 //#include <mach/regs-irq.h>
49
50 #ifdef CONFIG_PM
51 #include <linux/pm.h>
52 #endif
53
54 #include "glamo-regs.h"
55 #include "glamo-core.h"
56
/* byte size of a struct resource span (->end is inclusive, hence +1) */
#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)

/* memory refresh count -- NOTE(review): not referenced in this part of
 * the file; presumably used by the memory init code elsewhere */
#define GLAMO_MEM_REFRESH_COUNT 0x100
60
61
62 /*
63  * Glamo internal settings
64  *
65  * We run the memory interface from the faster PLLB on 2.6.28 kernels and
66  * above.  Couple of GTA02 users report trouble with memory bus when they
67  * upgraded from 2.6.24.  So this parameter allows reversion to 2.6.24
68  * scheme if their Glamo chip needs it.
69  *
70  * you can override the faster default on kernel commandline using
71  *
72  *   glamo3362.slow_memory=1
73  *
74  * for example
75  */
76
/* 0 (default) = fastest memory-bus timings; nonzero selects one of the
 * slower fallback settings applied in glamo_run_script()'s 0x200 case */
static int slow_memory = 0;
module_param(slow_memory, int, 0644);
79
/* One dumpable window of the Glamo register space. */
struct reg_range {
	int start;	/* first register offset of the window */
	int count;	/* number of bytes of register space */
	char *name;	/* human-readable block name */
	char dump;	/* nonzero: include this window in the regs dump */
};
/* register windows walked by the sysfs "regs" dump (regs_read()) */
struct reg_range reg_range[] = {
	{ 0x0000, 0x76,		"General",	1 },
	{ 0x0200, 0x16,		"Host Bus",	1 },
	{ 0x0300, 0x38,		"Memory",	1 },
/*	{ 0x0400, 0x100,	"Sensor",	0 }, */
/*		{ 0x0500, 0x300,	"ISP",		0 }, */
/*		{ 0x0800, 0x400,	"JPEG",		0 }, */
/*		{ 0x0c00, 0xcc,		"MPEG",		0 }, */
	{ 0x1100, 0xb2,		"LCD 1",	1 },
	{ 0x1200, 0x64,		"LCD 2",	1 },
	{ 0x1400, 0x40,		"MMC",		1 },
/*		{ 0x1500, 0x080,	"MPU 0",	0 },
	{ 0x1580, 0x080,	"MPU 1",	0 },
	{ 0x1600, 0x080,	"Cmd Queue",	0 },
	{ 0x1680, 0x080,	"RISC CPU",	0 },
	{ 0x1700, 0x400,	"2D Unit",	0 },
	{ 0x1b00, 0x900,	"3D Unit",	0 }, */
};
104
105 static struct glamo_core *glamo_handle;
106
107 static inline void __reg_write(struct glamo_core *glamo,
108                                 u_int16_t reg, u_int16_t val)
109 {
110         writew(val, glamo->base + reg);
111 }
112
113 static inline u_int16_t __reg_read(struct glamo_core *glamo,
114                                    u_int16_t reg)
115 {
116         return readw(glamo->base + reg);
117 }
118
119 static void __reg_set_bit_mask(struct glamo_core *glamo,
120                                 u_int16_t reg, u_int16_t mask,
121                                 u_int16_t val)
122 {
123         u_int16_t tmp;
124
125         val &= mask;
126
127         tmp = __reg_read(glamo, reg);
128         tmp &= ~mask;
129         tmp |= val;
130         __reg_write(glamo, reg, tmp);
131 }
132
133 static void reg_set_bit_mask(struct glamo_core *glamo,
134                                 u_int16_t reg, u_int16_t mask,
135                                 u_int16_t val)
136 {
137         spin_lock(&glamo->lock);
138         __reg_set_bit_mask(glamo, reg, mask, val);
139         spin_unlock(&glamo->lock);
140 }
141
142 static inline void __reg_set_bit(struct glamo_core *glamo,
143                                  u_int16_t reg, u_int16_t bit)
144 {
145         __reg_set_bit_mask(glamo, reg, bit, 0xffff);
146 }
147
148 static inline void __reg_clear_bit(struct glamo_core *glamo,
149                                    u_int16_t reg, u_int16_t bit)
150 {
151         __reg_set_bit_mask(glamo, reg, bit, 0);
152 }
153
154 static inline void glamo_vmem_write(struct glamo_core *glamo, u_int32_t addr,
155                                     u_int16_t *src, int len)
156 {
157         if (addr & 0x0001 || (unsigned long)src & 0x0001 || len & 0x0001) {
158                 dev_err(&glamo->pdev->dev, "unaligned write(0x%08x, 0x%p, "
159                         "0x%x)!!\n", addr, src, len);
160         }
161
162 }
163
164 static inline void glamo_vmem_read(struct glamo_core *glamo, u_int16_t *buf,
165                                    u_int32_t addr, int len)
166 {
167         if (addr & 0x0001 || (unsigned long) buf & 0x0001 || len & 0x0001) {
168                 dev_err(&glamo->pdev->dev, "unaligned read(0x%p, 0x08%x, "
169                         "0x%x)!!\n", buf, addr, len);
170         }
171
172
173 }
174
175 /***********************************************************************
176  * resources of sibling devices
177  ***********************************************************************/
/* MMIO and VRAM windows handed to the glamo-fb child device.  Offsets
 * are chip-relative; presumably rebased to absolute bus addresses via
 * mangle_mem_resources() before registration (call site is outside
 * this chunk). */
static struct resource glamo_graphics_resources[] = {
	{
		.name	= "glamo-cmdq-regs",
		.start	= GLAMO_REGOFS_CMDQUEUE,
		.end	= GLAMO_REGOFS_RISC - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "glamo-command-queue",
		.start	= GLAMO_MEM_BASE + GLAMO_OFFSET_CMDQ,
		.end	= GLAMO_MEM_BASE + GLAMO_OFFSET_CMDQ +
			  GLAMO_CMDQ_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "glamo-fb-mem",
		.start	= GLAMO_MEM_BASE + GLAMO_OFFSET_FB,
		.end	= GLAMO_MEM_BASE + GLAMO_OFFSET_FB +
			  GLAMO_FB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "glamo-fb-regs",
		.start	= GLAMO_REGOFS_LCD,
		.end	= GLAMO_REGOFS_MMC - 1,
		.flags	= IORESOURCE_MEM,
	}
};
203
/* framebuffer child device, bound to the resource windows above */
static struct platform_device glamo_graphics_dev = {
	.name		= "glamo-fb",
	.resource	= glamo_graphics_resources,
	.num_resources	= ARRAY_SIZE(glamo_graphics_resources),
};
209
/* SPI-over-GPIO child device; resources/platform data are presumably
 * attached by probe code outside this chunk */
static struct platform_device glamo_spigpio_dev = {
	.name		= "glamo-spi-gpio",
};
213
214
/* register window, interrupt and data-buffer window for the MMC child */
static struct resource glamo_mmc_resources[] = {
	{
		/* FIXME: those need to be incremented by parent base */
		.start	= GLAMO_REGOFS_MMC,
		.end	= GLAMO_REGOFS_MPROC0 - 1,
		.flags	= IORESOURCE_MEM
	}, {
		.start	= IRQ_GLAMO_MMC,
		.end	= IRQ_GLAMO_MMC,
		.flags	= IORESOURCE_IRQ,
	}, { /* our data buffer for MMC transfers */
		.start	= GLAMO_MEM_BASE + GLAMO_OFFSET_MMC,
		.end	= GLAMO_MEM_BASE + GLAMO_OFFSET_MMC +
			  GLAMO_MMC_BUFFER_SIZE - 1,
		.flags	= IORESOURCE_MEM
	},
};
232
/* default platform data for the MMC child; the function-pointer hooks
 * are populated from the MFD platform data (see inline comments) */
struct glamo_mci_pdata glamo_mci_def_pdata = {
	.gpio_detect		= 0,
	.glamo_can_set_mci_power	= NULL, /* filled in from MFD platform data */
	/* advertise card voltages from 2.0V up to 3.3V */
	.ocr_avail	= MMC_VDD_20_21 |
			  MMC_VDD_21_22 |
			  MMC_VDD_22_23 |
			  MMC_VDD_23_24 |
			  MMC_VDD_24_25 |
			  MMC_VDD_25_26 |
			  MMC_VDD_26_27 |
			  MMC_VDD_27_28 |
			  MMC_VDD_28_29 |
			  MMC_VDD_29_30 |
			  MMC_VDD_30_31 |
			  MMC_VDD_32_33,
	.glamo_irq_is_wired	= NULL, /* filled in from MFD platform data */
	.mci_suspending = NULL, /* filled in from MFD platform data */
	.mci_all_dependencies_resumed = NULL, /* filled in from MFD platform data */
};
EXPORT_SYMBOL_GPL(glamo_mci_def_pdata);
253
254
255
256 static void mangle_mem_resources(struct resource *res, int num_res,
257                                  struct resource *parent)
258 {
259         int i;
260
261         for (i = 0; i < num_res; i++) {
262                 if (res[i].flags != IORESOURCE_MEM)
263                         continue;
264                 res[i].start += parent->start;
265                 res[i].end += parent->start;
266                 res[i].parent = parent;
267         }
268 }
269
270 /***********************************************************************
271  * IRQ demultiplexer
272  ***********************************************************************/
273 #define irq2glamo(x)    (x - IRQ_GLAMO(0))
274
275 static void glamo_ack_irq(unsigned int irq)
276 {
277         /* clear interrupt source */
278         __reg_write(glamo_handle, GLAMO_REG_IRQ_CLEAR,
279                     1 << irq2glamo(irq));
280 }
281
282 static void glamo_mask_irq(unsigned int irq)
283 {
284         u_int16_t tmp;
285
286         /* clear bit in enable register */
287         tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
288         tmp &= ~(1 << irq2glamo(irq));
289         __reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
290 }
291
292 static void glamo_unmask_irq(unsigned int irq)
293 {
294         u_int16_t tmp;
295
296         /* set bit in enable register */
297         tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
298         tmp |= (1 << irq2glamo(irq));
299         __reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
300 }
301
/* irq_chip backing the virtual IRQ_GLAMO(n) interrupt numbers */
static struct irq_chip glamo_irq_chip = {
	.ack	= glamo_ack_irq,
	.mask	= glamo_mask_irq,
	.unmask	= glamo_unmask_irq,
};
307
/*
 * Chained handler for the single physical Glamo interrupt line: reads
 * the Glamo IRQ status register and dispatches each asserted source to
 * its virtual IRQ_GLAMO(n) descriptor.
 *
 * The IRQ_PENDING/IRQ_MASKED/IRQ_INPROGRESS juggling defers re-entrant
 * invocations and replays them in the loop below (the scheme used by
 * the old pre-genirq flow handlers -- NOTE(review): relies on the
 * legacy desc->status/desc->chip API).
 */
static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	const unsigned int cpu = smp_processor_id();

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/* re-entered while already handling: mark pending and mask,
	 * the first invocation's loop will pick it up */
	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		return;
	}

	kstat_cpu(cpu).irqs[irq]++;
	desc->chip->ack(irq);
	desc->status |= IRQ_INPROGRESS;

	do {
		u_int16_t irqstatus;
		int i;

		if (unlikely((desc->status &
				(IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
				(IRQ_PENDING | IRQ_MASKED))) {
			/* dealing with pending IRQ, unmasking */
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;

		/* read IRQ status register */
		irqstatus = __reg_read(glamo_handle, GLAMO_REG_IRQ_STATUS);
		/* dispatch each of the 9 engine interrupt sources */
		for (i = 0; i < 9; i++)
			if (irqstatus & (1 << i))
				desc_handle_irq(IRQ_GLAMO(i),
				    irq_desc+IRQ_GLAMO(i));

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
}
350
351
352 static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
353                            const char *buf, size_t count)
354 {
355         unsigned long reg = simple_strtoul(buf, NULL, 10);
356         struct glamo_core *glamo = dev_get_drvdata(dev);
357
358         while (*buf && (*buf != ' '))
359                 buf++;
360         if (*buf != ' ')
361                 return -EINVAL;
362         while (*buf && (*buf == ' '))
363                 buf++;
364         if (!*buf)
365                 return -EINVAL;
366
367         printk(KERN_INFO"reg 0x%02lX <-- 0x%04lX\n",
368                reg, simple_strtoul(buf, NULL, 10));
369
370         __reg_write(glamo, reg, simple_strtoul(buf, NULL, 10));
371
372         return count;
373 }
374
/*
 * sysfs show handler for the "regs" attribute: hex-dump every register
 * window whose .dump flag is set in reg_range[], eight registers per
 * output line, each line prefixed by its start offset.
 *
 * Quirk: when called with attr == NULL (i.e. directly, not via sysfs)
 * each window is printk'd immediately and the buffer pointer rewound,
 * so a small buffer suffices for debug dumps.
 * NOTE(review): in the sysfs case the total output is assumed to fit
 * the provided buffer (PAGE_SIZE) -- 'end' is not bounds-checked.
 */
static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct glamo_core *glamo = dev_get_drvdata(dev);
	int n, n1 = 0, r;
	char * end = buf;

	spin_lock(&glamo->lock);

	for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
		if (!reg_range[r].dump)
			continue;
		n1 = 0;
		end += sprintf(end, "\n%s\n", reg_range[r].name);
		for (n = reg_range[r].start;
		     n < reg_range[r].start + reg_range[r].count; n += 2) {
			/* start a fresh line every 8 registers */
			if (((n1++) & 7) == 0)
				end += sprintf(end, "\n%04X:  ", n);
			end += sprintf(end, "%04x ", __reg_read(glamo, n));
		}
		end += sprintf(end, "\n");
		if (!attr) {
			printk("%s", buf);
			end = buf;
		}
	}
	spin_unlock(&glamo->lock);

	return end - buf;
}
405
/* "regs" attribute: show = regs_read (dump), store = regs_write (poke) */
static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
static struct attribute *glamo_sysfs_entries[] = {
	&dev_attr_regs.attr,
	NULL
};
static struct attribute_group glamo_attr_group = {
	.name	= NULL,		/* attributes go directly in the device dir */
	.attrs	= glamo_sysfs_entries,
};
415
416
417
418 /***********************************************************************
419  * 'engine' support
420  ***********************************************************************/
421
/*
 * Ungate the clocks and MMIO decode for @engine, then record it in
 * engine_enabled_bitfield.  Caller must hold glamo->lock (the locked
 * wrapper is glamo_engine_enable()).  Always returns 0, even for
 * engines without a switch case yet (see FIXME below).
 */
int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
	switch (engine) {
	case GLAMO_ENGINE_LCD:
		/* expose the LCD register window on the host bus */
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_LCD,
				   GLAMO_HOSTBUS2_MMIO_EN_LCD);
		/* ungate all the LCD-related clocks */
		__reg_write(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_M5CLK |
			    GLAMO_CLOCK_LCD_EN_DHCLK |
			    GLAMO_CLOCK_LCD_EN_DMCLK |
			    GLAMO_CLOCK_LCD_EN_DCLK |
			    GLAMO_CLOCK_LCD_DG_M5CLK |
			    GLAMO_CLOCK_LCD_DG_DMCLK);
		/* and start the dividers feeding them */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
			    GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
		break;
	case GLAMO_ENGINE_MMC:
		/* expose the MMC register window on the host bus */
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_MMC,
				   GLAMO_HOSTBUS2_MMIO_EN_MMC);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
				   GLAMO_CLOCK_MMC_EN_M9CLK |
				   GLAMO_CLOCK_MMC_EN_TCLK |
				   GLAMO_CLOCK_MMC_DG_M9CLK |
				   GLAMO_CLOCK_MMC_DG_TCLK, 0xffff);
		/* enable the TCLK divider clk input */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
						 GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
						 GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
		break;
	case GLAMO_ENGINE_2D:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M7CLK |
				   GLAMO_CLOCK_2D_EN_GCLK |
				   GLAMO_CLOCK_2D_DG_M7CLK |
				   GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_2D,
				   GLAMO_HOSTBUS2_MMIO_EN_2D);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_GCLK, 0xffff);
		break;
	case GLAMO_ENGINE_CMDQ:
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
				   GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
		__reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
				   GLAMO_HOSTBUS2_MMIO_EN_CQ,
				   GLAMO_HOSTBUS2_MMIO_EN_CQ);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
				   GLAMO_CLOCK_GEN51_EN_DIV_MCLK, 0xffff);
		break;
	/* FIXME: Implementation */
	default:
		break;
	}

	glamo->engine_enabled_bitfield |= 1 << engine;

	return 0;
}
485
486 int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
487 {
488         int ret;
489
490         spin_lock(&glamo->lock);
491
492         ret = __glamo_engine_enable(glamo, engine);
493
494         spin_unlock(&glamo->lock);
495
496         return ret;
497 }
498 EXPORT_SYMBOL_GPL(glamo_engine_enable);
499
/*
 * Gate the clocks of @engine and clear it from engine_enabled_bitfield.
 * Caller must hold glamo->lock (locked wrapper: glamo_engine_disable()).
 * Always returns 0.  MMC gating is currently commented out entirely,
 * so disabling MMC only updates the bitfield.
 */
int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
	switch (engine) {
	case GLAMO_ENGINE_LCD:
		/* remove pixel clock to LCM */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_DCLK, 0);
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_DHCLK |
			    GLAMO_CLOCK_LCD_EN_DMCLK, 0);
		/* kill memory clock */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
			    GLAMO_CLOCK_LCD_EN_M5CLK, 0);
		/* stop dividing the clocks */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
			    GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
			    GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
		break;

	case GLAMO_ENGINE_MMC:
//		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
//						   GLAMO_CLOCK_MMC_EN_M9CLK |
//						   GLAMO_CLOCK_MMC_EN_TCLK |
//						   GLAMO_CLOCK_MMC_DG_M9CLK |
//						   GLAMO_CLOCK_MMC_DG_TCLK, 0);
		/* disable the TCLK divider clk input */
//		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
//					GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0);
		/* fallthrough -- all MMC gating above is commented out */

	default:
		break;
	}

	glamo->engine_enabled_bitfield &= ~(1 << engine);

	return 0;
}
538 int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
539 {
540         int ret;
541
542         spin_lock(&glamo->lock);
543
544         ret = __glamo_engine_disable(glamo, engine);
545
546         spin_unlock(&glamo->lock);
547
548         return ret;
549 }
550 EXPORT_SYMBOL_GPL(glamo_engine_disable);
551
/* per-engine clock-control register offsets, indexed by glamo_engine;
 * note both MPEG encode and decode share GLAMO_REG_CLOCK_MPEG */
static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
	[GLAMO_ENGINE_LCD]	= GLAMO_REG_CLOCK_LCD,
	[GLAMO_ENGINE_MMC]	= GLAMO_REG_CLOCK_MMC,
	[GLAMO_ENGINE_ISP]	= GLAMO_REG_CLOCK_ISP,
	[GLAMO_ENGINE_JPEG]	= GLAMO_REG_CLOCK_JPEG,
	[GLAMO_ENGINE_3D]	= GLAMO_REG_CLOCK_3D,
	[GLAMO_ENGINE_2D]	= GLAMO_REG_CLOCK_2D,
	[GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
	[GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
};
562
563 void glamo_engine_clkreg_set(struct glamo_core *glamo,
564                              enum glamo_engine engine,
565                              u_int16_t mask, u_int16_t val)
566 {
567         reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
568 }
569 EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);
570
571 u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
572                                   enum glamo_engine engine)
573 {
574         u_int16_t val;
575
576         spin_lock(&glamo->lock);
577         val = __reg_read(glamo, engine_clock_regs[engine]);
578         spin_unlock(&glamo->lock);
579
580         return val;
581 }
582 EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);
583
/* per-engine reset bit: register + bit pulsed by glamo_engine_reset().
 * Sparse by design -- engines without an entry are zero-filled. */
struct glamo_script reset_regs[] = {
	[GLAMO_ENGINE_LCD] = {
		GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
	},
#if 0
	[GLAMO_ENGINE_HOST] = {
		GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
	},
	[GLAMO_ENGINE_MEM] = {
		GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
	},
#endif
	[GLAMO_ENGINE_MMC] = {
		GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
	},
	[GLAMO_ENGINE_2D] = {
		GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
	},
	[GLAMO_ENGINE_JPEG] = {
		GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
	},
	/* The following is defined as "Reset command queue", nothing to do
	 * with the 2D engine. */
	[GLAMO_ENGINE_CMDQ] = {
		GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
	},
};
611
612 void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
613 {
614         struct glamo_script *rst;
615
616         if (engine >= ARRAY_SIZE(reset_regs)) {
617                 dev_warn(&glamo->pdev->dev, "unknown engine %u ", engine);
618                 return;
619         }
620
621         rst = &reset_regs[engine];
622
623         spin_lock(&glamo->lock);
624         __reg_set_bit(glamo, rst->reg, rst->val);
625         __reg_clear_bit(glamo, rst->reg, rst->val);
626         spin_unlock(&glamo->lock);
627 }
628 EXPORT_SYMBOL_GPL(glamo_engine_reset);
629
630 void glamo_lcm_reset(int level)
631 {
632         if (!glamo_handle)
633                 return;
634
635         glamo_gpio_setpin(glamo_handle, GLAMO_GPIO4, level);
636         glamo_gpio_cfgpin(glamo_handle, GLAMO_GPIO4_OUTPUT);
637
638 }
639 EXPORT_SYMBOL_GPL(glamo_lcm_reset);
640
/* the two PLLs inside the Glamo; selects which GEN register to read */
enum glamo_pll {
	GLAMO_PLL1,
	GLAMO_PLL2,
};
645
646 static int glamo_pll_rate(struct glamo_core *glamo,
647                           enum glamo_pll pll)
648 {
649         u_int16_t reg;
650         unsigned int div = 512;
651         /* FIXME: move osci into platform_data */
652         unsigned int osci = 32768;
653
654         if (osci == 32768)
655                 div = 1;
656
657         switch (pll) {
658         case GLAMO_PLL1:
659                 reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
660                 break;
661         case GLAMO_PLL2:
662                 reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
663                 break;
664         default:
665                 return -EINVAL;
666         }
667         return (osci/div)*reg;
668 }
669
670 int glamo_engine_reclock(struct glamo_core *glamo,
671                          enum glamo_engine engine,
672                          int ps)
673 {
674         int pll, khz;
675         u_int16_t reg, mask, val = 0;
676
677         if (!ps)
678                 return 0;
679
680         switch (engine) {
681         case GLAMO_ENGINE_LCD:
682                 pll = GLAMO_PLL1;
683                 reg = GLAMO_REG_CLOCK_GEN7;
684                 mask = 0xff;
685                 break;
686         default:
687                 dev_warn(&glamo->pdev->dev,
688                          "reclock of engine 0x%x not supported\n", engine);
689                 return -EINVAL;
690                 break;
691         }
692
693         pll = glamo_pll_rate(glamo, pll);
694         khz = 1000000000UL / ps;
695
696         if (khz)
697                 val = (pll / khz) / 1000;
698
699         dev_dbg(&glamo->pdev->dev,
700                         "PLL %d, kHZ %d, div %d\n", pll, khz, val);
701
702         if (val) {
703                 val--;
704                 reg_set_bit_mask(glamo, reg, mask, val);
705                 mdelay(5); /* wait some time to stabilize */
706
707                 return 0;
708         } else {
709                 return -EINVAL;
710         }
711 }
712 EXPORT_SYMBOL_GPL(glamo_engine_reclock);
713
714 /***********************************************************************
715  * script support
716  ***********************************************************************/
717
/*
 * Execute a register script: an array of { reg, val } pairs written in
 * order, with a few magic register values interpreted specially:
 *
 *   0xffff -- end of script, return immediately
 *   0xfffe -- delay: msleep(val) if @may_sleep, else mdelay(val * 4)
 *   0xfffd -- busy-wait until both PLLs report lock
 *   0x200  -- memory timing register: the value actually written is
 *             chosen from the 'slow_memory' module parameter, not from
 *             the script
 *
 * Always returns 0.
 */
int glamo_run_script(struct glamo_core *glamo, struct glamo_script *script,
		     int len, int may_sleep)
{
	int i;

	for (i = 0; i < len; i++) {
		struct glamo_script *line = &script[i];

		switch (line->reg) {
		case 0xffff:
			return 0;
		case 0xfffe:
			if (may_sleep)
				msleep(line->val);
			else
				mdelay(line->val * 4);
			break;
		case 0xfffd:
			/* spin until PLLs lock */
			while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
				;
			break;

		/*
		 * couple of people reported artefacts with 2.6.28 changes, this
		 * allows reversion to 2.6.24 settings
		 */

		case 0x200:
			switch (slow_memory) {
			/* choice 1 is the most conservative */
			case 1: /* 3 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xef0);
				break;
			case 2: /* 2 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xea0);
				break;
			case 3: /* 1 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe50);
				break;
			case 4: /* 0 waits on Async BB R & W, Use PLL 1 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe00);
				break;

			/* using PLL2 for memory bus increases CPU bandwidth significantly */
			case 5: /* 3 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xef3);
				break;
			case 6: /* 2 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xea3);
				break;
			case 7: /* 1 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe53);
				break;
			/* default of 0 or >7 is fastest */
			default: /* 0 waits on Async BB R & W, Use PLL 2 for mem bus */
				__reg_write(glamo, script[i].reg, 0xe03);
				break;
			}
			break;

		default:
			/* ordinary entry: write the value verbatim */
			__reg_write(glamo, script[i].reg, script[i].val);
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL(glamo_run_script);
788
/*
 * Canned register script replayed at probe and resume to bring the chip
 * into a known-good state: cycle every engine clock through reset, set up
 * the PLLs and host-bus interface, program the DRAM controller timing and
 * refresh, and configure the GPIO muxing.
 *
 * Entries with the pseudo-addresses 0xfffe / 0xfffd are interpreted by the
 * script runner (glamo_run_script()), not written to hardware; 0xfffe
 * appears to be a delay with the value field as its argument and 0xfffd a
 * wait/sync point -- TODO confirm against the parser, which is outside
 * this view.
 */
static struct glamo_script glamo_init_script[] = {
	{ GLAMO_REG_CLOCK_HOST,		0x1000 },
		{ 0xfffe, 2 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x1000 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x2000 },
	{ GLAMO_REG_CLOCK_LCD,		0x1000 },
	{ GLAMO_REG_CLOCK_MMC,		0x1000 },
	{ GLAMO_REG_CLOCK_ISP,		0x1000 },
	{ GLAMO_REG_CLOCK_ISP,		0x3000 },
	{ GLAMO_REG_CLOCK_JPEG,		0x1000 },
	{ GLAMO_REG_CLOCK_3D,		0x1000 },
	{ GLAMO_REG_CLOCK_3D,		0x3000 },
	{ GLAMO_REG_CLOCK_2D,		0x1000 },
	{ GLAMO_REG_CLOCK_2D,		0x3000 },
	{ GLAMO_REG_CLOCK_RISC1,	0x1000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x3000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x3000 },
	{ GLAMO_REG_CLOCK_MPROC,	0x1000 /*0x100f*/ },
		{ 0xfffe, 2 },
	{ GLAMO_REG_CLOCK_HOST,		0x0000 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x0000 },
	{ GLAMO_REG_CLOCK_LCD,		0x0000 },
	{ GLAMO_REG_CLOCK_MMC,		0x0000 },
#if 0
/* unused engines must be left in reset to stop MMC block read "blackouts" */
	{ GLAMO_REG_CLOCK_ISP,		0x0000 },
	{ GLAMO_REG_CLOCK_ISP,		0x0000 },
	{ GLAMO_REG_CLOCK_JPEG,		0x0000 },
	{ GLAMO_REG_CLOCK_3D,		0x0000 },
	{ GLAMO_REG_CLOCK_3D,		0x0000 },
	{ GLAMO_REG_CLOCK_2D,		0x0000 },
	{ GLAMO_REG_CLOCK_2D,		0x0000 },
	{ GLAMO_REG_CLOCK_RISC1,	0x0000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x0000 },
	{ GLAMO_REG_CLOCK_MPEG,		0x0000 },
#endif
	{ GLAMO_REG_PLL_GEN1,		0x05db },	/* 48MHz */
	{ GLAMO_REG_PLL_GEN3,		0x0aba },	/* 90MHz */
	{ 0xfffd, 0 },
	/*
	 * b9 of this register MUST be zero to get any interrupts on INT#
	 * the other set bits enable all the engine interrupt sources
	 */
	{ GLAMO_REG_IRQ_ENABLE,		0x01ff },
	{ GLAMO_REG_CLOCK_GEN6,		0x2000 },
	{ GLAMO_REG_CLOCK_GEN7,		0x0101 },
	{ GLAMO_REG_CLOCK_GEN8,		0x0100 },
	{ GLAMO_REG_CLOCK_HOST,		0x000d },
	/*
	 * b7..b4 = 0 = no wait states on read or write
	 * b0 = 1 select PLL2 for Host interface, b1 = enable it
	 */
	{ 0x200,	0x0e03 /* this is replaced by script parser */ },
	{ 0x202,	0x07ff },
	{ 0x212,	0x0000 },
	{ 0x214,	0x4000 },
	{ 0x216,	0xf00e },

	/* S-Media recommended "set tiling mode to 512 mode for memory access
	 * more efficiency when 640x480" */
	{ GLAMO_REG_MEM_TYPE,		0x0c74 }, /* 8MB, 16 word pg wr+rd */
	{ GLAMO_REG_MEM_GEN,		0xafaf }, /* 63 grants min + max */

	{ GLAMO_REGOFS_HOSTBUS + 2,	0xffff }, /* enable  on MMIO*/

	{ GLAMO_REG_MEM_TIMING1,	0x0108 },
	{ GLAMO_REG_MEM_TIMING2,	0x0010 }, /* Taa = 3 MCLK */
	{ GLAMO_REG_MEM_TIMING3,	0x0000 },
	{ GLAMO_REG_MEM_TIMING4,	0x0000 }, /* CE1# delay fall/rise */
	{ GLAMO_REG_MEM_TIMING5,	0x0000 }, /* UB# LB# */
	{ GLAMO_REG_MEM_TIMING6,	0x0000 }, /* OE# */
	{ GLAMO_REG_MEM_TIMING7,	0x0000 }, /* WE# */
	{ GLAMO_REG_MEM_TIMING8,	0x1002 }, /* MCLK delay, was 0x1000 */
	{ GLAMO_REG_MEM_TIMING9,	0x6006 },
	{ GLAMO_REG_MEM_TIMING10,	0x00ff },
	{ GLAMO_REG_MEM_TIMING11,	0x0001 },
	{ GLAMO_REG_MEM_POWER1,		0x0020 },
	{ GLAMO_REG_MEM_POWER2,		0x0000 },
	/* DRAM init sequence: reset, then enable with two flavours of
	 * mode-register setup, separated by script-parser delays */
	{ GLAMO_REG_MEM_DRAM1,		0x0000 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xc100 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xe100 },
	{ GLAMO_REG_MEM_DRAM2,		0x01d6 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x000b },
	{ GLAMO_REG_GPIO_GEN1,		0x000f },
	{ GLAMO_REG_GPIO_GEN2,		0x111e },
	{ GLAMO_REG_GPIO_GEN3,		0xccc3 },
	{ GLAMO_REG_GPIO_GEN4,		0x111e },
	{ GLAMO_REG_GPIO_GEN5,		0x000f },
};
/*
 * NOTE(review): dead code.  A shorter resume-only script, superseded by
 * glamo_power(GLAMO_POWER_ON), which hard-resets the chip and replays the
 * full glamo_init_script instead.  Kept for reference only.
 */
#if 0
static struct glamo_script glamo_resume_script[] = {

	{ GLAMO_REG_PLL_GEN1,		0x05db },	/* 48MHz */
	{ GLAMO_REG_PLL_GEN3,		0x0aba },	/* 90MHz */
	{ GLAMO_REG_DFT_GEN6, 1 },
		{ 0xfffe, 100 },
		{ 0xfffd, 0 },
	{ 0x200,	0x0e03 },

	/*
	 * b9 of this register MUST be zero to get any interrupts on INT#
	 * the other set bits enable all the engine interrupt sources
	 */
	{ GLAMO_REG_IRQ_ENABLE,		0x01ff },
	{ GLAMO_REG_CLOCK_HOST,		0x0018 },
	{ GLAMO_REG_CLOCK_GEN5_1, 0x18b1 },

	{ GLAMO_REG_MEM_DRAM1,		0x0000 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xc100 },
		{ 0xfffe, 1 },
	{ GLAMO_REG_MEM_DRAM1,		0xe100 },
	{ GLAMO_REG_MEM_DRAM2,		0x01d6 },
	{ GLAMO_REG_CLOCK_MEMORY,	0x000b },
};
#endif
907
/* Target power states accepted by glamo_power() */
enum glamo_power {
	GLAMO_POWER_ON,		/* external reset + full init script replay */
	GLAMO_POWER_SUSPEND,	/* engines off, DRAM self-refresh, PLLs down */
};
912
/*
 * Transition the whole chip between full power and suspend.
 *
 * GLAMO_POWER_ON: hard external reset followed by a replay of the complete
 * init script -- the comment in the ON branch explains why a gentler resume
 * path is not viable.  GLAMO_POWER_SUSPEND: mask interrupts, stop every
 * running engine (remembering which ones were up so resume code can restore
 * them), put DRAM into self-refresh then deep power-down, and finally kill
 * both PLLs.  The statement order in the suspend path matters: RAM must be
 * preserved (self-refresh) before its clock and the PLLs go away.
 *
 * Runs entirely under glamo->lock with interrupts disabled, so the
 * udelay/mdelay busy-waits below execute with IRQs off.
 */
static void glamo_power(struct glamo_core *glamo,
			enum glamo_power new_state)
{
	int n;
	unsigned long flags;

	spin_lock_irqsave(&glamo->lock, flags);

	dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);

	/*
Power management
static const REG_VALUE_MASK_TYPE reg_powerOn[] =
{
    { REG_GEN_DFT6,     REG_BIT_ALL,    REG_DATA(1u << 0)           },
    { REG_GEN_PLL3,     0u,             REG_DATA(1u << 13)          },
    { REG_GEN_MEM_CLK,  REG_BIT_ALL,    REG_BIT_EN_MOCACLK          },
    { REG_MEM_DRAM2,    0u,             REG_BIT_EN_DEEP_POWER_DOWN  },
    { REG_MEM_DRAM1,    0u,             REG_BIT_SELF_REFRESH        }
};

static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
{
    { REG_MEM_DRAM1,    REG_BIT_ALL,    REG_BIT_SELF_REFRESH    },
    { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK      },
    { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)      },
    { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)       }
};

static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
{
    { REG_MEM_DRAM2,    REG_BIT_ALL,    REG_BIT_EN_DEEP_POWER_DOWN  },
    { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK          },
    { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)          },
    { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)           }
};
*/

	switch (new_state) {
	case GLAMO_POWER_ON:

		/*
		 * glamo state on resume is nondeterministic in some
		 * fundamental way, it has also been observed that the
		 * Glamo reset pin can get asserted by, eg, touching it with
		 * a scope probe.  So the only answer is to roll with it and
		 * force an external reset on the Glamo during resume.
		 */

		(glamo->pdata->glamo_external_reset)(0);
		udelay(10);
		(glamo->pdata->glamo_external_reset)(1);
		mdelay(5);

		glamo_run_script(glamo, glamo_init_script,
			 ARRAY_SIZE(glamo_init_script), 0);

		break;

	case GLAMO_POWER_SUSPEND:

		/* nuke interrupts */
		__reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);

		/* stash a copy of which engines were running */
		glamo->engine_enabled_bitfield_suspend =
						 glamo->engine_enabled_bitfield;

		/* take down each engine before we kill mem and pll */
		for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
			if (glamo->engine_enabled_bitfield & (1 << n))
				__glamo_engine_disable(glamo, n);

		/* enable self-refresh */

		__reg_write(glamo, GLAMO_REG_MEM_DRAM1,
					GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
					GLAMO_MEM_DRAM1_EN_GATE_CKE |
					GLAMO_MEM_DRAM1_SELF_REFRESH |
					GLAMO_MEM_REFRESH_COUNT);
		__reg_write(glamo, GLAMO_REG_MEM_DRAM1,
					GLAMO_MEM_DRAM1_EN_MODEREG_SET |
					GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
					GLAMO_MEM_DRAM1_EN_GATE_CKE |
					GLAMO_MEM_DRAM1_SELF_REFRESH |
					GLAMO_MEM_REFRESH_COUNT);

		/* force RAM into deep powerdown */

		__reg_write(glamo, GLAMO_REG_MEM_DRAM2,
					GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
					(7 << 6) | /* tRC */
					(1 << 4) | /* tRP */
					(1 << 2) | /* tRCD */
					2); /* CAS latency */

		/* disable clocks to memory */
		__reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);

		/* all dividers from OSCI */
		__reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);

		/* PLL2 into bypass */
		__reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);

		__reg_write(glamo, 0x200, 0x0e00);


		/* kill PLLS 1 then 2 */
		__reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
		__reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);

		break;
	}

	spin_unlock_irqrestore(&glamo->lock, flags);
}
1030
/*
 * NOTE(review): dead code -- compiled out and syntactically incomplete
 * (the inner pattern-write loop was never finished).  Kept for reference
 * as a sketch of per-chip-type DRAM size probing.
 */
#if 0
#define MEMDETECT_RETRY 6
static unsigned int detect_memsize(struct glamo_core *glamo)
{
	int i;

	/*static const u_int16_t pattern[] = {
		0x1111, 0x8a8a, 0x2222, 0x7a7a,
		0x3333, 0x6a6a, 0x4444, 0x5a5a,
		0x5555, 0x4a4a, 0x6666, 0x3a3a,
		0x7777, 0x2a2a, 0x8888, 0x1a1a
	}; */

	for (i = 0; i < MEMDETECT_RETRY; i++) {
		switch (glamo->type) {
		case 3600:
			__reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
			__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
			break;
		case 3650:
			switch (glamo->revision) {
			case GLAMO_CORE_REV_A0:
				if (i & 1)
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x097a);
				else
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0173);

				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
				msleep(1);
				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
				break;
			default:
				if (i & 1)
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0972);
				else
					__reg_write(glamo, GLAMO_REG_MEM_TYPE,
						    0x0872);

				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
				msleep(1);
				__reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
				break;
			}
			break;
		case 3700:
			/* FIXME */
		default:
			break;
		}

#if 0
		/* FIXME: finish implementation */
		for (j = 0; j < 8; j++) {
			__
#endif
	}

	return 0;
}
#endif
1094
1095 /* Find out if we can support this version of the Glamo chip */
1096 static int glamo_supported(struct glamo_core *glamo)
1097 {
1098         u_int16_t dev_id, rev_id; /*, memsize; */
1099
1100         dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
1101         rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);
1102
1103         switch (dev_id) {
1104         case 0x3650:
1105                 switch (rev_id) {
1106                 case GLAMO_CORE_REV_A2:
1107                         break;
1108                 case GLAMO_CORE_REV_A0:
1109                 case GLAMO_CORE_REV_A1:
1110                 case GLAMO_CORE_REV_A3:
1111                         dev_warn(&glamo->pdev->dev, "untested core revision "
1112                                  "%04x, your mileage may vary\n", rev_id);
1113                         break;
1114                 default:
1115                         dev_warn(&glamo->pdev->dev, "unknown glamo revision "
1116                                  "%04x, your mileage may vary\n", rev_id);
1117                         /* maybe should abort ? */
1118                 }
1119                 break;
1120         case 0x3600:
1121         case 0x3700:
1122         default:
1123                 dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
1124                         dev_id);
1125                 return 0;
1126         }
1127
1128         dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
1129                  "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
1130                  glamo_pll_rate(glamo, GLAMO_PLL1),
1131                  glamo_pll_rate(glamo, GLAMO_PLL2));
1132
1133         return 1;
1134 }
1135
1136 static int __init glamo_probe(struct platform_device *pdev)
1137 {
1138         int rc = 0, irq;
1139         struct glamo_core *glamo;
1140         struct platform_device *glamo_mmc_dev;
1141
1142         if (glamo_handle) {
1143                 dev_err(&pdev->dev,
1144                         "This driver supports only one instance\n");
1145                 return -EBUSY;
1146         }
1147
1148         glamo = kmalloc(GFP_KERNEL, sizeof(*glamo));
1149         if (!glamo)
1150                 return -ENOMEM;
1151
1152         spin_lock_init(&glamo->lock);
1153         glamo_handle = glamo;
1154         glamo->pdev = pdev;
1155         glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1156         glamo->irq = platform_get_irq(pdev, 0);
1157         glamo->pdata = pdev->dev.platform_data;
1158         if (!glamo->mem || !glamo->pdata) {
1159                 dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
1160                 rc = -ENOENT;
1161                 goto bail_free;
1162         }
1163
1164         /* register a number of sibling devices whoise IOMEM resources
1165          * are siblings of pdev's IOMEM resource */
1166 #if 0
1167         glamo_core_dev.dev.parent = &pdev.dev;
1168         mangle_mem_resources(glamo_core_dev.resources,
1169                              glamo_core_dev.num_resources, glamo->mem);
1170         glamo_core_dev.resources[1].start = glamo->irq;
1171         glamo_core_dev.resources[1].end = glamo->irq;
1172         platform_device_register(&glamo_core_dev);
1173 #endif
1174         /* only remap the generic, hostbus and memory controller registers */
1175         glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
1176         if (!glamo->base) {
1177                 dev_err(&pdev->dev, "failed to ioremap() memory region\n");
1178                 goto bail_free;
1179         }
1180
1181         platform_set_drvdata(pdev, glamo);
1182
1183         (glamo->pdata->glamo_external_reset)(0);
1184         udelay(10);
1185         (glamo->pdata->glamo_external_reset)(1);
1186         mdelay(10);
1187
1188         /*
1189          * finally set the mfd interrupts up
1190          * can't do them earlier or sibling probes blow up
1191          */
1192
1193         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1194                 set_irq_chip(irq, &glamo_irq_chip);
1195                 set_irq_handler(irq, handle_level_irq);
1196                 set_irq_flags(irq, IRQF_VALID);
1197         }
1198
1199         if (glamo->pdata->glamo_irq_is_wired &&
1200             !glamo->pdata->glamo_irq_is_wired()) {
1201                 set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
1202                 set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
1203                 dev_info(&pdev->dev, "Glamo interrupt registered\n");
1204                 glamo->irq_works = 1;
1205         } else {
1206                 dev_err(&pdev->dev, "Glamo interrupt not used\n");
1207                 glamo->irq_works = 0;
1208         }
1209
1210
1211         /* confirm it isn't insane version */
1212         if (!glamo_supported(glamo)) {
1213                 dev_err(&pdev->dev, "This Glamo is not supported\n");
1214                 goto bail_irq;
1215         }
1216
1217         /* sysfs */
1218         rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
1219         if (rc < 0) {
1220                 dev_err(&pdev->dev, "cannot create sysfs group\n");
1221                 goto bail_irq;
1222         }
1223
1224         /* init the chip with canned register set */
1225
1226         dev_dbg(&glamo->pdev->dev, "running init script\n");
1227         glamo_run_script(glamo, glamo_init_script,
1228                          ARRAY_SIZE(glamo_init_script), 1);
1229
1230         dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
1231                  glamo_pll_rate(glamo, GLAMO_PLL1),
1232                  glamo_pll_rate(glamo, GLAMO_PLL2));
1233
1234         /* bring MCI specific stuff over from our MFD platform data */
1235         glamo_mci_def_pdata.glamo_can_set_mci_power =
1236                                         glamo->pdata->glamo_can_set_mci_power;
1237         glamo_mci_def_pdata.glamo_mci_use_slow =
1238                                         glamo->pdata->glamo_mci_use_slow;
1239         glamo_mci_def_pdata.glamo_irq_is_wired =
1240                                         glamo->pdata->glamo_irq_is_wired;
1241
1242         /* start creating the siblings */
1243         glamo->pdata->glamo = glamo;
1244
1245         /* Command queue device (for DRM) */
1246         glamo_graphics_dev.dev.parent = &pdev->dev;
1247         glamo_graphics_dev.dev.platform_data = glamo->pdata;
1248         mangle_mem_resources(glamo_graphics_dev.resource,
1249                              glamo_graphics_dev.num_resources, glamo->mem);
1250         platform_device_register(&glamo_graphics_dev);
1251
1252         /* GPIO */
1253         glamo->pdata->spigpio_info->glamo = glamo;
1254         glamo_spigpio_dev.dev.parent = &pdev->dev;
1255         glamo_spigpio_dev.dev.platform_data = glamo->pdata->spigpio_info;
1256         platform_device_register(&glamo_spigpio_dev);
1257
1258         /* MMC */
1259         glamo_mmc_dev = glamo->pdata->mmc_dev;
1260         glamo_mmc_dev->name = "glamo-mci";
1261         glamo_mmc_dev->dev.parent = &pdev->dev;
1262         glamo_mmc_dev->resource = glamo_mmc_resources;
1263         glamo_mmc_dev->num_resources = ARRAY_SIZE(glamo_mmc_resources);
1264         glamo_mci_def_pdata.pglamo = glamo;
1265         mangle_mem_resources(glamo_mmc_dev->resource,
1266                              glamo_mmc_dev->num_resources, glamo->mem);
1267         platform_device_register(glamo_mmc_dev);
1268
1269         /* Only request the generic, hostbus and memory controller MMIO */
1270         glamo->mem = request_mem_region(glamo->mem->start,
1271                                         GLAMO_REGOFS_VIDCAP, "glamo-core");
1272         if (!glamo->mem) {
1273                 dev_err(&pdev->dev, "failed to request memory region\n");
1274                 goto bail_irq;
1275         }
1276
1277         return 0;
1278
1279 bail_irq:
1280         disable_irq(glamo->irq);
1281         set_irq_chained_handler(glamo->irq, NULL);
1282
1283         for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
1284                 set_irq_flags(irq, 0);
1285                 set_irq_chip(irq, NULL);
1286         }
1287
1288         iounmap(glamo->base);
1289 bail_free:
1290         platform_set_drvdata(pdev, NULL);
1291         glamo_handle = NULL;
1292         kfree(glamo);
1293
1294         return rc;
1295 }
1296
/*
 * Tear down the driver: quiesce interrupts first (detach the chained demux
 * and strip the per-source irq_chips), then unregister the MMC sibling,
 * unmap the MMIO window and free the single-instance state.  Order matters:
 * interrupts must be dead before the register window goes away.
 */
static int glamo_remove(struct platform_device *pdev)
{
	struct glamo_core *glamo = platform_get_drvdata(pdev);
	int irq;

	disable_irq(glamo->irq);
	set_irq_chained_handler(glamo->irq, NULL);

	/* undo the per-source irq setup done in probe */
	for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
		set_irq_flags(irq, 0);
		set_irq_chip(irq, NULL);
	}

	platform_set_drvdata(pdev, NULL);
	platform_device_unregister(glamo->pdata->mmc_dev);
	/* FIXME: Don't we need to unregister these as well?
	 * platform_device_unregister(glamo->pdata->graphics_dev);
	 * platform_device_unregister(glamo->pdata->gpio_dev); */
	iounmap(glamo->base);
	release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
	glamo_handle = NULL;	/* allow a future probe to succeed */
	kfree(glamo);

	return 0;
}
1322
1323 #ifdef CONFIG_PM
1324
/*
 * Suspend: flag the suspend in progress *before* powering down, so code
 * elsewhere that checks glamo_handle->suspending sees it during the
 * transition, then drop the chip to its low-power state.
 */
static int glamo_suspend(struct platform_device *pdev, pm_message_t state)
{
	glamo_handle->suspending = 1;
	glamo_power(glamo_handle, GLAMO_POWER_SUSPEND);

	return 0;
}
1332
/*
 * Resume: bring the chip fully back up (external reset + init script via
 * glamo_power) and only then clear the suspending flag -- mirror image of
 * glamo_suspend().
 */
static int glamo_resume(struct platform_device *pdev)
{
	glamo_power(glamo_handle, GLAMO_POWER_ON);
	glamo_handle->suspending = 0;

	return 0;
}
1340
1341 #else
1342 #define glamo_suspend NULL
1343 #define glamo_resume  NULL
1344 #endif
1345
1346 static struct platform_driver glamo_driver = {
1347         .probe          = glamo_probe,
1348         .remove         = glamo_remove,
1349         .suspend        = glamo_suspend,
1350         .resume = glamo_resume,
1351         .driver         = {
1352                 .name   = "glamo3362",
1353                 .owner  = THIS_MODULE,
1354         },
1355 };
1356
1357 static int __devinit glamo_init(void)
1358 {
1359         return platform_driver_register(&glamo_driver);
1360 }
1361
/* Module exit point: unregister the driver (triggers glamo_remove()) */
static void __exit glamo_cleanup(void)
{
	platform_driver_unregister(&glamo_driver);
}
1366
/* Module plumbing and metadata */
module_init(glamo_init);
module_exit(glamo_cleanup);

MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
MODULE_LICENSE("GPL");