/* drivers/mfd/glamo/glamo-core.c */
/* Smedia Glamo 336x/337x driver
 *
 * (C) 2007 by Openmoko, Inc.
 * Author: Harald Welte <laforge@openmoko.org>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/glamofb.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/div64.h>

/* #include <mach/regs-irq.h> */

#ifdef CONFIG_PM
#include <linux/pm.h>
#endif

#include "glamo-regs.h"
#include "glamo-core.h"
#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)

#define GLAMO_MEM_REFRESH_COUNT 0x100


/*
 * Glamo internal settings
 *
 * We run the memory interface from the faster PLLB on 2.6.28 kernels and
 * above.  A couple of GTA02 users reported trouble with the memory bus when
 * they upgraded from 2.6.24, so this parameter allows reverting to the
 * 2.6.24 scheme if their Glamo chip needs it.
 *
 * You can override the faster default on the kernel command line using
 *
 *   glamo3362.slow_memory=1
 *
 * for example.
 */

static int slow_memory = 0;
module_param(slow_memory, int, 0644);

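/*
 * Note (illustrative): because the parameter is declared with mode 0644 it
 * is also writable at runtime through sysfs, e.g.
 *
 *   echo 1 > /sys/module/glamo3362/parameters/slow_memory
 *
 * assuming the module name matches the "glamo3362" prefix shown above.  The
 * value is only consulted when the init script is run, i.e. at probe time
 * and when the script is replayed on resume.
 */
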
struct reg_range {
        int start;
        int count;
        char *name;
        char dump;
};
struct reg_range reg_range[] = {
        { 0x0000, 0x76,         "General",      1 },
        { 0x0200, 0x16,         "Host Bus",     1 },
        { 0x0300, 0x38,         "Memory",       1 },
/*      { 0x0400, 0x100,        "Sensor",       0 }, */
/*      { 0x0500, 0x300,        "ISP",          0 }, */
/*      { 0x0800, 0x400,        "JPEG",         0 }, */
/*      { 0x0c00, 0xcc,         "MPEG",         0 }, */
        { 0x1100, 0xb2,         "LCD 1",        1 },
        { 0x1200, 0x64,         "LCD 2",        1 },
        { 0x1400, 0x40,         "MMC",          1 },
/*      { 0x1500, 0x080,        "MPU 0",        0 },
        { 0x1580, 0x080,        "MPU 1",        0 },
        { 0x1600, 0x080,        "Cmd Queue",    0 },
        { 0x1680, 0x080,        "RISC CPU",     0 },
        { 0x1700, 0x400,        "2D Unit",      0 },
        { 0x1b00, 0x900,        "3D Unit",      0 }, */
};

static struct glamo_core *glamo_handle;

static inline void __reg_write(struct glamo_core *glamo,
                                u_int16_t reg, u_int16_t val)
{
        /* trace register writes only when debugging; a KERN_CRIT printk
         * here floods the log on every single write */
        dev_dbg(&glamo->pdev->dev, "core reg 0x%04x <- 0x%04x\n", reg, val);
        writew(val, glamo->base + reg);
}

static inline u_int16_t __reg_read(struct glamo_core *glamo,
                                   u_int16_t reg)
{
        return readw(glamo->base + reg);
}

static void __reg_set_bit_mask(struct glamo_core *glamo,
                                u_int16_t reg, u_int16_t mask,
                                u_int16_t val)
{
        u_int16_t tmp;

        val &= mask;

        tmp = __reg_read(glamo, reg);
        tmp &= ~mask;
        tmp |= val;
        __reg_write(glamo, reg, tmp);
}

static void reg_set_bit_mask(struct glamo_core *glamo,
                                u_int16_t reg, u_int16_t mask,
                                u_int16_t val)
{
        spin_lock(&glamo->lock);
        __reg_set_bit_mask(glamo, reg, mask, val);
        spin_unlock(&glamo->lock);
}

static inline void __reg_set_bit(struct glamo_core *glamo,
                                 u_int16_t reg, u_int16_t bit)
{
        __reg_set_bit_mask(glamo, reg, bit, 0xffff);
}

static inline void __reg_clear_bit(struct glamo_core *glamo,
                                   u_int16_t reg, u_int16_t bit)
{
        __reg_set_bit_mask(glamo, reg, bit, 0);
}

static inline void glamo_vmem_write(struct glamo_core *glamo, u_int32_t addr,
                                    u_int16_t *src, int len)
{
        if (addr & 0x0001 || (unsigned long)src & 0x0001 || len & 0x0001) {
                dev_err(&glamo->pdev->dev, "unaligned write(0x%08x, 0x%p, "
                        "0x%x)!!\n", addr, src, len);
        }
}

static inline void glamo_vmem_read(struct glamo_core *glamo, u_int16_t *buf,
                                   u_int32_t addr, int len)
{
        if (addr & 0x0001 || (unsigned long) buf & 0x0001 || len & 0x0001) {
                dev_err(&glamo->pdev->dev, "unaligned read(0x%p, 0x%08x, "
                        "0x%x)!!\n", buf, addr, len);
        }
}

/***********************************************************************
 * resources of sibling devices
 ***********************************************************************/
static struct resource glamo_graphics_resources[] = {
        {
                .name   = "glamo-cmdq-regs",
                .start  = GLAMO_REGOFS_CMDQUEUE,
                .end    = GLAMO_REGOFS_RISC - 1,
                .flags  = IORESOURCE_MEM,
        }, {
                .name   = "glamo-command-queue",
                .start  = GLAMO_MEM_BASE + GLAMO_OFFSET_CMDQ,
                .end    = GLAMO_MEM_BASE + GLAMO_OFFSET_CMDQ +
                          GLAMO_CMDQ_SIZE - 1,
                .flags  = IORESOURCE_MEM,
        }, {
                .name   = "glamo-fb-mem",
                .start  = GLAMO_MEM_BASE + GLAMO_OFFSET_FB,
                .end    = GLAMO_MEM_BASE + GLAMO_OFFSET_FB +
                          GLAMO_FB_SIZE - 1,
                .flags  = IORESOURCE_MEM,
        }, {
                .name   = "glamo-fb-regs",
                .start  = GLAMO_REGOFS_LCD,
                .end    = GLAMO_REGOFS_MMC - 1,
                .flags  = IORESOURCE_MEM,
        }
};

static struct platform_device glamo_graphics_dev = {
        .name           = "glamo-fb",
        .resource       = glamo_graphics_resources,
        .num_resources  = ARRAY_SIZE(glamo_graphics_resources),
};

static struct platform_device glamo_spigpio_dev = {
        .name           = "glamo-spi-gpio",
};


static struct resource glamo_mmc_resources[] = {
        {
                /* FIXME: those need to be incremented by parent base */
                .start  = GLAMO_REGOFS_MMC,
                .end    = GLAMO_REGOFS_MPROC0 - 1,
                .flags  = IORESOURCE_MEM
        }, {
                .start  = IRQ_GLAMO_MMC,
                .end    = IRQ_GLAMO_MMC,
                .flags  = IORESOURCE_IRQ,
        }, { /* our data buffer for MMC transfers */
                .start  = GLAMO_MEM_BASE + GLAMO_OFFSET_MMC,
                .end    = GLAMO_MEM_BASE + GLAMO_OFFSET_MMC +
                          GLAMO_MMC_BUFFER_SIZE - 1,
                .flags  = IORESOURCE_MEM
        },
};

struct glamo_mci_pdata glamo_mci_def_pdata = {
        .gpio_detect            = 0,
        .glamo_can_set_mci_power        = NULL, /* filled in from MFD platform data */
        .ocr_avail      = MMC_VDD_20_21 |
                          MMC_VDD_21_22 |
                          MMC_VDD_22_23 |
                          MMC_VDD_23_24 |
                          MMC_VDD_24_25 |
                          MMC_VDD_25_26 |
                          MMC_VDD_26_27 |
                          MMC_VDD_27_28 |
                          MMC_VDD_28_29 |
                          MMC_VDD_29_30 |
                          MMC_VDD_30_31 |
                          MMC_VDD_32_33,
        .glamo_irq_is_wired     = NULL, /* filled in from MFD platform data */
        .mci_suspending = NULL, /* filled in from MFD platform data */
        .mci_all_dependencies_resumed = NULL, /* filled in from MFD platform data */
};
EXPORT_SYMBOL_GPL(glamo_mci_def_pdata);


static void mangle_mem_resources(struct resource *res, int num_res,
                                 struct resource *parent)
{
        int i;

        for (i = 0; i < num_res; i++) {
                if (res[i].flags != IORESOURCE_MEM)
                        continue;
                res[i].start += parent->start;
                res[i].end += parent->start;
                res[i].parent = parent;
        }
}

/***********************************************************************
 * IRQ demultiplexer
 ***********************************************************************/
#define irq2glamo(x)    ((x) - IRQ_GLAMO(0))

static void glamo_ack_irq(unsigned int irq)
{
        /* clear interrupt source */
        __reg_write(glamo_handle, GLAMO_REG_IRQ_CLEAR,
                    1 << irq2glamo(irq));
}

static void glamo_mask_irq(unsigned int irq)
{
        u_int16_t tmp;

        /* clear bit in enable register */
        tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
        tmp &= ~(1 << irq2glamo(irq));
        __reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
}

static void glamo_unmask_irq(unsigned int irq)
{
        u_int16_t tmp;

        /* set bit in enable register */
        tmp = __reg_read(glamo_handle, GLAMO_REG_IRQ_ENABLE);
        tmp |= (1 << irq2glamo(irq));
        __reg_write(glamo_handle, GLAMO_REG_IRQ_ENABLE, tmp);
}

static struct irq_chip glamo_irq_chip = {
        .ack    = glamo_ack_irq,
        .mask   = glamo_mask_irq,
        .unmask = glamo_unmask_irq,
};

static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        const unsigned int cpu = smp_processor_id();

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

        if (unlikely(desc->status & IRQ_INPROGRESS)) {
                desc->status |= (IRQ_PENDING | IRQ_MASKED);
                desc->chip->mask(irq);
                desc->chip->ack(irq);
                return;
        }

        kstat_cpu(cpu).irqs[irq]++;
        desc->chip->ack(irq);
        desc->status |= IRQ_INPROGRESS;

        do {
                u_int16_t irqstatus;
                int i;

                if (unlikely((desc->status &
                                (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                                (IRQ_PENDING | IRQ_MASKED))) {
                        /* dealing with pending IRQ, unmasking */
                        desc->chip->unmask(irq);
                        desc->status &= ~IRQ_MASKED;
                }

                desc->status &= ~IRQ_PENDING;

                /* read IRQ status register */
                irqstatus = __reg_read(glamo_handle, GLAMO_REG_IRQ_STATUS);
                for (i = 0; i < 9; i++)
                        if (irqstatus & (1 << i))
                                desc_handle_irq(IRQ_GLAMO(i),
                                    irq_desc + IRQ_GLAMO(i));

        } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

        desc->status &= ~IRQ_INPROGRESS;
}


static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        /* accept decimal or 0x-prefixed hex for both register and value */
        unsigned long reg = simple_strtoul(buf, NULL, 0);
        struct glamo_core *glamo = dev_get_drvdata(dev);

        while (*buf && (*buf != ' '))
                buf++;
        if (*buf != ' ')
                return -EINVAL;
        while (*buf && (*buf == ' '))
                buf++;
        if (!*buf)
                return -EINVAL;

        printk(KERN_INFO "reg 0x%02lX <-- 0x%04lX\n",
               reg, simple_strtoul(buf, NULL, 0));

        __reg_write(glamo, reg, simple_strtoul(buf, NULL, 0));

        return count;
}

static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct glamo_core *glamo = dev_get_drvdata(dev);
        int n, n1 = 0, r;
        char *end = buf;

        spin_lock(&glamo->lock);

        for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
                if (!reg_range[r].dump)
                        continue;
                n1 = 0;
                end += sprintf(end, "\n%s\n", reg_range[r].name);
                for (n = reg_range[r].start;
                     n < reg_range[r].start + reg_range[r].count; n += 2) {
                        if (((n1++) & 7) == 0)
                                end += sprintf(end, "\n%04X:  ", n);
                        end += sprintf(end, "%04x ", __reg_read(glamo, n));
                }
                end += sprintf(end, "\n");
                if (!attr) {
                        printk("%s", buf);
                        end = buf;
                }
        }
        spin_unlock(&glamo->lock);

        return end - buf;
}

static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
static struct attribute *glamo_sysfs_entries[] = {
        &dev_attr_regs.attr,
        NULL
};
static struct attribute_group glamo_attr_group = {
        .name   = NULL,
        .attrs  = glamo_sysfs_entries,
};
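
/*
 * Usage note (illustrative): the attribute group is registered against the
 * parent platform device in glamo_probe(), so the register file appears in
 * that device's sysfs directory, e.g.
 * /sys/bus/platform/devices/glamo3362.0/regs (the exact name depends on the
 * board's platform device id).  Reading it dumps every range marked for
 * dumping in reg_range[]; writing takes a "<reg> <value>" pair and pokes a
 * single register through __reg_write().
 */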


/***********************************************************************
 * 'engine' support
 ***********************************************************************/

int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
        switch (engine) {
        case GLAMO_ENGINE_LCD:
                __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
                                   GLAMO_HOSTBUS2_MMIO_EN_LCD,
                                   GLAMO_HOSTBUS2_MMIO_EN_LCD);
                __reg_write(glamo, GLAMO_REG_CLOCK_LCD,
                            GLAMO_CLOCK_LCD_EN_M5CLK |
                            GLAMO_CLOCK_LCD_EN_DHCLK |
                            GLAMO_CLOCK_LCD_EN_DMCLK |
                            GLAMO_CLOCK_LCD_EN_DCLK |
                            GLAMO_CLOCK_LCD_DG_M5CLK |
                            GLAMO_CLOCK_LCD_DG_DMCLK);
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
                            GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
                            GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
                            GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
                break;
        case GLAMO_ENGINE_MMC:
                __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
                                   GLAMO_HOSTBUS2_MMIO_EN_MMC,
                                   GLAMO_HOSTBUS2_MMIO_EN_MMC);
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
                                   GLAMO_CLOCK_MMC_EN_M9CLK |
                                   GLAMO_CLOCK_MMC_EN_TCLK |
                                   GLAMO_CLOCK_MMC_DG_M9CLK |
                                   GLAMO_CLOCK_MMC_DG_TCLK, 0xffff);
                /* enable the TCLK divider clk input */
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
                                                 GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
                                                 GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
                break;
        case GLAMO_ENGINE_2D:
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
                                   GLAMO_CLOCK_2D_EN_M7CLK |
                                   GLAMO_CLOCK_2D_EN_GCLK |
                                   GLAMO_CLOCK_2D_DG_M7CLK |
                                   GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
                __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
                                   GLAMO_HOSTBUS2_MMIO_EN_2D,
                                   GLAMO_HOSTBUS2_MMIO_EN_2D);
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
                                   GLAMO_CLOCK_GEN51_EN_DIV_GCLK, 0xffff);
                break;
        case GLAMO_ENGINE_CMDQ:
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
                                   GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
                __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
                                   GLAMO_HOSTBUS2_MMIO_EN_CQ,
                                   GLAMO_HOSTBUS2_MMIO_EN_CQ);
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
                                   GLAMO_CLOCK_GEN51_EN_DIV_MCLK, 0xffff);
                break;
        /* FIXME: Implementation */
        default:
                break;
        }

        glamo->engine_enabled_bitfield |= 1 << engine;

        return 0;
}

int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
{
        int ret;

        spin_lock(&glamo->lock);

        ret = __glamo_engine_enable(glamo, engine);

        spin_unlock(&glamo->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(glamo_engine_enable);

int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
        switch (engine) {
        case GLAMO_ENGINE_LCD:
                /* remove pixel clock to LCM */
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
                            GLAMO_CLOCK_LCD_EN_DCLK, 0);
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
                            GLAMO_CLOCK_LCD_EN_DHCLK |
                            GLAMO_CLOCK_LCD_EN_DMCLK, 0);
                /* kill memory clock */
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
                            GLAMO_CLOCK_LCD_EN_M5CLK, 0);
                /* stop dividing the clocks */
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
                            GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
                            GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
                            GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
                break;

        case GLAMO_ENGINE_MMC:
/*              __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
                                                   GLAMO_CLOCK_MMC_EN_M9CLK |
                                                   GLAMO_CLOCK_MMC_EN_TCLK |
                                                   GLAMO_CLOCK_MMC_DG_M9CLK |
                                                   GLAMO_CLOCK_MMC_DG_TCLK, 0); */
                /* disable the TCLK divider clk input */
/*              __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
                                        GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0); */

        default:
                break;
        }

        glamo->engine_enabled_bitfield &= ~(1 << engine);

        return 0;
}
int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
{
        int ret;

        spin_lock(&glamo->lock);

        ret = __glamo_engine_disable(glamo, engine);

        spin_unlock(&glamo->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(glamo_engine_disable);
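
/*
 * Illustrative only: a sibling driver (e.g. glamo-mci or glamo-fb) is
 * expected to hold a pointer to the shared struct glamo_core, which it
 * receives through its platform data at probe time, and to bracket its
 * hardware use with these calls, for example:
 *
 *      glamo_engine_enable(glamo, GLAMO_ENGINE_MMC);
 *      ... program the MMC engine registers ...
 *      glamo_engine_disable(glamo, GLAMO_ENGINE_MMC);
 */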

static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
        [GLAMO_ENGINE_LCD]      = GLAMO_REG_CLOCK_LCD,
        [GLAMO_ENGINE_MMC]      = GLAMO_REG_CLOCK_MMC,
        [GLAMO_ENGINE_ISP]      = GLAMO_REG_CLOCK_ISP,
        [GLAMO_ENGINE_JPEG]     = GLAMO_REG_CLOCK_JPEG,
        [GLAMO_ENGINE_3D]       = GLAMO_REG_CLOCK_3D,
        [GLAMO_ENGINE_2D]       = GLAMO_REG_CLOCK_2D,
        [GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
        [GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
};

void glamo_engine_clkreg_set(struct glamo_core *glamo,
                             enum glamo_engine engine,
                             u_int16_t mask, u_int16_t val)
{
        reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
}
EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);

u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
                                  enum glamo_engine engine)
{
        u_int16_t val;

        spin_lock(&glamo->lock);
        val = __reg_read(glamo, engine_clock_regs[engine]);
        spin_unlock(&glamo->lock);

        return val;
}
EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);

struct glamo_script reset_regs[] = {
        [GLAMO_ENGINE_LCD] = {
                GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
        },
#if 0
        [GLAMO_ENGINE_HOST] = {
                GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
        },
        [GLAMO_ENGINE_MEM] = {
                GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
        },
#endif
        [GLAMO_ENGINE_MMC] = {
                GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
        },
        [GLAMO_ENGINE_2D] = {
                GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
        },
        [GLAMO_ENGINE_JPEG] = {
                GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
        },
        /* The following is defined as "Reset command queue", nothing to do
         * with the 2D engine. */
        [GLAMO_ENGINE_CMDQ] = {
                GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
        },
};

void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
{
        struct glamo_script *rst;

        if (engine >= ARRAY_SIZE(reset_regs)) {
                dev_warn(&glamo->pdev->dev, "unknown engine %u ", engine);
                return;
        }

        rst = &reset_regs[engine];

        spin_lock(&glamo->lock);
        __reg_set_bit(glamo, rst->reg, rst->val);
        __reg_clear_bit(glamo, rst->reg, rst->val);
        spin_unlock(&glamo->lock);
}
EXPORT_SYMBOL_GPL(glamo_engine_reset);

void glamo_lcm_reset(int level)
{
        if (!glamo_handle)
                return;

        glamo_gpio_setpin(glamo_handle, GLAMO_GPIO4, level);
        glamo_gpio_cfgpin(glamo_handle, GLAMO_GPIO4_OUTPUT);
}
EXPORT_SYMBOL_GPL(glamo_lcm_reset);

enum glamo_pll {
        GLAMO_PLL1,
        GLAMO_PLL2,
};

static int glamo_pll_rate(struct glamo_core *glamo,
                          enum glamo_pll pll)
{
        u_int16_t reg;
        unsigned int div = 512;
        /* FIXME: move osci into platform_data */
        unsigned int osci = 32768;

        if (osci == 32768)
                div = 1;

        switch (pll) {
        case GLAMO_PLL1:
                reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
                break;
        case GLAMO_PLL2:
                reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
                break;
        default:
                return -EINVAL;
        }
        return (osci / div) * reg;
}
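
/*
 * For illustration: with the 32.768 kHz oscillator assumed above, the PLL
 * rate is simply 32768 * <register value>.  The init script below programs
 * GLAMO_REG_PLL_GEN1 = 0x05db (1499) and GLAMO_REG_PLL_GEN3 = 0x0aba (2746),
 * so glamo_pll_rate() reports roughly 32768 * 1499 = 49.1 MHz for PLL1 and
 * 32768 * 2746 = 90.0 MHz for PLL2, matching the "48MHz" and "90MHz"
 * comments in that script.
 */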

int glamo_engine_reclock(struct glamo_core *glamo,
                         enum glamo_engine engine,
                         int ps)
{
        int pll, khz;
        u_int16_t reg, mask, val = 0;

        if (!ps)
                return 0;

        switch (engine) {
        case GLAMO_ENGINE_LCD:
                pll = GLAMO_PLL1;
                reg = GLAMO_REG_CLOCK_GEN7;
                mask = 0xff;
                break;
        default:
                dev_warn(&glamo->pdev->dev,
                         "reclock of engine 0x%x not supported\n", engine);
                return -EINVAL;
        }

        pll = glamo_pll_rate(glamo, pll);
        khz = 1000000000UL / ps;

        if (khz)
                val = (pll / khz) / 1000;

        dev_dbg(&glamo->pdev->dev,
                        "PLL %d, kHz %d, div %d\n", pll, khz, val);

        if (val) {
                val--;
                reg_set_bit_mask(glamo, reg, mask, val);
                mdelay(5); /* wait some time to stabilize */

                return 0;
        } else {
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(glamo_engine_reclock);
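
/*
 * Worked example (illustrative): asking for an LCD pixel clock period of
 * ps = 50000 (20 MHz) with PLL1 at about 49.1 MHz gives
 * khz = 10^9 / 50000 = 20000 and val = (49119232 / 20000) / 1000 = 2, which
 * is decremented to 1 before being written to the divider field of
 * GLAMO_REG_CLOCK_GEN7; the field appears to hold (divisor - 1), so the
 * resulting dclk is roughly 49.1 / 2 = 24.6 MHz.
 */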

/***********************************************************************
 * script support
 ***********************************************************************/

int glamo_run_script(struct glamo_core *glamo, struct glamo_script *script,
                     int len, int may_sleep)
{
        int i;

        for (i = 0; i < len; i++) {
                struct glamo_script *line = &script[i];

                switch (line->reg) {
                case 0xffff:
                        return 0;
                case 0xfffe:
                        if (may_sleep)
                                msleep(line->val);
                        else
                                mdelay(line->val * 4);
                        break;
                case 0xfffd:
                        /* spin until PLLs lock */
                        while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
                                ;
                        break;

                /*
                 * a couple of people reported artefacts with the 2.6.28
                 * changes, so this allows reverting to the 2.6.24 settings
                 */

                case 0x200:
                        switch (slow_memory) {
                        /* choice 1 is the most conservative */
                        case 1: /* 3 waits on Async BB R & W, use PLL 1 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xef0);
                                break;
                        case 2: /* 2 waits on Async BB R & W, use PLL 1 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xea0);
                                break;
                        case 3: /* 1 wait on Async BB R & W, use PLL 1 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xe50);
                                break;
                        case 4: /* 0 waits on Async BB R & W, use PLL 1 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xe00);
                                break;

                        /* using PLL2 for the memory bus increases CPU bandwidth significantly */
                        case 5: /* 3 waits on Async BB R & W, use PLL 2 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xef3);
                                break;
                        case 6: /* 2 waits on Async BB R & W, use PLL 2 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xea3);
                                break;
                        case 7: /* 1 wait on Async BB R & W, use PLL 2 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xe53);
                                break;
                        /* default of 0 or >7 is fastest */
                        default: /* 0 waits on Async BB R & W, use PLL 2 for mem bus */
                                __reg_write(glamo, script[i].reg, 0xe03);
                                break;
                        }
                        break;

                default:
                        __reg_write(glamo, script[i].reg, script[i].val);
                        break;
                }
        }

        return 0;
}
EXPORT_SYMBOL(glamo_run_script);
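
/*
 * Example (not used by this driver): the script entries understood by
 * glamo_run_script() are ordinary register/value pairs plus three special
 * "registers": 0xffff ends the script early, 0xfffe delays for the given
 * number of milliseconds (msleep() when may_sleep is set, mdelay()
 * otherwise), and 0xfffd spins until both PLLs report lock in
 * GLAMO_REG_PLL_GEN5.  A minimal sketch:
 */
#if 0
static struct glamo_script example_script[] = {
        { GLAMO_REG_CLOCK_LCD,  0x1000 },       /* put LCD engine in reset */
        { 0xfffe,               2 },            /* wait 2 ms */
        { GLAMO_REG_CLOCK_LCD,  0x0000 },       /* release reset */
        { 0xfffd,               0 },            /* wait for PLL lock */
        { 0xffff,               0 },            /* end of script */
};
/* glamo_run_script(glamo, example_script, ARRAY_SIZE(example_script), 1); */
#endif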

static struct glamo_script glamo_init_script[] = {
        { GLAMO_REG_CLOCK_HOST,         0x1000 },
                { 0xfffe, 2 },
        { GLAMO_REG_CLOCK_MEMORY,       0x1000 },
        { GLAMO_REG_CLOCK_MEMORY,       0x2000 },
        { GLAMO_REG_CLOCK_LCD,          0x1000 },
        { GLAMO_REG_CLOCK_MMC,          0x1000 },
        { GLAMO_REG_CLOCK_ISP,          0x1000 },
        { GLAMO_REG_CLOCK_ISP,          0x3000 },
        { GLAMO_REG_CLOCK_JPEG,         0x1000 },
        { GLAMO_REG_CLOCK_3D,           0x1000 },
        { GLAMO_REG_CLOCK_3D,           0x3000 },
        { GLAMO_REG_CLOCK_2D,           0x1000 },
        { GLAMO_REG_CLOCK_2D,           0x3000 },
        { GLAMO_REG_CLOCK_RISC1,        0x1000 },
        { GLAMO_REG_CLOCK_MPEG,         0x3000 },
        { GLAMO_REG_CLOCK_MPEG,         0x3000 },
        { GLAMO_REG_CLOCK_MPROC,        0x1000 /*0x100f*/ },
                { 0xfffe, 2 },
        { GLAMO_REG_CLOCK_HOST,         0x0000 },
        { GLAMO_REG_CLOCK_MEMORY,       0x0000 },
        { GLAMO_REG_CLOCK_LCD,          0x0000 },
        { GLAMO_REG_CLOCK_MMC,          0x0000 },
#if 0
/* unused engines must be left in reset to stop MMC block read "blackouts" */
        { GLAMO_REG_CLOCK_ISP,          0x0000 },
        { GLAMO_REG_CLOCK_ISP,          0x0000 },
        { GLAMO_REG_CLOCK_JPEG,         0x0000 },
        { GLAMO_REG_CLOCK_3D,           0x0000 },
        { GLAMO_REG_CLOCK_3D,           0x0000 },
        { GLAMO_REG_CLOCK_2D,           0x0000 },
        { GLAMO_REG_CLOCK_2D,           0x0000 },
        { GLAMO_REG_CLOCK_RISC1,        0x0000 },
        { GLAMO_REG_CLOCK_MPEG,         0x0000 },
        { GLAMO_REG_CLOCK_MPEG,         0x0000 },
#endif
        { GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
        { GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
        { 0xfffd, 0 },
        /*
         * b9 of this register MUST be zero to get any interrupts on INT#
         * the other set bits enable all the engine interrupt sources
         */
        { GLAMO_REG_IRQ_ENABLE,         0x01ff },
        { GLAMO_REG_CLOCK_GEN6,         0x2000 },
        { GLAMO_REG_CLOCK_GEN7,         0x0101 },
        { GLAMO_REG_CLOCK_GEN8,         0x0100 },
        { GLAMO_REG_CLOCK_HOST,         0x000d },
        /*
         * b7..b4 = 0 = no wait states on read or write
         * b0 = 1 select PLL2 for Host interface, b1 = enable it
         */
        { 0x200,        0x0e03 /* this is replaced by script parser */ },
        { 0x202,        0x07ff },
        { 0x212,        0x0000 },
        { 0x214,        0x4000 },
        { 0x216,        0xf00e },

        /* S-Media recommended "set tiling mode to 512 mode for memory access
         * more efficiency when 640x480" */
        { GLAMO_REG_MEM_TYPE,           0x0c74 }, /* 8MB, 16 word pg wr+rd */
        { GLAMO_REG_MEM_GEN,            0xafaf }, /* 63 grants min + max */

        { GLAMO_REGOFS_HOSTBUS + 2,     0xffff }, /* enable on MMIO */

        { GLAMO_REG_MEM_TIMING1,        0x0108 },
        { GLAMO_REG_MEM_TIMING2,        0x0010 }, /* Taa = 3 MCLK */
        { GLAMO_REG_MEM_TIMING3,        0x0000 },
        { GLAMO_REG_MEM_TIMING4,        0x0000 }, /* CE1# delay fall/rise */
        { GLAMO_REG_MEM_TIMING5,        0x0000 }, /* UB# LB# */
        { GLAMO_REG_MEM_TIMING6,        0x0000 }, /* OE# */
        { GLAMO_REG_MEM_TIMING7,        0x0000 }, /* WE# */
        { GLAMO_REG_MEM_TIMING8,        0x1002 }, /* MCLK delay, was 0x1000 */
        { GLAMO_REG_MEM_TIMING9,        0x6006 },
        { GLAMO_REG_MEM_TIMING10,       0x00ff },
        { GLAMO_REG_MEM_TIMING11,       0x0001 },
        { GLAMO_REG_MEM_POWER1,         0x0020 },
        { GLAMO_REG_MEM_POWER2,         0x0000 },
        { GLAMO_REG_MEM_DRAM1,          0x0000 },
                { 0xfffe, 1 },
        { GLAMO_REG_MEM_DRAM1,          0xc100 },
                { 0xfffe, 1 },
        { GLAMO_REG_MEM_DRAM1,          0xe100 },
        { GLAMO_REG_MEM_DRAM2,          0x01d6 },
        { GLAMO_REG_CLOCK_MEMORY,       0x000b },
        { GLAMO_REG_GPIO_GEN1,          0x000f },
        { GLAMO_REG_GPIO_GEN2,          0x111e },
        { GLAMO_REG_GPIO_GEN3,          0xccc3 },
        { GLAMO_REG_GPIO_GEN4,          0x111e },
        { GLAMO_REG_GPIO_GEN5,          0x000f },
};
#if 0
static struct glamo_script glamo_resume_script[] = {

        { GLAMO_REG_PLL_GEN1,           0x05db },       /* 48MHz */
        { GLAMO_REG_PLL_GEN3,           0x0aba },       /* 90MHz */
        { GLAMO_REG_DFT_GEN6, 1 },
                { 0xfffe, 100 },
                { 0xfffd, 0 },
        { 0x200,        0x0e03 },

        /*
         * b9 of this register MUST be zero to get any interrupts on INT#
         * the other set bits enable all the engine interrupt sources
         */
        { GLAMO_REG_IRQ_ENABLE,         0x01ff },
        { GLAMO_REG_CLOCK_HOST,         0x0018 },
        { GLAMO_REG_CLOCK_GEN5_1, 0x18b1 },

        { GLAMO_REG_MEM_DRAM1,          0x0000 },
                { 0xfffe, 1 },
        { GLAMO_REG_MEM_DRAM1,          0xc100 },
                { 0xfffe, 1 },
        { GLAMO_REG_MEM_DRAM1,          0xe100 },
        { GLAMO_REG_MEM_DRAM2,          0x01d6 },
        { GLAMO_REG_CLOCK_MEMORY,       0x000b },
};
#endif

enum glamo_power {
        GLAMO_POWER_ON,
        GLAMO_POWER_SUSPEND,
};

static void glamo_power(struct glamo_core *glamo,
                        enum glamo_power new_state)
{
        int n;
        unsigned long flags;

        spin_lock_irqsave(&glamo->lock, flags);

        dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);

        /*
Power management
static const REG_VALUE_MASK_TYPE reg_powerOn[] =
{
    { REG_GEN_DFT6,     REG_BIT_ALL,    REG_DATA(1u << 0)           },
    { REG_GEN_PLL3,     0u,             REG_DATA(1u << 13)          },
    { REG_GEN_MEM_CLK,  REG_BIT_ALL,    REG_BIT_EN_MOCACLK          },
    { REG_MEM_DRAM2,    0u,             REG_BIT_EN_DEEP_POWER_DOWN  },
    { REG_MEM_DRAM1,    0u,             REG_BIT_SELF_REFRESH        }
};

static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
{
    { REG_MEM_DRAM1,    REG_BIT_ALL,    REG_BIT_SELF_REFRESH    },
    { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK      },
    { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)      },
    { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)       }
};

static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
{
    { REG_MEM_DRAM2,    REG_BIT_ALL,    REG_BIT_EN_DEEP_POWER_DOWN  },
    { REG_GEN_MEM_CLK,  0u,             REG_BIT_EN_MOCACLK          },
    { REG_GEN_PLL3,     REG_BIT_ALL,    REG_DATA(1u << 13)          },
    { REG_GEN_DFT5,     REG_BIT_ALL,    REG_DATA(1u << 0)           }
};
*/

        switch (new_state) {
        case GLAMO_POWER_ON:

                /*
                 * glamo state on resume is nondeterministic in some
                 * fundamental way, it has also been observed that the
                 * Glamo reset pin can get asserted by, eg, touching it with
                 * a scope probe.  So the only answer is to roll with it and
                 * force an external reset on the Glamo during resume.
                 */

                (glamo->pdata->glamo_external_reset)(0);
                udelay(10);
                (glamo->pdata->glamo_external_reset)(1);
                mdelay(5);

                glamo_run_script(glamo, glamo_init_script,
                         ARRAY_SIZE(glamo_init_script), 0);

                break;

        case GLAMO_POWER_SUSPEND:

                /* nuke interrupts */
                __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);

                /* stash a copy of which engines were running */
                glamo->engine_enabled_bitfield_suspend =
                                                 glamo->engine_enabled_bitfield;

                /* take down each engine before we kill mem and pll */
                for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
                        if (glamo->engine_enabled_bitfield & (1 << n))
                                __glamo_engine_disable(glamo, n);

                /* enable self-refresh */

                __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
                                        GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
                                        GLAMO_MEM_DRAM1_EN_GATE_CKE |
                                        GLAMO_MEM_DRAM1_SELF_REFRESH |
                                        GLAMO_MEM_REFRESH_COUNT);
                __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
                                        GLAMO_MEM_DRAM1_EN_MODEREG_SET |
                                        GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
                                        GLAMO_MEM_DRAM1_EN_GATE_CKE |
                                        GLAMO_MEM_DRAM1_SELF_REFRESH |
                                        GLAMO_MEM_REFRESH_COUNT);

                /* force RAM into deep powerdown */

                __reg_write(glamo, GLAMO_REG_MEM_DRAM2,
                                        GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
                                        (7 << 6) | /* tRC */
                                        (1 << 4) | /* tRP */
                                        (1 << 2) | /* tRCD */
                                        2); /* CAS latency */

                /* disable clocks to memory */
                __reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);

                /* all dividers from OSCI */
                __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);

                /* PLL2 into bypass */
                __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);

                __reg_write(glamo, 0x200, 0x0e00);

                /* kill PLLs 1 then 2 */
                __reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
                __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);

                break;
        }

        spin_unlock_irqrestore(&glamo->lock, flags);
}

#if 0
#define MEMDETECT_RETRY 6
static unsigned int detect_memsize(struct glamo_core *glamo)
{
        int i;

        /*static const u_int16_t pattern[] = {
                0x1111, 0x8a8a, 0x2222, 0x7a7a,
                0x3333, 0x6a6a, 0x4444, 0x5a5a,
                0x5555, 0x4a4a, 0x6666, 0x3a3a,
                0x7777, 0x2a2a, 0x8888, 0x1a1a
        }; */

        for (i = 0; i < MEMDETECT_RETRY; i++) {
                switch (glamo->type) {
                case 3600:
                        __reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
                        __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
                        break;
                case 3650:
                        switch (glamo->revision) {
                        case GLAMO_CORE_REV_A0:
                                if (i & 1)
                                        __reg_write(glamo, GLAMO_REG_MEM_TYPE,
                                                    0x097a);
                                else
                                        __reg_write(glamo, GLAMO_REG_MEM_TYPE,
                                                    0x0173);

                                __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
                                msleep(1);
                                __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
                                break;
                        default:
                                if (i & 1)
                                        __reg_write(glamo, GLAMO_REG_MEM_TYPE,
                                                    0x0972);
                                else
                                        __reg_write(glamo, GLAMO_REG_MEM_TYPE,
                                                    0x0872);

                                __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
                                msleep(1);
                                __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
                                break;
                        }
                        break;
                case 3700:
                        /* FIXME */
                default:
                        break;
                }

#if 0
                /* FIXME: finish implementation */
                for (j = 0; j < 8; j++) {
                        __
#endif
        }

        return 0;
}
#endif

/* Find out if we can support this version of the Glamo chip */
static int glamo_supported(struct glamo_core *glamo)
{
        u_int16_t dev_id, rev_id; /*, memsize; */

        dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
        rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);

        switch (dev_id) {
        case 0x3650:
                switch (rev_id) {
                case GLAMO_CORE_REV_A2:
                        break;
                case GLAMO_CORE_REV_A0:
                case GLAMO_CORE_REV_A1:
                case GLAMO_CORE_REV_A3:
                        dev_warn(&glamo->pdev->dev, "untested core revision "
                                 "%04x, your mileage may vary\n", rev_id);
                        break;
                default:
                        dev_warn(&glamo->pdev->dev, "unknown glamo revision "
                                 "%04x, your mileage may vary\n", rev_id);
                        /* maybe should abort ? */
                }
                break;
        case 0x3600:
        case 0x3700:
        default:
                dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
                        dev_id);
                return 0;
        }

        dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
                 "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
                 glamo_pll_rate(glamo, GLAMO_PLL1),
                 glamo_pll_rate(glamo, GLAMO_PLL2));

        return 1;
}

static int __init glamo_probe(struct platform_device *pdev)
{
        int rc = 0, irq;
        struct glamo_core *glamo;
        struct platform_device *glamo_mmc_dev;

        if (glamo_handle) {
                dev_err(&pdev->dev,
                        "This driver supports only one instance\n");
                return -EBUSY;
        }

        glamo = kzalloc(sizeof(*glamo), GFP_KERNEL);
        if (!glamo)
                return -ENOMEM;

        spin_lock_init(&glamo->lock);
        glamo_handle = glamo;
        glamo->pdev = pdev;
        glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        glamo->irq = platform_get_irq(pdev, 0);
        glamo->pdata = pdev->dev.platform_data;
        if (!glamo->mem || !glamo->pdata) {
                dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
                rc = -ENOENT;
                goto bail_free;
        }

        /* register a number of sibling devices whose IOMEM resources
         * are siblings of pdev's IOMEM resource */
#if 0
        glamo_core_dev.dev.parent = &pdev.dev;
        mangle_mem_resources(glamo_core_dev.resources,
                             glamo_core_dev.num_resources, glamo->mem);
        glamo_core_dev.resources[1].start = glamo->irq;
        glamo_core_dev.resources[1].end = glamo->irq;
        platform_device_register(&glamo_core_dev);
#endif
        /* only remap the generic, hostbus and memory controller registers */
        glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
        if (!glamo->base) {
                dev_err(&pdev->dev, "failed to ioremap() memory region\n");
                rc = -ENOMEM;
                goto bail_free;
        }

        platform_set_drvdata(pdev, glamo);

        (glamo->pdata->glamo_external_reset)(0);
        udelay(10);
        (glamo->pdata->glamo_external_reset)(1);
        mdelay(10);

        /*
         * finally set the mfd interrupts up
         * can't do them earlier or sibling probes blow up
         */

        for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
                set_irq_chip(irq, &glamo_irq_chip);
                set_irq_handler(irq, handle_level_irq);
                set_irq_flags(irq, IRQF_VALID);
        }

        if (glamo->pdata->glamo_irq_is_wired &&
            !glamo->pdata->glamo_irq_is_wired()) {
                set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
                set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
                dev_info(&pdev->dev, "Glamo interrupt registered\n");
                glamo->irq_works = 1;
        } else {
                dev_err(&pdev->dev, "Glamo interrupt not used\n");
                glamo->irq_works = 0;
        }

        /* confirm it isn't an insane version */
        if (!glamo_supported(glamo)) {
                dev_err(&pdev->dev, "This Glamo is not supported\n");
                rc = -ENODEV;
                goto bail_irq;
        }

        /* sysfs */
        rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
        if (rc < 0) {
                dev_err(&pdev->dev, "cannot create sysfs group\n");
                goto bail_irq;
        }

        /* init the chip with canned register set */

        dev_dbg(&glamo->pdev->dev, "running init script\n");
        glamo_run_script(glamo, glamo_init_script,
                         ARRAY_SIZE(glamo_init_script), 1);

        dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
                 glamo_pll_rate(glamo, GLAMO_PLL1),
                 glamo_pll_rate(glamo, GLAMO_PLL2));

        /* bring MCI specific stuff over from our MFD platform data */
        glamo_mci_def_pdata.glamo_can_set_mci_power =
                                        glamo->pdata->glamo_can_set_mci_power;
        glamo_mci_def_pdata.glamo_mci_use_slow =
                                        glamo->pdata->glamo_mci_use_slow;
        glamo_mci_def_pdata.glamo_irq_is_wired =
                                        glamo->pdata->glamo_irq_is_wired;

        /* start creating the siblings */
        glamo->pdata->glamo = glamo;

        /* Command queue device (for DRM) */
        glamo_graphics_dev.dev.parent = &pdev->dev;
        glamo_graphics_dev.dev.platform_data = glamo->pdata;
        mangle_mem_resources(glamo_graphics_dev.resource,
                             glamo_graphics_dev.num_resources, glamo->mem);
        platform_device_register(&glamo_graphics_dev);

        /* GPIO */
        glamo->pdata->spigpio_info->glamo = glamo;
        glamo_spigpio_dev.dev.parent = &pdev->dev;
        glamo_spigpio_dev.dev.platform_data = glamo->pdata->spigpio_info;
        platform_device_register(&glamo_spigpio_dev);

        /* MMC */
        glamo_mmc_dev = glamo->pdata->mmc_dev;
        glamo_mmc_dev->name = "glamo-mci";
        glamo_mmc_dev->dev.parent = &pdev->dev;
        glamo_mmc_dev->resource = glamo_mmc_resources;
        glamo_mmc_dev->num_resources = ARRAY_SIZE(glamo_mmc_resources);
        glamo_mci_def_pdata.pglamo = glamo;
        mangle_mem_resources(glamo_mmc_dev->resource,
                             glamo_mmc_dev->num_resources, glamo->mem);
        platform_device_register(glamo_mmc_dev);

        /* Only request the generic, hostbus and memory controller MMIO */
        glamo->mem = request_mem_region(glamo->mem->start,
                                        GLAMO_REGOFS_VIDCAP, "glamo-core");
        if (!glamo->mem) {
                dev_err(&pdev->dev, "failed to request memory region\n");
                rc = -EBUSY;
                goto bail_irq;
        }

        return 0;

bail_irq:
        disable_irq(glamo->irq);
        set_irq_chained_handler(glamo->irq, NULL);

        for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
                set_irq_flags(irq, 0);
                set_irq_chip(irq, NULL);
        }

        iounmap(glamo->base);
bail_free:
        platform_set_drvdata(pdev, NULL);
        glamo_handle = NULL;
        kfree(glamo);

        return rc;
}

static int glamo_remove(struct platform_device *pdev)
{
        struct glamo_core *glamo = platform_get_drvdata(pdev);
        int irq;

        disable_irq(glamo->irq);
        set_irq_chained_handler(glamo->irq, NULL);

        for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
                set_irq_flags(irq, 0);
                set_irq_chip(irq, NULL);
        }

        platform_set_drvdata(pdev, NULL);
        platform_device_unregister(glamo->pdata->mmc_dev);
        /* FIXME: Don't we need to unregister these as well?
         * platform_device_unregister(glamo->pdata->graphics_dev);
         * platform_device_unregister(glamo->pdata->gpio_dev); */
        iounmap(glamo->base);
        release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
        glamo_handle = NULL;
        kfree(glamo);

        return 0;
}

#ifdef CONFIG_PM

static int glamo_suspend(struct platform_device *pdev, pm_message_t state)
{
        glamo_handle->suspending = 1;
        glamo_power(glamo_handle, GLAMO_POWER_SUSPEND);

        return 0;
}

static int glamo_resume(struct platform_device *pdev)
{
        glamo_power(glamo_handle, GLAMO_POWER_ON);
        glamo_handle->suspending = 0;

        return 0;
}

#else
#define glamo_suspend NULL
#define glamo_resume  NULL
#endif

static struct platform_driver glamo_driver = {
        .probe          = glamo_probe,
        .remove         = glamo_remove,
        .suspend        = glamo_suspend,
        .resume         = glamo_resume,
        .driver         = {
                .name   = "glamo3362",
                .owner  = THIS_MODULE,
        },
};

static int __init glamo_init(void)
{
        return platform_driver_register(&glamo_driver);
}

static void __exit glamo_cleanup(void)
{
        platform_driver_unregister(&glamo_driver);
}

module_init(glamo_init);
module_exit(glamo_cleanup);

MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
MODULE_LICENSE("GPL");