]> git.ipfire.org Git - people/arne_f/kernel.git/blob - drivers/clocksource/sh_cmt.c
NFSv4.1 handle ERR_DELAY error reclaiming locking state on delegation recall
[people/arne_f/kernel.git] / drivers / clocksource / sh_cmt.c
1 /*
2 * SuperH Timer Support - CMT
3 *
4 * Copyright (C) 2008 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/clk.h>
17 #include <linux/clockchips.h>
18 #include <linux/clocksource.h>
19 #include <linux/delay.h>
20 #include <linux/err.h>
21 #include <linux/init.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/ioport.h>
25 #include <linux/irq.h>
26 #include <linux/module.h>
27 #include <linux/of.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_domain.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/sh_timer.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
34
35 struct sh_cmt_device;
36
37 /*
38 * The CMT comes in 5 different identified flavours, depending not only on the
39 * SoC but also on the particular instance. The following table lists the main
40 * characteristics of those flavours.
41 *
42 * 16B 32B 32B-F 48B 48B-2
43 * -----------------------------------------------------------------------------
44 * Channels 2 1/4 1 6 2/8
45 * Control Width 16 16 16 16 32
46 * Counter Width 16 32 32 32/48 32/48
47 * Shared Start/Stop Y Y Y Y N
48 *
49 * The 48-bit gen2 version has a per-channel start/stop register located in the
50 * channel registers block. All other versions have a shared start/stop register
51 * located in the global space.
52 *
53 * Channels are indexed from 0 to N-1 in the documentation. The channel index
54 * infers the start/stop bit position in the control register and the channel
55 * registers block address. Some CMT instances have a subset of channels
56 * available, in which case the index in the documentation doesn't match the
57 * "real" index as implemented in hardware. This is for instance the case with
58 * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
59 * in the documentation but using start/stop bit 5 and having its registers
60 * block at 0x60.
61 *
62 * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
63 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
64 */
65
/* Hardware flavours, see the table in the comment block above. */
enum sh_cmt_model {
	SH_CMT_16BIT,		/* 16-bit counter, shared start/stop */
	SH_CMT_32BIT,		/* 32-bit counter, 16-bit control */
	SH_CMT_32BIT_FAST,	/* 32-bit variant at non-standard offset 0x40 */
	SH_CMT_48BIT,		/* 32/48-bit counter, 32-bit control */
	SH_CMT_48BIT_GEN2,	/* gen2: per-channel start/stop register */
};
73
/* Per-flavour constants and register accessors. */
struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	u32 overflow_bit;	/* CMF bit position in CMCSR */
	u32 clear_bits;		/* mask ANDed into CMCSR to ack an interrupt */

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};

/* State for one timer channel; may serve as clockevent, clocksource or both. */
struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;	/* per-channel start/stop block (gen2 only) */
	void __iomem *ioctrl;	/* channel control register block */

	unsigned int timer_bit;	/* start/stop bit position in CMSTR */
	unsigned long flags;	/* FLAG_* private flags, see below */
	u32 match_value;	/* value currently programmed in CMCOR */
	u32 next_match_value;	/* value to program on next reprogram */
	u32 max_match_value;	/* counter width limit */
	raw_spinlock_t lock;	/* protects flags and match values */
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;	/* cycles accumulated across counter wraps */
	bool cs_enabled;	/* clocksource currently enabled */
};

/* One CMT hardware instance with its channels. */
struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;

	void __iomem *mapbase;	/* global register block */
	struct clk *clk;	/* functional clock "fck" */
	unsigned long rate;	/* counter input rate after prescaler */

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;
	unsigned int hw_channels;	/* bitmask of populated hw channels */

	bool has_clockevent;
	bool has_clocksource;
};
130
/* CMCSR bits for the 16-bit flavour. */
#define SH_CMT16_CMCSR_CMF (1 << 7)		/* compare match flag */
#define SH_CMT16_CMCSR_CMIE (1 << 6)		/* compare match irq enable */
#define SH_CMT16_CMCSR_CKS8 (0 << 0)		/* clock select: Pphi/8 */
#define SH_CMT16_CMCSR_CKS32 (1 << 0)		/* clock select: Pphi/32 */
#define SH_CMT16_CMCSR_CKS128 (2 << 0)		/* clock select: Pphi/128 */
#define SH_CMT16_CMCSR_CKS512 (3 << 0)		/* clock select: Pphi/512 */
#define SH_CMT16_CMCSR_CKS_MASK (3 << 0)

/* CMCSR bits for the 32-bit flavours. */
#define SH_CMT32_CMCSR_CMF (1 << 15)		/* compare match flag */
#define SH_CMT32_CMCSR_OVF (1 << 14)		/* overflow flag */
#define SH_CMT32_CMCSR_WRFLG (1 << 13)		/* write pending flag */
#define SH_CMT32_CMCSR_STTF (1 << 12)
#define SH_CMT32_CMCSR_STPF (1 << 11)
#define SH_CMT32_CMCSR_SSIE (1 << 10)
#define SH_CMT32_CMCSR_CMS (1 << 9)
#define SH_CMT32_CMCSR_CMM (1 << 8)		/* free-running vs one-shot */
#define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE (0 << 4)
#define SH_CMT32_CMCSR_CMR_DMA (1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ (2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK (3 << 4)
#define SH_CMT32_CMCSR_DBGIVD (1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0)	/* clock select: RCLK/8 */
#define SH_CMT32_CMCSR_CKS_RCLK32 (5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
158
159 static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
160 {
161 return ioread16(base + (offs << 1));
162 }
163
164 static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
165 {
166 return ioread32(base + (offs << 2));
167 }
168
169 static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
170 {
171 iowrite16(value, base + (offs << 1));
172 }
173
174 static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
175 {
176 iowrite32(value, base + (offs << 2));
177 }
178
/* Per-model constants; indexed by enum sh_cmt_model. */
static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		/* 16-bit control registers, 32-bit counter registers */
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_32BIT_FAST] = {
		.model = SH_CMT_32BIT_FAST,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		/* all registers are 32 bits wide on the 48-bit flavours */
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT_GEN2] = {
		.model = SH_CMT_48BIT_GEN2,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};

/* Register indices within a channel block, scaled by the accessors above. */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
235
236 static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
237 {
238 if (ch->iostart)
239 return ch->cmt->info->read_control(ch->iostart, 0);
240 else
241 return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
242 }
243
244 static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
245 {
246 if (ch->iostart)
247 ch->cmt->info->write_control(ch->iostart, 0, value);
248 else
249 ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
250 }
251
/* Channel control/status register (CMCSR). */
static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}

/* Free-running counter register (CMCNT). */
static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}

/* Compare match (match value) register (CMCOR). Write-only in this driver. */
static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
276
/*
 * Read a stable snapshot of the counter, together with the overflow
 * (compare-match) flag. The counter runs asynchronously to the CPU, so a
 * single read may tear; three reads plus an overflow-flag check on either
 * side are used to detect and retry an unstable sample.
 *
 * Returns the counter value; *has_wrapped is non-zero if the compare-match
 * flag was set (counter wrapped past the match value).
 */
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
		/* retry if the flag changed or the three reads aren't ordered */
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
297
298 static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
299 {
300 unsigned long flags;
301 u32 value;
302
303 /* start stop register shared by multiple timer channels */
304 raw_spin_lock_irqsave(&ch->cmt->lock, flags);
305 value = sh_cmt_read_cmstr(ch);
306
307 if (start)
308 value |= 1 << ch->timer_bit;
309 else
310 value &= ~(1 << ch->timer_bit);
311
312 sh_cmt_write_cmstr(ch, value);
313 raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
314 }
315
/*
 * Power up and start a channel: take a runtime PM reference, enable the
 * functional clock, program periodic mode with the maximum timeout, clear
 * the counter and set the start bit.
 *
 * Returns 0 on success or a negative error code; on failure all acquired
 * resources up to the failing step are released (but note the runtime PM
 * reference is only dropped in sh_cmt_disable()).
 */
static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMTOUT_IE |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	/* the counter never read back as zero: the clear did not take effect */
	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}
382
/*
 * Stop a channel and release the resources taken in sh_cmt_enable():
 * clear the start bit, mask interrupts, gate the clock and drop the
 * runtime PM reference (teardown in reverse order of bring-up).
 */
static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}
397
/* private flags (stored in ch->flags, protected by ch->lock) */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel used as clockevent */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel used as clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* isr must reprogram the match value */
#define FLAG_SKIPEVENT (1 << 3)		/* isr must not dispatch next event */
#define FLAG_IRQCONTEXT (1 << 4)	/* currently executing in the isr */
404
/*
 * Program ch->next_match_value into CMCOR and verify, by re-reading the
 * counter afterwards, that the match was not set behind (or too close to)
 * the running count. On a too-close programming the match is retried with
 * an exponentially growing safety delay. Races with a concurrent wrap are
 * resolved by deferring to the interrupt handler via FLAG_SKIPEVENT.
 *
 * @absolute: non-zero to program the match relative to counter zero
 *            instead of the current counter value.
 *
 * Caller must hold ch->lock.
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay overflowed to zero: give up with a warning */
		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}
490
/*
 * Program a new match delta relative to the current counter value.
 * Caller must hold ch->lock.
 */
static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

/* Locked wrapper around __sh_cmt_set_next(). */
static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
509
/*
 * Per-channel interrupt handler: acknowledge the compare match, account
 * the elapsed cycles for the clocksource, dispatch the clockevent handler
 * and reprogram the match value when FLAG_REPROGRAM is set.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	/* lets sh_cmt_clock_event_next() avoid reprogramming from here */
	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
557
/*
 * Mark a channel as used by a clockevent and/or clocksource (flag is
 * FLAG_CLOCKEVENT or FLAG_CLOCKSOURCE) and power it up if this is the
 * first user. Returns 0 on success or a negative error code.
 */
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	/* enable the hardware only on the first user */
	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

/*
 * Drop one usage flag from the channel and power it down when the last
 * user (clockevent or clocksource) goes away.
 */
static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	/* disable only if the channel was in use and no user remains */
	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}
600
/* Map a clocksource back to its owning channel. */
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

/*
 * Clocksource read callback: combine the cycles accumulated by the isr
 * (total_cycles) with the current raw counter value. If the counter has
 * already wrapped but the isr has not run yet, account one full period
 * by hand so the returned value stays monotonic.
 */
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	unsigned long flags;
	u32 has_wrapped;
	u64 value;
	u32 raw;

	raw_spin_lock_irqsave(&ch->lock, flags);
	value = ch->total_cycles;
	raw = sh_cmt_get_counter(ch, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += ch->match_value + 1;
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return value + raw;
}
624
625 static int sh_cmt_clocksource_enable(struct clocksource *cs)
626 {
627 int ret;
628 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
629
630 WARN_ON(ch->cs_enabled);
631
632 ch->total_cycles = 0;
633
634 ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
635 if (!ret)
636 ch->cs_enabled = true;
637
638 return ret;
639 }
640
/* Clocksource disable callback: stop the channel and clear the flag. */
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

/*
 * System suspend: stop the channel (if running) before powering off the
 * power domain. cs_enabled is kept set so resume knows to restart.
 */
static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}

/* System resume: power the domain back on, then restart the channel. */
static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
672
673 static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
674 const char *name)
675 {
676 struct clocksource *cs = &ch->cs;
677
678 cs->name = name;
679 cs->rating = 125;
680 cs->read = sh_cmt_clocksource_read;
681 cs->enable = sh_cmt_clocksource_enable;
682 cs->disable = sh_cmt_clocksource_disable;
683 cs->suspend = sh_cmt_clocksource_suspend;
684 cs->resume = sh_cmt_clocksource_resume;
685 cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
686 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
687
688 dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
689 ch->index);
690
691 clocksource_register_hz(cs, ch->cmt->rate);
692 return 0;
693 }
694
/* Map a clock_event_device back to its owning channel. */
static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

/*
 * Start the channel as a clockevent. In periodic mode the match delta is
 * one tick (rate / HZ, rounded to nearest, minus one because the match
 * fires at value + 1); in oneshot mode the maximum timeout is programmed
 * until set_next_event() supplies a real delta.
 */
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}
709
/* Clockevent shutdown callback: release the channel. */
static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
	return 0;
}

/*
 * Common helper for the periodic/oneshot state callbacks: tear down the
 * previous mode (if any) and restart the channel in the requested one.
 */
static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_cmt_clock_event_start(ch, periodic);
	return 0;
}

static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}

static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}
742
/*
 * Oneshot set_next_event callback. When called from within our own isr
 * (FLAG_IRQCONTEXT) only next_match_value is updated and the isr performs
 * the actual reprogramming; otherwise the hardware is reprogrammed here
 * under the channel lock. delta - 1 because the match fires at value + 1.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(!clockevent_state_oneshot(ced));
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}
756
/* Clockevent suspend: power off the domain and unprepare the clock. */
static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}

/* Clockevent resume: re-prepare the clock, then power the domain on. */
static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}
772
/*
 * Request the channel interrupt and register the clock_event_device.
 * Returns 0 on success or a negative error code from platform_get_irq()
 * or request_irq().
 */
static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return irq;
	}

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	/* TODO: calculate good shift from rate and counter bit width */
	ced->shift = 32;
	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->max_delta_ticks = ch->max_match_value;
	/* 0x1f: minimum delta, guards against too-close reprogramming */
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
	ced->min_delta_ticks = 0x1f;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}
822
823 static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
824 bool clockevent, bool clocksource)
825 {
826 int ret;
827
828 if (clockevent) {
829 ch->cmt->has_clockevent = true;
830 ret = sh_cmt_register_clockevent(ch, name);
831 if (ret < 0)
832 return ret;
833 }
834
835 if (clocksource) {
836 ch->cmt->has_clocksource = true;
837 sh_cmt_register_clocksource(ch, name);
838 }
839
840 return 0;
841 }
842
/*
 * Initialize one channel: compute its register addresses from the model,
 * derive the maximum match value from the counter width and register it
 * as clockevent and/or clocksource. Channels with neither role are
 * skipped. Returns 0 on success or a negative error code.
 */
static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 */
	switch (cmt->info->model) {
	case SH_CMT_16BIT:
		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
		break;
	case SH_CMT_32BIT:
	case SH_CMT_48BIT:
		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
		break;
	case SH_CMT_32BIT_FAST:
		/*
		 * The 32-bit "fast" timer has a single channel at hwidx 5 but
		 * is located at offset 0x40 instead of 0x60 for some reason.
		 */
		ch->ioctrl = cmt->mapbase + 0x40;
		break;
	case SH_CMT_48BIT_GEN2:
		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
		ch->ioctrl = ch->iostart + 0x10;
		break;
	}

	/* avoid an undefined full-width shift when width == 32 */
	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	/* gen2 channels have a private CMSTR, start/stop is always bit 0 */
	ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}
904
/*
 * Map the device's first MEM resource into cmt->mapbase.
 * Returns 0 on success, -ENXIO if the resource is missing or the
 * remap fails.
 */
static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
	struct resource *mem;

	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	return 0;
}
923
/* Legacy platform-device name matching (non-DT platforms). */
static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);

/* Device-tree matching; .data points at the model descriptor. */
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{ .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
	{ .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
	{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
	{ .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);

/* Read the populated-channel bitmask from the device tree. */
static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
{
	struct device_node *np = cmt->pdev->dev.of_node;

	return of_property_read_u32(np, "renesas,channels-mask",
				    &cmt->hw_channels);
}
947
948 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
949 {
950 unsigned int mask;
951 unsigned int i;
952 int ret;
953
954 cmt->pdev = pdev;
955 raw_spin_lock_init(&cmt->lock);
956
957 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
958 const struct of_device_id *id;
959
960 id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
961 cmt->info = id->data;
962
963 ret = sh_cmt_parse_dt(cmt);
964 if (ret < 0)
965 return ret;
966 } else if (pdev->dev.platform_data) {
967 struct sh_timer_config *cfg = pdev->dev.platform_data;
968 const struct platform_device_id *id = pdev->id_entry;
969
970 cmt->info = (const struct sh_cmt_info *)id->driver_data;
971 cmt->hw_channels = cfg->channels_mask;
972 } else {
973 dev_err(&cmt->pdev->dev, "missing platform data\n");
974 return -ENXIO;
975 }
976
977 /* Get hold of clock. */
978 cmt->clk = clk_get(&cmt->pdev->dev, "fck");
979 if (IS_ERR(cmt->clk)) {
980 dev_err(&cmt->pdev->dev, "cannot get clock\n");
981 return PTR_ERR(cmt->clk);
982 }
983
984 ret = clk_prepare(cmt->clk);
985 if (ret < 0)
986 goto err_clk_put;
987
988 /* Determine clock rate. */
989 ret = clk_enable(cmt->clk);
990 if (ret < 0)
991 goto err_clk_unprepare;
992
993 if (cmt->info->width == 16)
994 cmt->rate = clk_get_rate(cmt->clk) / 512;
995 else
996 cmt->rate = clk_get_rate(cmt->clk) / 8;
997
998 clk_disable(cmt->clk);
999
1000 /* Map the memory resource(s). */
1001 ret = sh_cmt_map_memory(cmt);
1002 if (ret < 0)
1003 goto err_clk_unprepare;
1004
1005 /* Allocate and setup the channels. */
1006 cmt->num_channels = hweight8(cmt->hw_channels);
1007 cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels),
1008 GFP_KERNEL);
1009 if (cmt->channels == NULL) {
1010 ret = -ENOMEM;
1011 goto err_unmap;
1012 }
1013
1014 /*
1015 * Use the first channel as a clock event device and the second channel
1016 * as a clock source. If only one channel is available use it for both.
1017 */
1018 for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
1019 unsigned int hwidx = ffs(mask) - 1;
1020 bool clocksource = i == 1 || cmt->num_channels == 1;
1021 bool clockevent = i == 0;
1022
1023 ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
1024 clockevent, clocksource, cmt);
1025 if (ret < 0)
1026 goto err_unmap;
1027
1028 mask &= ~(1 << hwidx);
1029 }
1030
1031 platform_set_drvdata(pdev, cmt);
1032
1033 return 0;
1034
1035 err_unmap:
1036 kfree(cmt->channels);
1037 iounmap(cmt->mapbase);
1038 err_clk_unprepare:
1039 clk_unprepare(cmt->clk);
1040 err_clk_put:
1041 clk_put(cmt->clk);
1042 return ret;
1043 }
1044
/*
 * Probe entry point. The driver may have already been probed as an early
 * platform device ("earlytimer"); in that case drvdata is already set and
 * only the runtime PM state needs updating.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		/* already set up during the earlytimer pass */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* timers servicing interrupts need irq-safe runtime PM */
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

/* Removal is refused: registered clockevents/clocksources can't go away. */
static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
1086
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
	},
	.id_table	= sh_cmt_id_table,
};

/* Regular (non-earlytimer) driver registration. */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

/* Allow probing early in boot so the timer is available before initcalls. */
early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");