/* (removed "Newer"/"Older" web-navigation residue left over from extraction) */
/*******************************************************************************
*
* Copyright(c) 2016 Intel Corporation.
* Lei Chuanhua <chuanhua.lei@intel.com>
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
******************************************************************************/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/time.h>
#define MS(_v, _f) (((_v) & (_f)) >> _f##_S)
#define SM(_v, _f) (((_v) << _f##_S) & (_f))
#define CLC_DIS BIT(0)
#define CLC_SUSPEND BIT(4)
#define CLC_RMC BIT(8)
#define CLC_SMC BIT(16)
#define ID_VER 0x1Fu
#define ID_VER_S 0
#define ID_CFG 0xE0u
#define ID_CFG_S 5
#define ID_ID 0xFF00u
#define ID_ID_S 8
#define GPTC_CON(X) (0x10 + (X) * 0x20)
#define CON_EN_STAT BIT(0) /* RO only */
#define CON_COUNT_UP BIT(1) /* Down up or down */
#define CON_CNT BIT(2)
#define CON_ONESHOT BIT(3) /* Stop or continue when overflowing */
#define CON_EXT BIT(4) /* 32 or 16 bit */
#define CON_EDGE_RISE BIT(6)
#define CON_EDGE_FALL BIT(7)
#define CON_EDGE_ANY (CON_EDGE_RISE | CON_EDGE_FALL)
#define CON_SYNC BIT(8) /* Signal sync to module clock or not */
#define GPTC_RUN(X) (0x18 + (X) * 0x20)
#define RUN_EN BIT(0)
#define RUN_STOP BIT(1)
#define RUN_RELOAD BIT(2)
#define GPTC_RLD(X) (0x20 + (X) * 0x20)
#define GPTC_CNT(X) (0x28 + (X) * 0x20)
#define GPTC_IRNEN 0xf4
#define GPTC_IRNICR 0xf8
#define GPTC_IRNCR 0xfc
enum gptc_cnt_dir {
GPTC_COUNT_DOWN = 0,
GPTC_COUNT_UP,
};
enum gptc_timer_type {
TIMER_TYPE_CLK_SRC,
TIMER_TYPE_CLK_EVT,
TIMER_TYPE_WDT,
TIMER_TYPE_MAX,
};
/* Hardware GPTC module: one register block containing several timers */
struct gptc {
	u32 id;			/* module index from the "timer" alias */
	struct device_node *np;	/* DT node backing this module */
	void __iomem *base;	/* mapped register base */
	unsigned long phy_base;	/* physical register base (exported for VDSO) */
	struct clk *fpiclk;	/* frequency (FPI bus) clock */
	struct clk *gateclk;	/* module gate clock */
	u32 fpifreq;		/* cached rate of fpiclk, in Hz */
	spinlock_t lock;	/* serializes shared-register RMW (e.g. IRNEN) */
	struct list_head parent; /* Timers belonging to itself */
	struct list_head next;	/* Link to next GPTC */
	struct kref ref;	/* released via __gptc_release() */
};
struct gptc *gptc; /* Point back to parent */
void __iomem *base;
unsigned long phy_base;
u32 gptcid;
u32 tid; /* 0, 1, 2 only */
u32 cpuid;
u32 irq;
u32 frequency;
enum gptc_cnt_dir dir;
bool used;
struct list_head child; /* Node in parent list */
struct list_head clksrc; /* Node in clock source list */
struct list_head clkevt; /* Node in clock event list */
struct list_head wdt; /* Node in watchdog timer */
struct clocksource cs;
};
/*
 * Per-CPU clock event wrapper.  The 'timer' member was missing in this
 * copy although it is dereferenced (gptce->timer) by the interrupt
 * handler and by gptc_clockevent_cpu_init()/_exit(); restored here.
 */
struct gptc_clockevent {
	u32 ticks_per_jiffy;		/* reload value for periodic mode */
	struct gptc_timer *timer;	/* backing hardware timer */
	struct clock_event_device ce;
	char name[16];			/* "gptc_event<cpu>" */
};
static LIST_HEAD(gptc_list);
static LIST_HEAD(gptc_clksrc_list);
static LIST_HEAD(gptc_clkevt_list);
static LIST_HEAD(gptc_wdt_list);
unsigned long gptc_phy_base;
static bool gptc_clksrc_init;
static DEFINE_PER_CPU(struct gptc_clockevent, gptc_event_device);
/* Map a clock_event_device back to its enclosing gptc_clockevent */
static inline struct gptc_clockevent *to_gptc_clockevent(
	struct clock_event_device *evt)
{
	return container_of(evt, struct gptc_clockevent, ce);
}
static inline struct gptc_timer *clkevt_to_gptc_timer(
struct clock_event_device *evt)
{
struct gptc_clockevent *gptce = container_of(evt,
struct gptc_clockevent, ce);
}
static inline struct gptc_timer *clksrc_to_gptc_timer(struct clocksource *src)
{
struct gptc_clocksource *gptcs = container_of(src,
struct gptc_clocksource, cs);
}
/* Read the 32-bit GPTC register at byte offset @offs */
static inline u32 gptc_readl(struct gptc_timer *timer, u32 offs)
{
	return ioread32(timer->base + offs);
}
/* Write @val to the 32-bit GPTC register at byte offset @offs */
static inline void gptc_writel(struct gptc_timer *timer, unsigned long val,
			       u32 offs)
{
	iowrite32(val, timer->base + offs);
}
/* Current raw counter value of this timer (GPTC_CNT register) */
static u32 gptc_read_counter(struct gptc_timer *timer)
{
	return gptc_readl(timer, GPTC_CNT(timer->tid));
}
/* Stop counting (RUN.STOP is a write-only command bit) */
static inline void gptc_stop_counter(struct gptc_timer *timer)
{
	gptc_writel(timer, RUN_STOP, GPTC_RUN(timer->tid));
}
/* Program the reload value used when the counter is (re)started */
static inline void gptc_reload_counter(struct gptc_timer *timer,
				       unsigned long cycles)
{
	gptc_writel(timer, cycles, GPTC_RLD(timer->tid));
}
/* Clear the reload value, i.e. restart from zero */
static inline void gptc_reset_counter(struct gptc_timer *timer)
{
	gptc_reload_counter(timer, 0);
}
/* Start counting without touching the reload value */
static inline void gptc_start_counter(struct gptc_timer *timer)
{
	gptc_writel(timer, RUN_EN, GPTC_RUN(timer->tid));
}
/* Latch the reload value into the counter and start in one write */
static inline void gptc_reload_and_run(struct gptc_timer *timer)
{
	gptc_writel(timer, RUN_EN | RUN_RELOAD, GPTC_RUN(timer->tid));
}
/*
 * Enable the module clock control: suspend-enable + request-mode-clocking.
 * NOTE(review): GPTC_CLC is not defined in this copy of the file --
 * presumably offset 0x00; confirm against the original header.
 */
static inline void gptc_clc_enable(void __iomem *base)
{
	iowrite32(CLC_SUSPEND | CLC_RMC, base + GPTC_CLC);
}
/* Disable every interrupt source of this GPTC module */
static inline void gptc_irq_mask_all(void __iomem *base)
{
	iowrite32(0x00, base + GPTC_IRNEN);
}
/* Acknowledge (write-1-to-clear) every pending interrupt */
static inline void gptc_irq_clear_all(void __iomem *base)
{
	iowrite32(0xff, base + GPTC_IRNCR);
}
/*
 * Disable this timer's interrupt in the shared IRNEN register.
 * Fix: the read-modify-write wrote the value back unmodified; the
 * timer's enable bit (bit tid*2, matching gptc_irq_ack()) must be
 * cleared.  The GPTC-wide lock serializes against the sibling timers.
 */
static inline void gptc_irq_mask(struct gptc_timer *timer)
{
	u32 reg;
	unsigned long flags;
	struct gptc *gptc = timer->gptc;

	spin_lock_irqsave(&gptc->lock, flags);
	reg = gptc_readl(timer, GPTC_IRNEN);
	reg &= ~BIT(timer->tid * 2);
	gptc_writel(timer, reg, GPTC_IRNEN);
	spin_unlock_irqrestore(&gptc->lock, flags);
}
/*
 * Enable this timer's interrupt in the shared IRNEN register.
 * Fix: the read-modify-write wrote the value back unmodified; the
 * timer's enable bit (bit tid*2) must be set.
 */
static inline void gptc_irq_unmask(struct gptc_timer *timer)
{
	u32 reg;
	unsigned long flags;
	struct gptc *gptc = timer->gptc;

	spin_lock_irqsave(&gptc->lock, flags);
	reg = gptc_readl(timer, GPTC_IRNEN);
	reg |= BIT(timer->tid * 2);
	gptc_writel(timer, reg, GPTC_IRNEN);
	spin_unlock_irqrestore(&gptc->lock, flags);
}
/* Acknowledge this timer's pending interrupt (bit tid*2 in IRNCR) */
static inline void gptc_irq_ack(struct gptc_timer *timer)
{
	gptc_writel(timer, BIT(timer->tid * 2), GPTC_IRNCR);
}
static void gptc_enable_32bit_timer(struct gptc_timer *timer)
{
u32 reg;
reg = gptc_readl(timer, GPTC_CON(timer->tid));
gptc_writel(timer, reg, GPTC_CON(timer->tid));
}
/* Program the direction configured in timer->dir into the CON register */
static void gptc_count_dir(struct gptc_timer *timer)
{
	u32 off = GPTC_CON(timer->tid);
	u32 con = gptc_readl(timer, off);

	if (timer->dir != GPTC_COUNT_UP)
		con &= ~CON_COUNT_UP;
	else
		con |= CON_COUNT_UP;
	gptc_writel(timer, con, off);
}
/* Select one-shot (stop on overflow) or continuous mode for the timer */
static void gptc_mode_setup(struct gptc_timer *timer, bool oneshot)
{
	u32 off = GPTC_CON(timer->tid);
	u32 con = gptc_readl(timer, off);

	con = oneshot ? (con | CON_ONESHOT) : (con & ~CON_ONESHOT);
	gptc_writel(timer, con, off);
}
/*
 * Clock-event interrupt handler: ack the hardware, then forward to the
 * framework's event handler.  (Removed the block of web-extraction line
 * numbers that had been pasted into the body.)
 */
static irqreturn_t gptc_timer_interrupt(int irq, void *data)
{
	struct gptc_clockevent *gptce = data;
	struct gptc_timer *timer = gptce->timer;

	gptc_irq_ack(timer);
	gptce->ce.event_handler(&gptce->ce);
	return IRQ_HANDLED;
}
/*
 * Program a one-shot expiry @cycles counter ticks from now.
 * The counter is stopped, reprogrammed, then reloaded+started in one go.
 */
static int gptc_clkevt_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct gptc_timer *timer = clkevt_to_gptc_timer(evt);

	WARN_ON(cycles == 0);	/* framework should never ask for 0 */
	gptc_stop_counter(timer);
	gptc_mode_setup(timer, true);
	gptc_reload_counter(timer, cycles);
	gptc_reload_and_run(timer);
	return 0;
}
/* Shutdown state: stop the counter and clear its reload value */
static int gptc_clkevt_shutdown(struct clock_event_device *evt)
{
	struct gptc_timer *timer = clkevt_to_gptc_timer(evt);

	gptc_stop_counter(timer);
	gptc_reset_counter(timer);
	return 0;
}
/* Periodic state: continuous mode, one interrupt per jiffy */
static int gptc_clkevt_periodic(struct clock_event_device *evt)
{
	struct gptc_clockevent *gptce = to_gptc_clockevent(evt);
	struct gptc_timer *timer = gptce->timer;

	gptc_stop_counter(timer);
	gptc_mode_setup(timer, false);	/* auto-reload on overflow */
	gptc_reload_counter(timer, gptce->ticks_per_jiffy);
	gptc_start_counter(timer);
	return 0;
}
/* Tick resume: restart counting with whatever was programmed before */
static int gptc_clkevt_resume(struct clock_event_device *evt)
{
	struct gptc_timer *timer = clkevt_to_gptc_timer(evt);

	gptc_start_counter(timer);
	return 0;
}
/* clocksource .read callback: raw 32-bit counter value */
static cycle_t gptc_hpt_read(struct clocksource *cs)
{
	struct gptc_timer *timer = clksrc_to_gptc_timer(cs);

	return (cycle_t)gptc_read_counter(timer);
}
static void gptc_global_init(struct gptc *gptc)
gptc_clc_enable(gptc->base);
gptc_irq_mask_all(gptc->base);
gptc_irq_clear_all(gptc->base);
}
static void gptc_per_timer_init(struct gptc_timer *timer)
{
gptc_count_dir(timer);
gptc_enable_32bit_timer(timer);
gptc_reset_counter(timer);
if (timer->type == TIMER_TYPE_CLK_SRC)
static const char *const timer_type_to_str(u32 type)
switch (type) {
case TIMER_TYPE_CLK_SRC:
static void gptc_of_config_print(struct gptc *gptc)
pr_debug("GPTC%d timer list info\n", gptc->id);
list_for_each_entry(timer, &gptc->parent, child) {
pr_debug("timer%d base %p gptcid %d freq %d tid %d cpuid %d irq %d clk %s\n",
i, timer->base, timer->gptcid, timer->frequency,
timer->tid, timer->cpuid, timer->irq,
timer_type_to_str(timer->type));
i++;
static int gptc_clock_init(struct gptc *gptc)
gptc->gateclk = of_clk_get_by_name(np, "gptc");
if (IS_ERR_OR_NULL(gptc->gateclk)) {
pr_err("Failed to get gptc gate clk: %ld\n",
PTR_ERR(gptc->gateclk));
return gptc->gateclk ? PTR_ERR(gptc->gateclk) : -ENODEV;
}
gptc->fpiclk = of_clk_get_by_name(np, "fpi");
if (IS_ERR_OR_NULL(gptc->fpiclk)) {
pr_err("Failed to get gptc frequency clk: %ld\n",
PTR_ERR(gptc->fpiclk));
return gptc->fpiclk ? PTR_ERR(gptc->fpiclk) : -ENODEV;
}
return 0;
}
/*
 * Drop the clock references taken in gptc_clock_init().
 * Fix: of_clk_get_by_name() returns a reference that must be released
 * with clk_put(); just NULLing the pointers leaked both references.
 */
static void gptc_clock_deinit(struct gptc *gptc)
{
	if (!IS_ERR_OR_NULL(gptc->gateclk))
		clk_put(gptc->gateclk);
	if (!IS_ERR_OR_NULL(gptc->fpiclk))
		clk_put(gptc->fpiclk);
	gptc->gateclk = NULL;
	gptc->fpiclk = NULL;
}
/*
 * Prepare+enable the gate and FPI clocks and cache the FPI rate.
 * Fix: restored the closing braces and the `if (ret)` guards that were
 * dropped from this copy -- without them the error messages printed
 * unconditionally and the function was not syntactically valid.
 */
static int gptc_clock_enable(struct gptc *gptc)
{
	int ret;

	if (IS_ERR_OR_NULL(gptc->gateclk) ||
	    IS_ERR_OR_NULL(gptc->fpiclk)) {
		pr_err("%s clock(s) is/are not initialized\n", __func__);
		ret = -EIO;
		goto out;
	}
	ret = clk_prepare_enable(gptc->gateclk);
	if (ret) {
		pr_err("%s failed to enable gate clk: %d\n", __func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(gptc->fpiclk);
	if (ret) {
		pr_err("%s failed to enable fpi clk: %d\n", __func__, ret);
		goto err_gateclk_disable;
	}
	gptc->fpifreq = clk_get_rate(gptc->fpiclk);
	return 0;
err_gateclk_disable:
	clk_disable_unprepare(gptc->gateclk);
out:
	return ret;
}
/* Undo gptc_clock_enable(); safe to call with unobtained clocks */
static void gptc_clock_disable(struct gptc *gptc)
{
	if (!IS_ERR_OR_NULL(gptc->gateclk))
		clk_disable_unprepare(gptc->gateclk);
	if (!IS_ERR_OR_NULL(gptc->fpiclk))
		clk_disable_unprepare(gptc->fpiclk);
}
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
/* kref release callback: tear down and free a GPTC module */
static void __gptc_release(struct kref *ref)
{
	struct gptc *gptc = container_of(ref, struct gptc, ref);

	gptc_clock_disable(gptc);
	list_del(&gptc->next);
	kfree(gptc);
}
/* Take a reference on @gptc; returns 1 if taken, 0 for NULL input */
static int gptc_get(struct gptc *gptc)
{
	if (!gptc)
		return 0;
	kref_get(&gptc->ref);
	return 1;
}
/* Drop a reference; __gptc_release() runs when the count hits zero */
static void gptc_put(struct gptc *gptc)
{
	if (!gptc)
		return;
	kref_put(&gptc->ref, __gptc_release);
}
static int gptc_of_parse_timer(struct gptc *gptc)
{
u32 type;
struct of_phandle_args clkspec;
int index, ret, nr_timers;
struct gptc_timer *timer;
u32 tid;
u32 cpuid;
struct device_node *np = gptc->np;
nr_timers = of_count_phandle_with_args(np, "intel,clk",
"#gptc-cells");
if (nr_timers <= 0) {
pr_err("gptc%d: invalid value of phandler property at %s\n",
gptc->id, np->full_name);
pr_debug("%s nr_timers %d available\n", __func__, nr_timers);
for (index = 0; index < nr_timers; index++) {
ret = of_parse_phandle_with_args(np, "intel,clk",
"#gptc-cells", index, &clkspec);
if (ret < 0)
return ret;
pr_debug("%s args_count %d arg[0] %d arg[1] %d arg[2] %d\n",
__func__, clkspec.args_count, clkspec.args[0],
clkspec.args[1], clkspec.args[2]);
if (clkspec.args_count != 3) {
pr_err("%s: invalid gptc clk property\n", __func__);
return -EINVAL;
type = clkspec.args[0];
tid = clkspec.args[1];
cpuid = clkspec.args[2];
/* Ignore CPU id check */
if (type > TIMER_TYPE_MAX || tid > (TIMER_PER_GPTC - 1)) {
pr_err("%s invalid clk type %d or timer id %d\n",
__func__, type, tid);
return -EINVAL;
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
INIT_LIST_HEAD(&timer->child);
timer->gptc = gptc;
timer->base = gptc->base;
timer->phy_base = gptc->phy_base;
timer->gptcid = gptc->id;
timer->tid = tid;
timer->type = type;
timer->frequency = gptc->fpifreq;
timer->used = false;
list_add_tail(&timer->child, &gptc->parent);
switch (type) {
case TIMER_TYPE_CLK_SRC:
INIT_LIST_HEAD(&timer->clksrc);
timer->irq = 0;
timer->dir = GPTC_COUNT_UP;
timer->cpuid = 0;
list_add_tail(&timer->clksrc, &gptc_clksrc_list);
break;
case TIMER_TYPE_CLK_EVT:
case TIMER_TYPE_WDT:
timer->irq = irq_of_parse_and_map(np, timer->tid);
WARN_ON(timer->irq < 0);
timer->cpuid = cpuid;
if (type == TIMER_TYPE_CLK_EVT) {
INIT_LIST_HEAD(&timer->clkevt);
list_add_tail(&timer->clkevt,
&gptc_clkevt_list);
} else {
INIT_LIST_HEAD(&timer->wdt);
list_add_tail(&timer->wdt, &gptc_wdt_list);
}
break;
default:
break;
static int gptc_of_init(struct device_node *np)
{
int ret;
u32 gptcid;
struct resource res;
void __iomem *base;
struct gptc *gptc;
/* Which GPTC is being handled */
gptcid = of_alias_get_id(np, "timer");
if (gptcid >= (GPTC_MAX - 1))
return -EINVAL;
ret = of_address_to_resource(np, 0, &res);
if (WARN_ON(ret))
return ret;
base = of_iomap(np, 0);
if (!base) {
pr_err("Can't map GPTC base address\n");
return -ENXIO;
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
gptc = kzalloc(sizeof(*gptc), GFP_KERNEL);
if (!gptc)
goto err_iomap;
INIT_LIST_HEAD(&gptc->parent);
INIT_LIST_HEAD(&gptc->next);
spin_lock_init(&gptc->lock);
kref_init(&gptc->ref);
gptc->np = np;
gptc->id = gptcid;
gptc->base = base;
gptc->phy_base = res.start;
ret = gptc_clock_init(gptc);
if (ret)
goto err_clk_init;
ret = gptc_clock_enable(gptc);
if (ret)
goto err_clk_en;
ret = gptc_of_parse_timer(gptc);
if (ret)
goto err_parse_fail;
list_add_tail(&gptc->next, &gptc_list);
gptc_global_init(gptc);
gptc_of_config_print(gptc);
err_parse_fail:
gptc_clock_disable(gptc);
err_clk_en:
gptc_clock_deinit(gptc);
err_clk_init:
kfree(gptc);
err_iomap:
iounmap(base);
return ret;
}
/*
 * The single GPTC-backed clocksource instance; its backing timer is
 * assigned in gptc_clocksource_init().  (Removed the pasted web line
 * numbers that had corrupted this initializer.)
 */
static struct gptc_clocksource gptc_clksrc = {
	.cs = {
		.name = "gptc",
		.read = gptc_hpt_read,
		.mask = CLOCKSOURCE_MASK(32),
		.flags = CLOCK_SOURCE_IS_CONTINUOUS
			| CLOCK_SOURCE_SUSPEND_NONSTOP,
	},
};
/* Convenience wrapper: read the registered GPTC clocksource counter */
static cycle_t gptc_read_count(void)
{
	return gptc_hpt_read(&gptc_clksrc.cs);
}
#ifdef CONFIG_X86
/* ns per counter tick; assigned once in gptc_clocksource_init() */
static unsigned long sched_clock_mult __read_mostly;
/* last raw counter value and accumulated wrap offset, lock-protected */
static cycle_t cycle_last, cycle_offset;
static DEFINE_SPINLOCK(gptc_shed_lock);

/*
 * Scheduler clock built on the 32-bit GPTC counter.  The wrap detection
 * extends it to 64 bits: whenever the raw value goes backwards, one full
 * 2^32 period is added to cycle_offset.  Returns 0 until the GPTC
 * clocksource has been registered.
 */
unsigned long long notrace sched_clock(void)
{
	cycle_t cycle;
	unsigned long flags;

	if (!gptc_clksrc_init)
		return 0;
	spin_lock_irqsave(&gptc_shed_lock, flags);
	cycle = gptc_read_count();
	cycle &= gptc_clksrc.cs.mask;
	/* Counter wrapped */
	if (unlikely(cycle_last > cycle))
		cycle_offset += BIT_ULL(32);
	cycle_last = cycle;
	cycle += cycle_offset;
	spin_unlock_irqrestore(&gptc_shed_lock, flags);
	return cycle * sched_clock_mult;
}
#else
/* Non-x86: raw counter read for the generic sched_clock framework */
static u64 __maybe_unused notrace gptc_read_sched_clock(void)
{
	return (u64)gptc_read_count();
}
#endif /* CONFIG_X86 */
/*
 * Register the first unused timer from the clock-source list as the
 * system clocksource (at most one GPTC clocksource is allowed).
 *
 * Fixes vs. this copy: restored the missing 'ret' and 'timer' local
 * declarations; set gptc_clksrc.timer so clksrc_to_gptc_timer() /
 * gptc_hpt_read() have a backing timer; the #endif comment said
 * CONFIG_MIPS although the conditional is CONFIG_X86.
 */
static int gptc_clocksource_init(void)
{
	int ret;
	struct gptc_timer *timer;

	list_for_each_entry(timer, &gptc_clksrc_list, clksrc) {
		if (!timer->used) {
			/* Only one clock source from GPTC allowed */
			if (gptc_clksrc_init)
				return -EEXIST;
			/* Record for VDSO */
			gptc_phy_base = timer->phy_base;
			/*
			 * Calculate a somewhat reasonable rating value
			 * in 10MHz
			 */
			gptc_clksrc.cs.rating =
				250 + timer->frequency / 10000000;
			gptc_clksrc.timer = timer;
			gptc_per_timer_init(timer);
			ret = clocksource_register_hz(&gptc_clksrc.cs,
						      timer->frequency);
			if (ret < 0)
				pr_warn("GPTC: Unable to register clocksource\n");
#ifdef CONFIG_X86
			sched_clock_mult = NSEC_PER_SEC / timer->frequency;
#else
			sched_clock_register(gptc_read_sched_clock,
					     32, timer->frequency);
#endif /* CONFIG_X86 */
			timer->used = true;
			gptc_clksrc_init = true;
			pr_debug("gptc %d timer %d clk src register @cpu%d\n",
				 timer->gptcid, timer->tid, timer->cpuid);
			return 0;
		}
	}
	return -EINVAL;
}
/* Template clock event device, copied per CPU in gptc_clockevent_cpu_init() */
static struct clock_event_device gptc_per_timer_clockevent = {
	.name = "gptc_clockevent",
	.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.set_state_shutdown = gptc_clkevt_shutdown,
	.set_state_periodic = gptc_clkevt_periodic,
	/* entering oneshot just stops the timer; set_next_event restarts it */
	.set_state_oneshot = gptc_clkevt_shutdown,
	.set_next_event = gptc_clkevt_next_event,
	.tick_resume = gptc_clkevt_resume,
	.irq = 0,	/* filled in per timer */
};
/*
 * Bind a free GPTC clock-event timer to @cpu and register it with the
 * clockevents framework.  @cd is the per-CPU gptc_clockevent instance.
 *
 * Fix vs. this copy: the clockevents_config_and_register() call was
 * truncated mid-argument-list; completed with the device frequency and
 * a conventional 32-bit delta range.  NOTE(review): confirm the min/max
 * delta values against the original driver.
 */
static int gptc_clockevent_cpu_init(unsigned int cpu,
				    struct gptc_clockevent *cd)
{
	int ret;
	struct gptc_timer *timer;
	struct gptc_clockevent *gptc_clkevt;
	struct clock_event_device *levt;

	if (!cd)
		return -EINVAL;
	gptc_clkevt = cd;
	levt = &cd->ce;
	list_for_each_entry(timer, &gptc_clkevt_list, clkevt) {
		if (!timer->used && (timer->cpuid == cpu)) {
			gptc_clkevt->timer = timer;
			gptc_clkevt->ticks_per_jiffy =
				DIV_ROUND_UP(timer->frequency, HZ);
			memcpy(levt, &gptc_per_timer_clockevent, sizeof(*levt));
			levt->irq = timer->irq;
			levt->cpumask = cpumask_of(cpu);
			snprintf(gptc_clkevt->name, sizeof(gptc_clkevt->name),
				 "gptc_event%d", cpu);
			levt->name = gptc_clkevt->name;
			gptc_per_timer_init(timer);
			ret = request_irq(levt->irq, gptc_timer_interrupt,
					  IRQF_TIMER, gptc_clkevt->name,
					  gptc_clkevt);
			if (ret) {
				pr_err("gptc clkevt register failed @cpu%d\n",
				       cpu);
				return ret;
			}
			irq_set_affinity(levt->irq, cpumask_of(cpu));
			clockevents_config_and_register(levt,
							timer->frequency,
							0x1, 0x7fffffff);
			timer->used = true;
			pr_debug("gptc %d timer %d clk evt register @cpu%d\n",
				 timer->gptcid, timer->tid, timer->cpuid);
			return 0;
		}
	}
	return -EINVAL;
}
/*
 * Tear down the per-CPU clock event: release the irq, mask the timer,
 * unlink it, free it and drop the parent GPTC reference.
 *
 * Fixes vs. this copy: 'timer' was tested and used without ever being
 * assigned (now taken from cd->timer) and the 'gptc' local used at the
 * end was never declared.
 */
static int gptc_clockevent_cpu_exit(struct gptc_clockevent *cd)
{
	struct clock_event_device *levt;
	struct gptc_timer *timer;
	struct gptc *gptc;

	if (!cd)
		return -EINVAL;
	levt = &cd->ce;
	timer = cd->timer;
	if (!timer)
		return -EINVAL;
	if (levt->irq) {
		free_irq(levt->irq, cd);
		levt->irq = 0;
	}
	gptc_irq_mask(timer);
	list_del(&timer->clkevt);
	gptc = timer->gptc;
	kfree(timer);
	gptc_put(gptc);
	return 0;
}
/* CPU hotplug "starting" callback: bring up this CPU's clock event */
static int gptc_starting_cpu(unsigned int cpu)
{
	gptc_clockevent_cpu_init(cpu, this_cpu_ptr(&gptc_event_device));
	return 0;
}
/* CPU hotplug "dying" callback: tear down this CPU's clock event */
static int gptc_dying_cpu(unsigned int cpu)
{
	gptc_clockevent_cpu_exit(this_cpu_ptr(&gptc_event_device));
	return 0;
}

/*
 * NOTE(review): the header of this function was lost in this copy; the
 * name is taken from the call in gptc_timer_init().  Registers the CPU
 * hotplug callbacks (which also run immediately on the boot CPU).
 */
static void gptc_clkevent_init(void)
{
	cpuhp_setup_state(CPUHP_AP_INTEL_GPTC_TIMER_STARTING,
			  "AP_INTEL_GPTC_TIMER_STARTING", gptc_starting_cpu,
			  gptc_dying_cpu);
}
/*
 * CLOCKSOURCE_OF_DECLARE entry point for each GPTC node.
 * Fix vs. this copy: the node was never probed -- gptc_of_init(np) must
 * run before the clocksource/clockevent lists can contain any timer.
 */
static int __init gptc_timer_init(struct device_node *np)
{
	int ret;

	ret = gptc_of_init(np);
	if (ret)
		return ret;
	gptc_clocksource_init();
	/* Register immediately the clock event on BSP */
	gptc_clkevent_init();
#ifdef CONFIG_X86
	global_clock_event = &gptc_per_timer_clockevent;
#endif
	return 0;
}
CLOCKSOURCE_OF_DECLARE(lantiq_gptc_timer, "lantiq,gptc", gptc_timer_init);
CLOCKSOURCE_OF_DECLARE(intel_gptc_timer, "intel,gptc", gptc_timer_init);
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
/* seq_file .start: iterate over the global GPTC list */
static void *gptc_seq_start(struct seq_file *s, loff_t *pos)
{
	if (list_empty(&gptc_list))
		return NULL;
	return seq_list_start(&gptc_list, *pos);
}
/* seq_file .next: advance to the next GPTC module */
static void *gptc_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	return seq_list_next(v, &gptc_list, pos);
}
/* seq_file .stop: nothing to release */
static void gptc_seq_stop(struct seq_file *s, void *v)
{
}
/*
 * seq_file .show: dump one GPTC module and all of its timers.
 *
 * Fixes vs. this copy: removed the pasted web line numbers that split
 * the last seq_printf(); the per-timer register reads were indexed by
 * the list position 'i' instead of the hardware timer id timer->tid
 * (the two differ as soon as timers are configured out of order).
 */
static int gptc_seq_show(struct seq_file *s, void *v)
{
	int i = 0;
	struct gptc *gptc;
	struct gptc_timer *timer;

	gptc = list_entry(v, struct gptc, next);
	seq_printf(s, "GPTC%d base %p phy %lx freq %d ref %d\n",
		   gptc->id, gptc->base, gptc->phy_base, gptc->fpifreq,
		   atomic_read(&gptc->ref.refcount));
	seq_printf(s, "CLC %08x ID %08x IRNEN %08x IRNICR %08x IRNCR %08x\n",
		   ioread32(gptc->base + GPTC_CLC),
		   ioread32(gptc->base + GPTC_ID),
		   ioread32(gptc->base + GPTC_IRNEN),
		   ioread32(gptc->base + GPTC_IRNICR),
		   ioread32(gptc->base + GPTC_IRNCR));
	list_for_each_entry(timer, &gptc->parent, child) {
		seq_printf(s, "\ttimer%d base %p freq %d tid %d cpuid %d irq %d clk %s %s\n",
			   i, timer->base, timer->frequency, timer->tid,
			   timer->cpuid, timer->irq,
			   timer_type_to_str(timer->type),
			   timer->used ? "used" : "unused");
		seq_printf(s, "\tCON %08x RUN %08x RLD %08x CNT %08x\n",
			   gptc_readl(timer, GPTC_CON(timer->tid)),
			   gptc_readl(timer, GPTC_RUN(timer->tid)),
			   gptc_readl(timer, GPTC_RLD(timer->tid)),
			   gptc_readl(timer, GPTC_CNT(timer->tid)));
		i++;
	}
	seq_putc(s, '\n');
	return 0;
}
/* Iterator ops for the debugfs "status" file */
static const struct seq_operations gptc_seq_ops = {
	.start = gptc_seq_start,
	.next = gptc_seq_next,
	.stop = gptc_seq_stop,
	.show = gptc_seq_show,
};
/* debugfs open: attach the GPTC seq_file iterator to this file */
static int gptc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &gptc_seq_ops);
}
/* File ops for /sys/kernel/debug/gptc/status (read-only seq_file) */
static const struct file_operations gptc_ops = {
	.owner = THIS_MODULE,
	.open = gptc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
/* Create /sys/kernel/debug/gptc/status; removes the dir on failure */
static int gptc_debugfs_init(void)
{
	struct dentry *file;
	struct dentry *debugfs;

	debugfs = debugfs_create_dir("gptc", NULL);
	if (!debugfs)
		return -ENOMEM;
	file = debugfs_create_file("status", 0444, debugfs, NULL, &gptc_ops);
	if (!file)
		goto remove;
	return 0;
remove:
	debugfs_remove_recursive(debugfs);
	debugfs = NULL;
	return -ENOMEM;
}
late_initcall(gptc_debugfs_init);