Newer
Older
/*
* Copyright(C) 2017 Intel Corporation.
* Lei Chuanhua <chuanhua.lei@intel.com>
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <clocksource/intel-gptc-timer.h>
#ifdef CONFIG_LTQ_VMB
#include <asm/ltq_vmb.h>
#include <linux/irqchip/mips-gic.h>
#endif
#define CLC_DIS BIT(0)
#define CLC_SUSPEND BIT(4)
#define CLC_RMC BIT(8)
#define CLC_SMC BIT(16)
#define ID_VER 0x1Fu
#define ID_VER_S 0
#define ID_CFG 0xE0u
#define ID_CFG_S 5
#define ID_ID 0xFF00u
#define ID_ID_S 8
#define GPTC_CON(X) (0x10 + (X) * 0x20)
#define CON_EN_STAT BIT(0) /* RO only */
#define CON_COUNT_UP BIT(1) /* Down up or down */
#define CON_CNT BIT(2)
#define CON_ONESHOT BIT(3) /* Stop or continue when overflowing */
#define CON_EXT BIT(4) /* 32 or 16 bit */
#define CON_EDGE_RISE BIT(6)
#define CON_EDGE_FALL BIT(7)
#define CON_EDGE_ANY (CON_EDGE_RISE | CON_EDGE_FALL)
#define CON_SYNC BIT(8) /* Signal sync to module clock or not */
#define GPTC_RUN(X) (0x18 + (X) * 0x20)
#define RUN_EN BIT(0)
#define RUN_STOP BIT(1)
#define RUN_RELOAD BIT(2)
#define GPTC_RLD(X) (0x20 + (X) * 0x20)
#define GPTC_CNT(X) (0x28 + (X) * 0x20)
#define GPTC_IRNEN 0xf4
#define GPTC_IRNICR 0xf8
#define GPTC_IRNCR 0xfc
enum gptc_cnt_dir {
GPTC_COUNT_DOWN = 0,
GPTC_COUNT_UP,
};
enum gptc_timer_type {
TIMER_TYPE_CLK_SRC,
TIMER_TYPE_CLK_EVT,
TIMER_TYPE_WDT,
TIMER_TYPE_MAX,
};
/* Hardwre GPTC struct */
struct gptc {
u32 id;
struct device_node *np;
void __iomem *base;
unsigned long phy_base;
struct clk *gateclk;
u32 fpifreq;
spinlock_t lock; /* Shared register access */
struct list_head parent; /* Timers belonging to itsef */
struct list_head next; /* Link to next GPTC */
struct kref ref;
};
struct gptc *gptc; /* Point back to parent */
void __iomem *base;
unsigned long phy_base;
u32 gptcid;
u32 tid; /* 0, 1, 2 only */
u32 cpuid;
u32 irq;
u32 frequency;
enum gptc_cnt_dir dir;
bool used;
bool irq_registered;
u32 yield_pin;
void (*call_back)(void *);
void *call_back_param;
struct list_head child; /* Node in parent list */
struct list_head clksrc; /* Node in clock source list */
struct list_head clkevt; /* Node in clock event list */
struct list_head wdt; /* Node in watchdog timer */
struct clocksource cs;
};
/*
 * Per-CPU clock event wrapper around a GPTC hardware timer.
 * Fix: restore the @timer back-pointer — it is dereferenced as
 * gptce->timer in the ISR and assigned in gptc_clkevt_irq_init(),
 * but was missing from this definition.
 */
struct gptc_clockevent {
	u32 ticks_per_jiffy;		/* reload value for periodic mode */
	struct gptc_timer *timer;	/* backing hardware timer */
	struct clock_event_device ce;
	char name[16];			/* per-CPU irq/device name */
};
static LIST_HEAD(gptc_list);
static LIST_HEAD(gptc_clksrc_list);
static LIST_HEAD(gptc_clkevt_list);
static LIST_HEAD(gptc_wdt_list);
unsigned long gptc_phy_base;
static bool gptc_clksrc_init;
static DEFINE_PER_CPU(struct gptc_clockevent, gptc_event_device);
/* Map a clock_event_device back to its enclosing gptc_clockevent. */
static inline struct gptc_clockevent *to_gptc_clockevent(
	struct clock_event_device *evt)
{
	struct gptc_clockevent *gptce;

	gptce = container_of(evt, struct gptc_clockevent, ce);
	return gptce;
}
static inline struct gptc_timer *clkevt_to_gptc_timer(
struct clock_event_device *evt)
{
struct gptc_clockevent *gptce = container_of(evt,
struct gptc_clockevent, ce);
}
static inline struct gptc_timer *clksrc_to_gptc_timer(struct clocksource *src)
{
struct gptc_clocksource *gptcs = container_of(src,
struct gptc_clocksource, cs);
}
/* Read a 32-bit GPTC register at byte offset @offs from the timer's base. */
static inline u32 gptc_readl(struct gptc_timer *timer, u32 offs)
{
	void __iomem *addr = timer->base + offs;

	return ioread32(addr);
}
static inline void gptc_writel(struct gptc_timer *timer, unsigned long val,
{
iowrite32(val, timer->base + offs);
}
/* Sample the timer's current count from its CNT register. */
static u32 gptc_read_counter(struct gptc_timer *timer)
{
	u32 offs = GPTC_CNT(timer->tid);

	return gptc_readl(timer, offs);
}
static inline void gptc_stop_counter(struct gptc_timer *timer)
{
gptc_writel(timer, RUN_STOP, GPTC_RUN(timer->tid));
}
static inline void gptc_reload_counter(struct gptc_timer *timer,
gptc_writel(timer, cycles, GPTC_RLD(timer->tid));
}
/* Zero the timer by reloading a count of 0. */
static inline void gptc_reset_counter(struct gptc_timer *timer)
{
	const unsigned long zero = 0;

	gptc_reload_counter(timer, zero);
}
static inline void gptc_start_counter(struct gptc_timer *timer)
{
gptc_writel(timer, RUN_EN, GPTC_RUN(timer->tid));
}
static inline void gptc_reload_and_run(struct gptc_timer *timer)
{
gptc_writel(timer, RUN_EN | RUN_RELOAD, GPTC_RUN(timer->tid));
}
/* Program the module clock control: suspend-aware, request-mode clocking. */
static inline void gptc_clc_enable(void __iomem *base)
{
	u32 clc = CLC_SUSPEND | CLC_RMC;

	iowrite32(clc, base + GPTC_CLC);
}
/* Disable every interrupt line of this GPTC block (clear all IRNEN bits). */
static inline void gptc_irq_mask_all(void __iomem *base)
{
	iowrite32(0x0, base + GPTC_IRNEN);
}
/* Acknowledge every pending interrupt; IRNCR is write-1-to-clear. */
static inline void gptc_irq_clear_all(void __iomem *base)
{
	u32 all_pending = 0xff;

	iowrite32(all_pending, base + GPTC_IRNCR);
}
/*
 * Disable this timer's interrupt line under the shared-register lock.
 * Fix: the read-modify-write sequence wrote back the unmodified value —
 * the timer's enable bit (BIT(tid * 2), matching gptc_irq_ack) was
 * never cleared, so the "mask" was a no-op.
 */
static inline void gptc_irq_mask(struct gptc_timer *timer)
{
	u32 reg;
	unsigned long flags;
	struct gptc *gptc = timer->gptc;

	spin_lock_irqsave(&gptc->lock, flags);
	reg = gptc_readl(timer, GPTC_IRNEN);
	reg &= ~BIT(timer->tid * 2);
	gptc_writel(timer, reg, GPTC_IRNEN);
	spin_unlock_irqrestore(&gptc->lock, flags);
}
/*
 * Enable this timer's interrupt line under the shared-register lock.
 * Fix: the read-modify-write wrote back the unmodified value — the
 * enable bit (BIT(tid * 2)) was never set, so interrupts stayed off.
 */
static inline void gptc_irq_unmask(struct gptc_timer *timer)
{
	u32 reg;
	unsigned long flags;
	struct gptc *gptc = timer->gptc;

	spin_lock_irqsave(&gptc->lock, flags);
	reg = gptc_readl(timer, GPTC_IRNEN);
	reg |= BIT(timer->tid * 2);
	gptc_writel(timer, reg, GPTC_IRNEN);
	spin_unlock_irqrestore(&gptc->lock, flags);
}
/*
 * Acknowledge this timer's pending interrupt (IRNCR is write-1-to-clear).
 * Fix: restore the missing closing brace.
 */
static inline void gptc_irq_ack(struct gptc_timer *timer)
{
	gptc_writel(timer, BIT(timer->tid * 2), GPTC_IRNCR);
}
/* Return 1 if this timer's interrupt is pending in IRNCR, else 0. */
static inline int gptc_irq_read(struct gptc_timer *timer)
{
	u32 pending = gptc_readl(timer, GPTC_IRNCR);

	return (pending & BIT(timer->tid * 2)) ? 1 : 0;
}
static void gptc_enable_32bit_timer(struct gptc_timer *timer)
{
u32 reg;
reg = gptc_readl(timer, GPTC_CON(timer->tid));
gptc_writel(timer, reg, GPTC_CON(timer->tid));
}
/* Program the timer's count direction (up/down) from timer->dir. */
static void gptc_count_dir(struct gptc_timer *timer)
{
	u32 offs = GPTC_CON(timer->tid);
	u32 con = gptc_readl(timer, offs);

	if (timer->dir == GPTC_COUNT_UP)
		con |= CON_COUNT_UP;
	else
		con &= ~CON_COUNT_UP;
	gptc_writel(timer, con, offs);
}
/* Select one-shot (stop on overflow) or continuous mode for the timer. */
static void gptc_mode_setup(struct gptc_timer *timer, bool oneshot)
{
	u32 offs = GPTC_CON(timer->tid);
	u32 con = gptc_readl(timer, offs);

	con = oneshot ? (con | CON_ONESHOT) : (con & ~CON_ONESHOT);
	gptc_writel(timer, con, offs);
}
/*
 * Clock event interrupt handler. @data is the per-CPU gptc_clockevent
 * registered in gptc_clkevt_irq_init(). The line is masked and acked
 * before the core event handler runs, then unmasked again.
 */
static irqreturn_t gptc_timer_interrupt(int irq, void *data)
{
	struct gptc_clockevent *gptce = data;
	struct gptc_timer *timer = gptce->timer;
	/* Keep the line quiet while the event handler runs */
	gptc_irq_mask(timer);
	gptc_irq_ack(timer);
	gptce->ce.event_handler(&gptce->ce);
	gptc_irq_unmask(timer);
	return IRQ_HANDLED;
}
static int gptc_clkevt_next_event(unsigned long cycles,
{
struct gptc_timer *timer = clkevt_to_gptc_timer(evt);
WARN_ON(cycles == 0);
gptc_stop_counter(timer);
gptc_mode_setup(timer, true);
gptc_reload_counter(timer, cycles);
gptc_reload_and_run(timer);
return 0;
}
/* Shutdown state handler: park the timer and zero its count. */
static int gptc_clkevt_shutdown(struct clock_event_device *evt)
{
	struct gptc_timer *t = clkevt_to_gptc_timer(evt);

	gptc_stop_counter(t);
	gptc_reset_counter(t);
	return 0;
}
/* Periodic state handler: auto-reload one jiffy's worth of ticks. */
static int gptc_clkevt_periodic(struct clock_event_device *evt)
{
	struct gptc_clockevent *gptce = to_gptc_clockevent(evt);
	struct gptc_timer *t = gptce->timer;

	gptc_stop_counter(t);
	gptc_mode_setup(t, false);	/* continuous mode */
	gptc_reload_counter(t, gptce->ticks_per_jiffy);
	gptc_start_counter(t);
	return 0;
}
/* Resume handler: restart the counter after a tick suspend. */
static int gptc_clkevt_resume(struct clock_event_device *evt)
{
	struct gptc_timer *t = clkevt_to_gptc_timer(evt);

	gptc_start_counter(t);
	return 0;
}
/* Clocksource read hook: return the raw 32-bit hardware count. */
static cycle_t gptc_hpt_read(struct clocksource *cs)
{
	struct gptc_timer *t = clksrc_to_gptc_timer(cs);

	return (cycle_t)gptc_read_counter(t);
}
static void gptc_global_init(struct gptc *gptc)
gptc_clc_enable(gptc->base);
gptc_irq_mask_all(gptc->base);
gptc_irq_clear_all(gptc->base);
}
/*
 * Program one timer's static configuration: direction, 32-bit mode, and
 * the initial reload value.
 */
static void gptc_per_timer_init(struct gptc_timer *timer)
{
	gptc_count_dir(timer);
	gptc_enable_32bit_timer(timer);
	if (timer->type == TIMER_TYPE_HEARTBEAT)
		gptc_reload_counter(timer, 1); /* TODO for interval */
	else
		gptc_reset_counter(timer);
	/*
	 * NOTE(review): the tail of this function was lost in extraction.
	 * Free-running users (clock source, heartbeat) most plausibly start
	 * counting here — confirm against the original source.
	 */
	if (timer->type == TIMER_TYPE_CLK_SRC ||
	    timer->type == TIMER_TYPE_HEARTBEAT)
		gptc_start_counter(timer);
}
/*
 * Human-readable name for a timer type, used by the config dump.
 * NOTE(review): body reconstructed — only the heartbeat case survived
 * extraction; other strings inferred from the enum names.
 */
static const char *timer_type_to_str(u32 type)
{
	switch (type) {
	case TIMER_TYPE_CLK_SRC:
		return "clock source";
	case TIMER_TYPE_CLK_EVT:
		return "clock event";
	case TIMER_TYPE_WDT:
		return "watchdog";
	case TIMER_TYPE_HEARTBEAT:
		return "heartbeat";
	default:
		return "unknown";
	}
}
static void gptc_of_config_print(struct gptc *gptc)
pr_debug("GPTC%d timer list info\n", gptc->id);
list_for_each_entry(timer, &gptc->parent, child) {
pr_debug("timer%d base %p gptcid %d freq %d tid %d cpuid %d irq %d clk %s\n",
i, timer->base, timer->gptcid, timer->frequency,
timer->tid, timer->cpuid, timer->irq,
timer_type_to_str(timer->type));
static int gptc_clock_init(struct gptc *gptc)
gptc->gateclk = of_clk_get_by_name(np, "gptc");
if (IS_ERR_OR_NULL(gptc->gateclk)) {
pr_err("Failed to get gptc gate clk: %ld\n",
PTR_ERR(gptc->gateclk));
return gptc->gateclk ? PTR_ERR(gptc->gateclk) : -ENODEV;
gptc->freqclk = of_clk_get_by_name(np, "freq");
if (IS_ERR_OR_NULL(gptc->freqclk)) {
pr_err("Failed to get gptc frequency clk: %ld\n",
PTR_ERR(gptc->freqclk));
return gptc->freqclk ? PTR_ERR(gptc->freqclk) : -ENODEV;
}
return 0;
}
/*
 * Drop the cached clock handles obtained in gptc_clock_init().
 * Fix: also reset freqclk — clock_init acquires both clocks, but only
 * gateclk was cleared here, leaving a stale freqclk pointer.
 */
static void gptc_clock_deinit(struct gptc *gptc)
{
	gptc->gateclk = NULL;
	gptc->freqclk = NULL;
}
static int gptc_clock_enable(struct gptc *gptc)
{
int ret;
if (IS_ERR_OR_NULL(gptc->gateclk) ||
pr_err("%s clock(s) is/are not initialized\n", __func__);
ret = -EIO;
goto out;
ret = clk_prepare_enable(gptc->gateclk);
pr_err("%s failed to enable gate clk: %d\n", __func__, ret);
goto out;
if (ret) {
pr_err("%s failed to enable fpi clk: %d\n", __func__, ret);
goto err_gateclk_disable;
gptc->fpifreq = clk_get_rate(gptc->freqclk);
return 0;
err_gateclk_disable:
clk_disable_unprepare(gptc->gateclk);
out:
return ret;
}
/*
 * Disable+unprepare both clocks if they were successfully obtained.
 * Fix: restore the missing closing brace.
 */
static void gptc_clock_disable(struct gptc *gptc)
{
	if (!IS_ERR_OR_NULL(gptc->gateclk))
		clk_disable_unprepare(gptc->gateclk);
	if (!IS_ERR_OR_NULL(gptc->freqclk))
		clk_disable_unprepare(gptc->freqclk);
}
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
/* kref release: last user gone — stop clocks, unlink and free the GPTC. */
static void __gptc_release(struct kref *ref)
{
	struct gptc *g = container_of(ref, struct gptc, ref);

	gptc_clock_disable(g);
	list_del(&g->next);
	kfree(g);
}
/* Take a reference on @gptc; returns 1 if taken, 0 for a NULL gptc. */
static int gptc_get(struct gptc *gptc)
{
	if (gptc) {
		kref_get(&gptc->ref);
		return 1;
	}
	return 0;
}
/* Drop a reference on @gptc; frees it via __gptc_release() on last put. */
static void gptc_put(struct gptc *gptc)
{
	if (gptc)
		kref_put(&gptc->ref, __gptc_release);
}
static int gptc_of_parse_timer(struct gptc *gptc)
{
u32 type;
struct of_phandle_args clkspec;
int index, ret, nr_timers;
struct gptc_timer *timer;
u32 tid;
u32 cpuid;
struct device_node *np = gptc->np;
nr_timers = of_count_phandle_with_args(np, "intel,clk", "#gptc-cells");
pr_err("gptc%d: invalid value of phandler property at %s\n",
pr_debug("%s nr_timers %d available\n", __func__, nr_timers);
for (index = 0; index < nr_timers; index++) {
ret = of_parse_phandle_with_args(np, "intel,clk", "#gptc-cells",
index, &clkspec);
if (ret < 0)
return ret;
pr_debug("%s args_count %d arg[0] %d arg[1] %d arg[2] %d\n",
__func__, clkspec.args_count, clkspec.args[0],
clkspec.args[1], clkspec.args[2]);
if (clkspec.args_count != 3) {
pr_err("%s: invalid gptc clk property\n", __func__);
return -EINVAL;
type = clkspec.args[0];
tid = clkspec.args[1];
cpuid = clkspec.args[2];
/* Ignore CPU id check */
if (type > TIMER_TYPE_MAX || tid > (TIMER_PER_GPTC - 1)) {
pr_err("%s invalid clk type %d or timer id %d\n",
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
INIT_LIST_HEAD(&timer->child);
timer->gptc = gptc;
timer->base = gptc->base;
timer->phy_base = gptc->phy_base;
timer->gptcid = gptc->id;
timer->tid = tid;
timer->type = type;
timer->frequency = gptc->fpifreq;
timer->used = false;
timer->irq_registered = false;
list_add_tail(&timer->child, &gptc->parent);
switch (type) {
case TIMER_TYPE_CLK_SRC:
INIT_LIST_HEAD(&timer->clksrc);
timer->irq = 0;
timer->dir = GPTC_COUNT_UP;
timer->cpuid = 0;
list_add_tail(&timer->clksrc, &gptc_clksrc_list);
break;
case TIMER_TYPE_HEARTBEAT:
INIT_LIST_HEAD(&timer->heartbeat);
timer->irq = 0;
timer->cpuid = 0;
list_add_tail(&timer->heartbeat, &gptc_heartbeat_list);
break;
case TIMER_TYPE_CLK_EVT:
case TIMER_TYPE_WDT:
timer->irq = irq_of_parse_and_map(np, timer->tid);
timer->cpuid = cpuid;
if (type == TIMER_TYPE_CLK_EVT) {
INIT_LIST_HEAD(&timer->clkevt);
list_add_tail(&timer->clkevt,
} else {
INIT_LIST_HEAD(&timer->wdt);
list_add_tail(&timer->wdt, &gptc_wdt_list);
}
break;
case TIMER_TYPE_HT_YIELD:
timer->irq = irq_of_parse_and_map(np, timer->tid);
timer->dir = GPTC_COUNT_DOWN;
timer->cpuid = 0;
list_add_tail(&timer->ht_yield, &gptc_ht_yield_list);
static int gptc_of_init(struct device_node *np)
{
int ret;
u32 gptcid;
struct resource res;
void __iomem *base;
struct gptc *gptc;
/* Which GPTC is being handled */
gptcid = of_alias_get_id(np, "timer");
if (gptcid >= (GPTC_MAX - 1))
return -EINVAL;
ret = of_address_to_resource(np, 0, &res);
if (WARN_ON(ret))
return ret;
base = of_iomap(np, 0);
if (!base) {
pr_err("Can't map GPTC base address\n");
return -ENXIO;
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
gptc = kzalloc(sizeof(*gptc), GFP_KERNEL);
if (!gptc)
goto err_iomap;
INIT_LIST_HEAD(&gptc->parent);
INIT_LIST_HEAD(&gptc->next);
spin_lock_init(&gptc->lock);
kref_init(&gptc->ref);
gptc->np = np;
gptc->id = gptcid;
gptc->base = base;
gptc->phy_base = res.start;
ret = gptc_clock_init(gptc);
if (ret)
goto err_clk_init;
ret = gptc_clock_enable(gptc);
if (ret)
goto err_clk_en;
ret = gptc_of_parse_timer(gptc);
if (ret)
goto err_parse_fail;
list_add_tail(&gptc->next, &gptc_list);
gptc_global_init(gptc);
gptc_of_config_print(gptc);
err_parse_fail:
gptc_clock_disable(gptc);
err_clk_en:
gptc_clock_deinit(gptc);
err_clk_init:
kfree(gptc);
err_iomap:
iounmap(base);
return ret;
}
static struct gptc_clocksource gptc_clksrc = {
.cs = {
.name = "gptc",
.read = gptc_hpt_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS
| CLOCK_SOURCE_SUSPEND_NONSTOP,
{
return gptc_hpt_read(&gptc_clksrc.cs);
}
#ifdef CONFIG_GPTC_SCHED_CLOCK
/*
 * Software-extended 64-bit scheduler clock on top of the 32-bit GPTC
 * counter. A spinlock-protected wrap detector accumulates 2^32 into
 * cycle_offset each time the counter rolls over.
 * Fix: restore the lost opening #ifdef — the #endif below was
 * unbalanced after extraction.
 */
static unsigned long sched_clock_mult __read_mostly;
static cycle_t cycle_last, cycle_offset;
static DEFINE_SPINLOCK(gptc_shed_lock);

unsigned long long notrace sched_clock(void)
{
	cycle_t cycle;
	unsigned long flags;

	/* Before the clocksource is up, fall back to jiffies resolution */
	if (!gptc_clksrc_init)
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	spin_lock_irqsave(&gptc_shed_lock, flags);
	cycle = gptc_read_count();
	cycle &= gptc_clksrc.cs.mask;
	/* Counter wrapped */
	if (unlikely(cycle_last > cycle))
		cycle_offset += BIT_ULL(32);
	cycle_last = cycle;
	cycle += cycle_offset;
	spin_unlock_irqrestore(&gptc_shed_lock, flags);
	return cycle * sched_clock_mult;
}
#endif /* CONFIG_GPTC_SCHED_CLOCK */
#ifndef CONFIG_X86
/* sched_clock read hook: raw 32-bit GPTC count, extended by the core. */
static u64 __maybe_unused notrace gptc_read_sched_clock(void)
{
	return (u64)gptc_read_count();
}
#endif /* CONFIG_X86 */
/*
 * Claim the first unused clock-source timer and register it with the
 * clocksource core (and sched_clock on non-x86).
 * NOTE(review): local declarations, the clocksource_register_hz()
 * argument list, the sched_clock_register() branch and the bookkeeping
 * tail (used/gptc_clksrc_init flags) were lost in extraction and
 * reconstructed from the surviving fragments — confirm against the
 * original source.
 */
static int gptc_clocksource_init(void)
{
	int ret;
	struct gptc_timer *timer;

	list_for_each_entry(timer, &gptc_clksrc_list, clksrc) {
		if (!timer->used) {
			/* Only one clock source from GPTC allowed */
			if (gptc_clksrc_init)
				return -EEXIST;
			/* Record for VDSO */
			gptc_phy_base = timer->phy_base;
			/*
			 * Calculate a somewhat reasonable rating value
			 * in 10MHz
			 */
			gptc_clksrc.cs.rating =
				250 + timer->frequency / 10000000;
			gptc_clksrc.timer = timer;
			gptc_per_timer_init(timer);
			ret = clocksource_register_hz(&gptc_clksrc.cs,
						      timer->frequency);
			if (ret < 0)
				pr_warn("GPTC: Unable to register clocksource\n");
#ifndef CONFIG_X86
			sched_clock_register(gptc_read_sched_clock, 32,
					     timer->frequency);
#endif /* CONFIG_X86 */
#ifdef CONFIG_GPTC_SCHED_CLOCK
			sched_clock_mult = NSEC_PER_SEC / timer->frequency;
#endif /* CONFIG_GPTC_SCHED_CLOCK */
			timer->used = true;
			gptc_get(timer->gptc);
			gptc_clksrc_init = true;
			pr_debug("gptc %d timer %d clk src register @cpu%d\n",
				 timer->gptcid, timer->tid, timer->cpuid);
			return 0;
		}
	}
	return -EINVAL;
}
/*
 * Template clock_event_device copied into each per-CPU gptc_clockevent.
 * One-shot entry deliberately reuses the shutdown handler (stop+reset);
 * irq, cpumask and name are filled in per CPU at registration time.
 */
static struct clock_event_device gptc_per_timer_clockevent = {
	.name = "gptc_clockevent",
	.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.set_state_shutdown = gptc_clkevt_shutdown,
	.set_state_periodic = gptc_clkevt_periodic,
	.set_state_oneshot = gptc_clkevt_shutdown,
	.set_next_event = gptc_clkevt_next_event,
	.tick_resume = gptc_clkevt_resume,
	.irq = 0,
};
static int gptc_clockevent_cpu_init(unsigned int cpu,
struct clock_event_device *ce;
list_for_each_entry(timer, &gptc_clkevt_list, clkevt) {
if (!timer->used && (timer->cpuid == cpu)) {
memcpy(ce, &gptc_per_timer_clockevent, sizeof(*ce));
ce->irq = timer->irq;
ce->cpumask = cpumask_of(cpu);
ce->name = cd->name;
clockevents_config_and_register(ce, timer->frequency,
timer->used = true;
pr_debug("gptc %d timer %d clk evt register @cpu%d\n",
timer->gptcid, timer->tid, timer->cpuid);
return 0;
}
}
return -EINVAL;
}
/*
 * Tear down a CPU's clock event: free the IRQ, mask the hardware line,
 * unlink and free the timer, and drop the parent GPTC reference.
 * NOTE(review): the timer/gptc local assignments and the final return
 * were lost in extraction and reconstructed from the surviving checks.
 */
static int gptc_clockevent_cpu_exit(struct gptc_clockevent *cd)
{
	struct clock_event_device *levt;
	struct gptc_timer *timer;
	struct gptc *gptc;

	if (!cd)
		return -EINVAL;
	levt = &cd->ce;
	timer = cd->timer;
	if (!timer)
		return -EINVAL;
	if (levt->irq) {
		free_irq(levt->irq, cd);
		levt->irq = 0;
	}
	gptc_irq_mask(timer);
	list_del(&timer->clkevt);
	gptc = timer->gptc;
	kfree(timer);
	gptc_put(gptc);
	return 0;
}
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
/*
 * Request and pin the clock-event IRQ for every CPU that has a timer
 * assigned in the DT. Runs once at init; per-CPU registration with the
 * clockevents core happens later via CPU hotplug callbacks.
 */
static void gptc_clkevt_irq_init(void)
{
	int ret;
	int cpu;
	struct gptc_timer *timer;
	struct gptc_clockevent *gptc_evt;
	for_each_possible_cpu(cpu) {
		gptc_evt = per_cpu_ptr(&gptc_event_device, cpu);
		list_for_each_entry(timer, &gptc_clkevt_list, clkevt) {
			if (!timer->irq_registered && (timer->cpuid == cpu)) {
				/* Bind the timer to this CPU's event device */
				gptc_evt->timer = timer;
				gptc_evt->ticks_per_jiffy =
					DIV_ROUND_UP(timer->frequency, HZ);
				snprintf(gptc_evt->name, sizeof(gptc_evt->name),
					 "gptc_event%d", cpu);
				ret = request_irq(timer->irq,
						  gptc_timer_interrupt,
						  IRQF_TIMER | IRQF_NOBALANCING,
						  gptc_evt->name, gptc_evt);
				if (ret) {
					pr_err("gptc irq %d register failed @cpu%d\n",
					       timer->irq, cpu);
					break;
				}
				/* Tick interrupts must stay on their own CPU */
				irq_set_affinity(timer->irq, cpumask_of(cpu));
				timer->irq_registered = true;
				pr_debug("gptc %d timer %d irq %d register @cpu%d\n",
					 timer->gptcid, timer->tid, timer->irq,
					 timer->cpuid);
			}
		}
	}
}
gptc_clockevent_cpu_init(cpu, this_cpu_ptr(&gptc_event_device));
return 0;
}
/* CPU hot-unplug callback: tear down this CPU's clock event. */
static int gptc_dying_cpu(unsigned int cpu)
{
	gptc_clockevent_cpu_exit(this_cpu_ptr(&gptc_event_device));
	return 0;
}

/*
 * Register per-CPU event IRQs, then hook CPU hotplug so each CPU's
 * clock event is set up/torn down as it comes and goes.
 * NOTE(review): this function's header was lost in extraction; the name
 * is taken from the surviving call site in gptc_timer_init().
 */
static void gptc_clkevt_init(void)
{
	gptc_clkevt_irq_init();
	cpuhp_setup_state(CPUHP_AP_INTEL_GPTC_TIMER_STARTING,
			  "AP_INTEL_GPTC_TIMER_STARTING", gptc_starting_cpu,
			  gptc_dying_cpu);
}
/*
 * DT entry point (CLOCKSOURCE_OF_DECLARE): bring up the clocksource,
 * then the clock events (immediately on the boot CPU, via hotplug for
 * the rest). NOTE(review): @np is unused here — per-node parsing
 * appears to happen in gptc_of_init(); confirm how the two connect.
 */
static int __init gptc_timer_init(struct device_node *np)
{
	gptc_clocksource_init();
	/* Register immediately the clock event on BSP */
	gptc_clkevt_init();
#ifdef CONFIG_X86
	global_clock_event = &gptc_per_timer_clockevent;
#endif
	return 0;
}
CLOCKSOURCE_OF_DECLARE(lantiq_gptc_timer, "lantiq,gptc", gptc_timer_init);
CLOCKSOURCE_OF_DECLARE(intel_gptc_timer, "intel,gptc", gptc_timer_init);
/*
 * Claim the first unused heartbeat timer at arch_initcall time.
 * Returns 0 on success, -EINVAL when no free heartbeat timer exists.
 */
static int __init gptc_heartbeat_init(void)
{
	struct gptc_timer *timer;
	list_for_each_entry(timer, &gptc_heartbeat_list, heartbeat) {
		if (!timer->used) {
			gptc_per_timer_init(timer);
			timer->used = true;
			/* Pin the parent GPTC while the heartbeat owns a timer */
			gptc_get(timer->gptc);
			pr_debug("gptc %d timer %d heartbeat register @cpu%d\n",
				 timer->gptcid, timer->tid, timer->cpuid);
			return 0;
		}
	}
	return -EINVAL;
}
arch_initcall(gptc_heartbeat_init);
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#ifdef CONFIG_LTQ_VMB
#define GPTC_TC_THREAD_STACK_SIZE 4096
#define GPTC_TC_THREAD_STACK_RESERVED_SIZE (32 + sizeof(struct pt_regs))
static u8 tc_thread_stack[GPTC_TC_THREAD_STACK_SIZE] __aligned(16);
static u8 tc_thread_gp[GPTC_TC_THREAD_STACK_SIZE] __aligned(16);
/*
 * MIPS-MT hardware-thread body: enables the timer's YIELD qualifier bit
 * in CP0 YQMask, then loops forever yielding on it and dispatching the
 * registered callback whenever the GPTC interrupt is pending.
 * @arg0 carries the struct gptc_timer pointer (cast through u32);
 * @arg1 is unused.
 */
static void tc_thread(u32 arg0, u32 arg1)
{
	u32 mask, yqmask;
	struct gptc_timer *timer = (struct gptc_timer *)arg0;
	/* init the yq mask */
	mask = (1 << timer->yield_pin);
	yqmask = read_c0_yqmask();
	yqmask |= mask;
	write_c0_yqmask(yqmask);
	ehb(); /* execution hazard barrier after the CP0 write */
	while (1) {
		/* Block until the hardware yield qualifier fires */
		mips_mt_yield(mask);
		if (gptc_irq_read(timer)) {
			gptc_irq_ack(timer);
			gic_clear_edge(timer->irq);
			/* Do the call back stuff */
			if (timer->call_back)
				timer->call_back(timer->call_back_param);
		}
	}
}
/*
 * Placeholder ISR: the real event delivery happens in tc_thread() via
 * the hardware yield path; this handler only claims the line.
 */
static irqreturn_t tc_dummy_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
int gptc_ht_yield_init(struct gptc_ht_yield *param, void *call_back,
void *call_back_param)
{
struct gptc_timer *timer;
int tc_num, cpu;
unsigned long cycles, interval;
struct TC_launch_t tc_launch;
int ret;
if (!param || !call_back ||
param->interval == 0) {
pr_err("Bad parameter.\n");
return -EINVAL;
}
list_for_each_entry(timer, &gptc_ht_yield_list, ht_yield) {
if (!timer->used) {
cpu = smp_processor_id();
gptc_per_timer_init(timer);
timer->used = true;
gptc_get(timer->gptc);
timer->call_back = call_back;
timer->call_back_param = call_back_param;