Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,20 @@
# SoC selection for TI TMS320C64x+ family parts.  Selecting a SoC
# pulls in the matching support objects via the platforms Makefile.
config SOC_TMS320C6455
bool "TMS320C6455"
default n
config SOC_TMS320C6457
bool "TMS320C6457"
default n
config SOC_TMS320C6472
bool "TMS320C6472"
default n
config SOC_TMS320C6474
bool "TMS320C6474"
default n
config SOC_TMS320C6678
bool "TMS320C6678"
default n

View file

@ -0,0 +1,12 @@
#
# Makefile for arch/c6x/platforms
#
# Copyright 2010, 2011 Texas Instruments Incorporated
#
obj-y = platform.o cache.o megamod-pic.o pll.o plldata.o timer64.o
# Device State Control Registers driver (always built)
obj-y += dscr.o
# SoC objects
obj-$(CONFIG_SOC_TMS320C6455) += emif.o
obj-$(CONFIG_SOC_TMS320C6457) += emif.o

445
arch/c6x/platforms/cache.c Normal file
View file

@ -0,0 +1,445 @@
/*
* Copyright (C) 2011 Texas Instruments Incorporated
* Author: Mark Salter <msalter@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <asm/cache.h>
#include <asm/soc.h>
/*
* Internal Memory Control Registers for caches
*/
#define IMCR_CCFG 0x0000
#define IMCR_L1PCFG 0x0020
#define IMCR_L1PCC 0x0024
#define IMCR_L1DCFG 0x0040
#define IMCR_L1DCC 0x0044
#define IMCR_L2ALLOC0 0x2000
#define IMCR_L2ALLOC1 0x2004
#define IMCR_L2ALLOC2 0x2008
#define IMCR_L2ALLOC3 0x200c
#define IMCR_L2WBAR 0x4000
#define IMCR_L2WWC 0x4004
#define IMCR_L2WIBAR 0x4010
#define IMCR_L2WIWC 0x4014
#define IMCR_L2IBAR 0x4018
#define IMCR_L2IWC 0x401c
#define IMCR_L1PIBAR 0x4020
#define IMCR_L1PIWC 0x4024
#define IMCR_L1DWIBAR 0x4030
#define IMCR_L1DWIWC 0x4034
#define IMCR_L1DWBAR 0x4040
#define IMCR_L1DWWC 0x4044
#define IMCR_L1DIBAR 0x4048
#define IMCR_L1DIWC 0x404c
#define IMCR_L2WB 0x5000
#define IMCR_L2WBINV 0x5004
#define IMCR_L2INV 0x5008
#define IMCR_L1PINV 0x5028
#define IMCR_L1DWB 0x5040
#define IMCR_L1DWBINV 0x5044
#define IMCR_L1DINV 0x5048
#define IMCR_MAR_BASE 0x8000
#define IMCR_MAR96_111 0x8180
#define IMCR_MAR128_191 0x8200
#define IMCR_MAR224_239 0x8380
#define IMCR_L2MPFAR 0xa000
#define IMCR_L2MPFSR 0xa004
#define IMCR_L2MPFCR 0xa008
#define IMCR_L2MPLK0 0xa100
#define IMCR_L2MPLK1 0xa104
#define IMCR_L2MPLK2 0xa108
#define IMCR_L2MPLK3 0xa10c
#define IMCR_L2MPLKCMD 0xa110
#define IMCR_L2MPLKSTAT 0xa114
#define IMCR_L2MPPA_BASE 0xa200
#define IMCR_L1PMPFAR 0xa400
#define IMCR_L1PMPFSR 0xa404
#define IMCR_L1PMPFCR 0xa408
#define IMCR_L1PMPLK0 0xa500
#define IMCR_L1PMPLK1 0xa504
#define IMCR_L1PMPLK2 0xa508
#define IMCR_L1PMPLK3 0xa50c
#define IMCR_L1PMPLKCMD 0xa510
#define IMCR_L1PMPLKSTAT 0xa514
#define IMCR_L1PMPPA_BASE 0xa600
#define IMCR_L1DMPFAR 0xac00
#define IMCR_L1DMPFSR 0xac04
#define IMCR_L1DMPFCR 0xac08
#define IMCR_L1DMPLK0 0xad00
#define IMCR_L1DMPLK1 0xad04
#define IMCR_L1DMPLK2 0xad08
#define IMCR_L1DMPLK3 0xad0c
#define IMCR_L1DMPLKCMD 0xad10
#define IMCR_L1DMPLKSTAT 0xad14
#define IMCR_L1DMPPA_BASE 0xae00
#define IMCR_L2PDWAKE0 0xc040
#define IMCR_L2PDWAKE1 0xc044
#define IMCR_L2PDSLEEP0 0xc050
#define IMCR_L2PDSLEEP1 0xc054
#define IMCR_L2PDSTAT0 0xc060
#define IMCR_L2PDSTAT1 0xc064
/*
* CCFG register values and bits
*/
#define L2MODE_0K_CACHE 0x0
#define L2MODE_32K_CACHE 0x1
#define L2MODE_64K_CACHE 0x2
#define L2MODE_128K_CACHE 0x3
#define L2MODE_256K_CACHE 0x7
#define L2PRIO_URGENT 0x0
#define L2PRIO_HIGH 0x1
#define L2PRIO_MEDIUM 0x2
#define L2PRIO_LOW 0x3
#define CCFG_ID 0x100 /* Invalidate L1P bit */
#define CCFG_IP 0x200 /* Invalidate L1D bit */
static void __iomem *cache_base;
/*
* L1 & L2 caches generic functions
*/
#define imcr_get(reg) soc_readl(cache_base + (reg))
#define imcr_set(reg, value) \
do { \
soc_writel((value), cache_base + (reg)); \
soc_readl(cache_base + (reg)); \
} while (0)
/*
 * Spin until a previously started block cache operation completes.
 * The hardware clears the word-count (WC) register when the walk of
 * the programmed address range is done.
 */
static void cache_block_operation_wait(unsigned int wc_reg)
{
	/* Wait for completion */
	while (imcr_get(wc_reg))
		cpu_relax();
}

/* Serializes BAR/WC register programming in the helpers below. */
static DEFINE_SPINLOCK(cache_lock);

/*
 * Generic function to perform a block cache operation as
 * invalidate or writeback/invalidate
 *
 * An operation is started by writing the aligned base address to the
 * BAR register and the length in 32-bit words to the WC register.
 * The WC register is only 16 bits wide, so larger ranges are issued
 * as a sequence of chunks of at most 0xffff words each.
 */
static void cache_block_operation(unsigned int *start,
				  unsigned int *end,
				  unsigned int bar_reg,
				  unsigned int wc_reg)
{
	unsigned long flags;
	/* 32-bit word count of the L2-line-aligned span [start, end) */
	unsigned int wcnt =
		(L2_CACHE_ALIGN_CNT((unsigned int) end)
		 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
	unsigned int wc = 0;

	for (; wcnt; wcnt -= wc, start += wc) {
loop:
		spin_lock_irqsave(&cache_lock, flags);
		/*
		 * If another cache operation is occurring
		 */
		if (unlikely(imcr_get(wc_reg))) {
			spin_unlock_irqrestore(&cache_lock, flags);
			/* Wait for previous operation completion */
			cache_block_operation_wait(wc_reg);
			/* Try again */
			goto loop;
		}
		/* program base address first; writing WC starts the op */
		imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
		if (wcnt > 0xffff)
			wc = 0xffff;
		else
			wc = wcnt;
		/* Set word count value in the WC register */
		imcr_set(wc_reg, wc & 0xffff);
		spin_unlock_irqrestore(&cache_lock, flags);
		/* Wait for completion */
		cache_block_operation_wait(wc_reg);
	}
}

/*
 * Same as cache_block_operation() but does not wait for the final
 * chunk to finish.  Intermediate chunks (ranges needing more than
 * one 0xffff-word programming) are still waited on so the WC
 * register can be reprogrammed.
 */
static void cache_block_operation_nowait(unsigned int *start,
					 unsigned int *end,
					 unsigned int bar_reg,
					 unsigned int wc_reg)
{
	unsigned long flags;
	unsigned int wcnt =
		(L2_CACHE_ALIGN_CNT((unsigned int) end)
		 - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
	unsigned int wc = 0;

	for (; wcnt; wcnt -= wc, start += wc) {
		spin_lock_irqsave(&cache_lock, flags);
		imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
		if (wcnt > 0xffff)
			wc = 0xffff;
		else
			wc = wcnt;
		/* Set word count value in the WC register */
		imcr_set(wc_reg, wc & 0xffff);
		spin_unlock_irqrestore(&cache_lock, flags);
		/* Don't wait for completion on last cache operation */
		if (wcnt > 0xffff)
			cache_block_operation_wait(wc_reg);
	}
}
/*
 * L1 caches management
 */

/*
 * Disable L1 caches
 *
 * Writing 0 to the L1 program/data config registers turns the caches
 * off.  Each write is followed by a read back of the same register so
 * the MMIO write is known to have landed before continuing.  The
 * original code stored that read into a set-but-unused "dummy"
 * variable; the (void) cast keeps the read while silencing the
 * warning.
 */
void L1_cache_off(void)
{
	imcr_set(IMCR_L1PCFG, 0);
	(void) imcr_get(IMCR_L1PCFG);	/* flush posted write */

	imcr_set(IMCR_L1DCFG, 0);
	(void) imcr_get(IMCR_L1DCFG);
}

/*
 * Enable L1 caches
 *
 * Value 7 selects the full cache configuration for both L1P and L1D
 * (presumably the maximum size mode -- see the C64x+ megamodule
 * reference for the L1xCFG encoding).
 */
void L1_cache_on(void)
{
	imcr_set(IMCR_L1PCFG, 7);
	(void) imcr_get(IMCR_L1PCFG);

	imcr_set(IMCR_L1DCFG, 7);
	(void) imcr_get(IMCR_L1DCFG);
}
/*
 * L1P global-invalidate all
 *
 * Each global operation sets bit 0 of its command register and then
 * polls that bit until the hardware clears it.
 */
void L1P_cache_global_invalidate(void)
{
	unsigned int set = 1;

	imcr_set(IMCR_L1PINV, set);
	while (imcr_get(IMCR_L1PINV) & 1)
		cpu_relax();
}

/*
 * L1D global-invalidate all
 *
 * Warning: this operation causes all updated data in L1D to
 * be discarded rather than written back to the lower levels of
 * memory
 */
void L1D_cache_global_invalidate(void)
{
	unsigned int set = 1;

	imcr_set(IMCR_L1DINV, set);
	while (imcr_get(IMCR_L1DINV) & 1)
		cpu_relax();
}

/* L1D global writeback of all lines. */
void L1D_cache_global_writeback(void)
{
	unsigned int set = 1;

	imcr_set(IMCR_L1DWB, set);
	while (imcr_get(IMCR_L1DWB) & 1)
		cpu_relax();
}

/* L1D global writeback + invalidate of all lines. */
void L1D_cache_global_writeback_invalidate(void)
{
	unsigned int set = 1;

	imcr_set(IMCR_L1DWBINV, set);
	while (imcr_get(IMCR_L1DWBINV) & 1)
		cpu_relax();
}

/*
 * L2 caches management
 */

/*
 * Set L2 operation mode
 *
 * @mode: one of the L2MODE_* values; only the low 3 bits are used.
 */
void L2_cache_set_mode(unsigned int mode)
{
	unsigned int ccfg = imcr_get(IMCR_CCFG);

	/* Clear and set the L2MODE bits in CCFG */
	ccfg &= ~7;
	ccfg |= (mode & 7);
	imcr_set(IMCR_CCFG, ccfg);
	/* read back so the mode change has taken effect before returning */
	ccfg = imcr_get(IMCR_CCFG);
}

/*
 * L2 global-writeback and global-invalidate all
 */
void L2_cache_global_writeback_invalidate(void)
{
	imcr_set(IMCR_L2WBINV, 1);
	while (imcr_get(IMCR_L2WBINV))
		cpu_relax();
}

/*
 * L2 global-writeback all
 */
void L2_cache_global_writeback(void)
{
	imcr_set(IMCR_L2WB, 1);
	while (imcr_get(IMCR_L2WB))
		cpu_relax();
}
/*
 * Cacheability controls
 *
 * Each MAR register controls one 16MB address region (hence the
 * ">> 24"); bit 0 of a MAR is the cacheable-enable bit.
 */
void enable_caching(unsigned long start, unsigned long end)
{
	unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
	unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);

	/* end is inclusive: the MAR covering 'end' is also updated */
	for (; mar <= mar_e; mar += 4)
		imcr_set(mar, imcr_get(mar) | 1);
}

/* Clear the cacheable bit of every MAR covering [start, end]. */
void disable_caching(unsigned long start, unsigned long end)
{
	unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
	unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);

	for (; mar <= mar_e; mar += 4)
		imcr_set(mar, imcr_get(mar) & ~1);
}
/*
 * L1 block operations
 *
 * Each wrapper hands a [start, end) address range plus the matching
 * BAR/WC register pair to the generic block-operation helper.
 */
void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1PIBAR, IMCR_L1PIWC);
}

void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DIBAR, IMCR_L1DIWC);
}

void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DWIBAR, IMCR_L1DWIWC);
}

void L1D_cache_block_writeback(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L1DWBAR, IMCR_L1DWWC);
}

/*
 * L2 block operations
 */
void L2_cache_block_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2IBAR, IMCR_L2IWC);
}

void L2_cache_block_writeback(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2WBAR, IMCR_L2WWC);
}

void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
{
	cache_block_operation((unsigned int *) start,
			      (unsigned int *) end,
			      IMCR_L2WIBAR, IMCR_L2WIWC);
}

/* _nowait variants: do not wait for the final chunk to complete. */
void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2IBAR, IMCR_L2IWC);
}

void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2WBAR, IMCR_L2WWC);
}

void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
						unsigned int end)
{
	cache_block_operation_nowait((unsigned int *) start,
				     (unsigned int *) end,
				     IMCR_L2WIBAR, IMCR_L2WIWC);
}
/*
 * L1 and L2 caches configuration
 *
 * Map the cache controller registers from the "ti,c64x+cache" DT
 * node, set the L2 operating mode, and enable the L1 caches.
 * Silently does nothing if the node is absent or cannot be mapped.
 */
void __init c6x_cache_init(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
	if (!node)
		return;

	cache_base = of_iomap(node, 0);

	of_node_put(node);

	if (!cache_base)
		return;

	/* Set L2 cache on the whole L2 SRAM memory */
	/* (L2MODE_SIZE is provided by SoC-specific code, not visible here) */
	L2_cache_set_mode(L2MODE_SIZE);

	/* Enable L1 */
	L1_cache_on();
}

598
arch/c6x/platforms/dscr.c Normal file
View file

@ -0,0 +1,598 @@
/*
* Device State Control Registers driver
*
* Copyright (C) 2011 Texas Instruments Incorporated
* Author: Mark Salter <msalter@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* The Device State Control Registers (DSCR) provide SoC level control over
* a number of peripherals. Details vary considerably among the various SoC
* parts. In general, the DSCR block will provide one or more configuration
* registers often protected by a lock register. One or more key values must
* be written to a lock register in order to unlock the configuration register.
* The configuration register may be used to enable (and disable in some
* cases) SoC pin drivers, peripheral clock sources (internal or pin), etc.
* In some cases, a configuration register is write once or the individual
* bits are write once. That is, you may be able to enable a device, but
* will not be able to disable it.
*
* In addition to device configuration, the DSCR block may provide registers
* which are used to reset SoC peripherals, provide device ID information,
* provide MAC addresses, and other miscellaneous functions.
*/
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/soc.h>
#include <asm/dscr.h>
#define MAX_DEVSTATE_IDS 32
#define MAX_DEVCTL_REGS 8
#define MAX_DEVSTAT_REGS 8
#define MAX_LOCKED_REGS 4
#define MAX_SOC_EMACS 2
/* One EMAC RMII reset control: register offset and reset bit mask. */
struct rmii_reset_reg {
	u32 reg;
	u32 mask;
};

/*
 * Some registers may be locked. In order to write to these
 * registers, the key value must first be written to the lockreg.
 */
struct locked_reg {
	u32 reg;	/* offset from base */
	u32 lockreg;	/* offset from base */
	u32 key;	/* unlock key */
};

/*
 * This describes a contiguous area of like control bits used to enable/disable
 * SoC devices. Each controllable device is given an ID which is used by the
 * individual device drivers to control the device state. These IDs start at
 * zero and are assigned sequentially to the control bitfield ranges described
 * by this structure.
 */
struct devstate_ctl_reg {
	u32 reg;	/* register holding the control bits */
	u8 start_id;	/* start id of this range */
	u8 num_ids;	/* number of devices in this range */
	u8 enable_only;	/* bits are write-once to enable only */
	u8 enable;	/* value used to enable device */
	u8 disable;	/* value used to disable device */
	u8 shift;	/* starting (rightmost) bit in range */
	u8 nbits;	/* number of bits per device */
};

/*
 * This describes a region of status bits indicating the state of
 * various devices. This is used internally to wait for status
 * change completion when enabling/disabling a device. Status is
 * optional and not all device controls will have a corresponding
 * status.
 */
struct devstate_stat_reg {
	u32 reg;	/* register holding the status bits */
	u8 start_id;	/* start id of this range */
	u8 num_ids;	/* number of devices in this range */
	u8 enable;	/* value indicating enabled state */
	u8 disable;	/* value indicating disabled state */
	u8 shift;	/* starting (rightmost) bit in range */
	u8 nbits;	/* number of bits per device */
};

/* Per-device-ID pointers into the devctl[]/devstat[] tables below. */
struct devstate_info {
	struct devstate_ctl_reg *ctl;
	struct devstate_stat_reg *stat;
};

/* These are callbacks to SOC-specific code. */
struct dscr_ops {
	void (*init)(struct device_node *node);
};

/* All DSCR driver state, populated from the device tree at probe. */
struct dscr_regs {
	spinlock_t lock;	/* serializes register read-modify-write */
	void __iomem *base;
	u32 kick_reg[2];	/* offsets of the two global kick registers */
	u32 kick_key[2];	/* kick unlock keys; [0] == 0 means "none" */
	struct locked_reg locked[MAX_LOCKED_REGS];
	struct devstate_info devstate_info[MAX_DEVSTATE_IDS];
	struct rmii_reset_reg rmii_resets[MAX_SOC_EMACS];
	struct devstate_ctl_reg devctl[MAX_DEVCTL_REGS];
	struct devstate_stat_reg devstat[MAX_DEVSTAT_REGS];
};
static struct dscr_regs dscr;
/*
 * Look up the lock descriptor for a protected register.
 * Returns NULL if the register at offset @reg has no per-register
 * lock configured (entries with a zero key are unused slots).
 */
static struct locked_reg *find_locked_reg(u32 reg)
{
	struct locked_reg *r = dscr.locked;
	struct locked_reg *const end = r + MAX_LOCKED_REGS;

	while (r < end) {
		if (r->key && r->reg == reg)
			return r;
		r++;
	}
	return NULL;
}
/*
 * Write to a register with one lock
 *
 * @reg:  offset of the protected register
 * @val:  value to write
 * @lock: offset of the lock register
 * @key:  unlock key to write to the lock register
 */
static void dscr_write_locked1(u32 reg, u32 val,
			       u32 lock, u32 key)
{
	void __iomem *reg_addr = dscr.base + reg;
	void __iomem *lock_addr = dscr.base + lock;

	/*
	 * For some registers, the lock is relocked after a short number
	 * of cycles. We have to put the lock write and register write in
	 * the same fetch packet to meet this timing. The .align ensures
	 * the two stw instructions are in the same fetch packet.
	 */
	asm volatile ("b .s2 0f\n"
		      "nop 5\n"
		      " .align 5\n"
		      "0:\n"
		      "stw .D1T2 %3,*%2\n"
		      "stw .D1T2 %1,*%0\n"
		      :
		      : "a"(reg_addr), "b"(val), "a"(lock_addr), "b"(key)
	);

	/* in case the hw doesn't reset the lock */
	soc_writel(0, lock_addr);
}
/*
 * Write to a register protected by two lock registers
 *
 * Both keys are written first to unlock, then the payload value,
 * then zero is written back to both lock registers (presumably
 * re-engaging the locks -- matches dscr_write_locked1() above).
 */
static void dscr_write_locked2(u32 reg, u32 val,
			       u32 lock0, u32 key0,
			       u32 lock1, u32 key1)
{
	soc_writel(key0, dscr.base + lock0);
	soc_writel(key1, dscr.base + lock1);
	soc_writel(val, dscr.base + reg);
	soc_writel(0, dscr.base + lock0);
	soc_writel(0, dscr.base + lock1);
}

/*
 * Write @val to the DSCR register at offset @reg, transparently
 * handling any lock protecting it: a per-register lock if one is
 * configured, else the global kick registers if present, else a
 * plain write.
 */
static void dscr_write(u32 reg, u32 val)
{
	struct locked_reg *lock;

	lock = find_locked_reg(reg);
	if (lock)
		dscr_write_locked1(reg, val, lock->lockreg, lock->key);
	else if (dscr.kick_key[0])
		dscr_write_locked2(reg, val, dscr.kick_reg[0], dscr.kick_key[0],
				   dscr.kick_reg[1], dscr.kick_key[1]);
	else
		soc_writel(val, dscr.base + reg);
}
/*
 * Drivers can use this interface to enable/disable SoC IP blocks.
 *
 * @id:    device state ID (0 .. MAX_DEVSTATE_IDS-1), assigned by the
 *         devstate ranges parsed from the device tree
 * @state: DSCR_DEVSTATE_ENABLED or DSCR_DEVSTATE_DISABLED
 *
 * Silently returns on an invalid/unconfigured id, an unknown state,
 * or a disable request on a write-once (enable_only) control.  If a
 * status bitfield is configured for the device, busy-waits until the
 * hardware reports the requested state.
 */
void dscr_set_devstate(int id, enum dscr_devstate_t state)
{
	struct devstate_ctl_reg *ctl;
	struct devstate_stat_reg *stat;
	struct devstate_info *info;
	u32 ctl_val, val;
	int ctl_shift, ctl_mask;
	unsigned long flags;

	if (!dscr.base)
		return;

	if (id < 0 || id >= MAX_DEVSTATE_IDS)
		return;

	info = &dscr.devstate_info[id];
	ctl = info->ctl;
	stat = info->stat;

	if (ctl == NULL)
		return;

	/* position of this device's field within the control register */
	ctl_shift = ctl->shift + ctl->nbits * (id - ctl->start_id);
	ctl_mask = ((1 << ctl->nbits) - 1) << ctl_shift;

	switch (state) {
	case DSCR_DEVSTATE_ENABLED:
		ctl_val = ctl->enable << ctl_shift;
		break;
	case DSCR_DEVSTATE_DISABLED:
		if (ctl->enable_only)
			return;
		ctl_val = ctl->disable << ctl_shift;
		break;
	default:
		return;
	}

	/* read-modify-write of the control field under the DSCR lock */
	spin_lock_irqsave(&dscr.lock, flags);
	val = soc_readl(dscr.base + ctl->reg);
	val &= ~ctl_mask;
	val |= ctl_val;
	dscr_write(ctl->reg, val);
	spin_unlock_irqrestore(&dscr.lock, flags);

	if (!stat)
		return;

	/* poll the matching status field until the new state is visible */
	ctl_shift = stat->shift + stat->nbits * (id - stat->start_id);

	if (state == DSCR_DEVSTATE_ENABLED)
		ctl_val = stat->enable;
	else
		ctl_val = stat->disable;

	do {
		val = soc_readl(dscr.base + stat->reg);
		val >>= ctl_shift;
		val &= ((1 << stat->nbits) - 1);
	} while (val != ctl_val);
}
EXPORT_SYMBOL(dscr_set_devstate);
/*
 * Drivers can use this to reset RMII module.
 *
 * @id:     EMAC instance (0 .. MAX_SOC_EMACS-1)
 * @assert: non-zero asserts the reset, zero de-asserts it
 *
 * No-op if id is out of range or no reset register was described in
 * the device tree for this instance (mask == 0).
 */
void dscr_rmii_reset(int id, int assert)
{
	struct rmii_reset_reg *r;
	unsigned long flags;
	u32 val;

	if (id < 0 || id >= MAX_SOC_EMACS)
		return;

	r = &dscr.rmii_resets[id];
	if (r->mask == 0)
		return;

	/* read-modify-write of the reset bit under the DSCR lock */
	spin_lock_irqsave(&dscr.lock, flags);
	val = soc_readl(dscr.base + r->reg);
	if (assert)
		dscr_write(r->reg, val | r->mask);
	else
		dscr_write(r->reg, val & ~(r->mask));
	spin_unlock_irqrestore(&dscr.lock, flags);
}
EXPORT_SYMBOL(dscr_rmii_reset);
/*
 * Read the SoC DEVSTAT register, if the device tree gives its offset
 * via "ti,dscr-devstat", into c6x_devstat.  The (possibly unchanged)
 * value is always logged.
 */
static void __init dscr_parse_devstat(struct device_node *node,
				      void __iomem *base)
{
	u32 val;
	int err;

	err = of_property_read_u32_array(node, "ti,dscr-devstat", &val, 1);
	if (!err)
		c6x_devstat = soc_readl(base + val);
	printk(KERN_INFO "DEVSTAT: %08x\n", c6x_devstat);
}

/*
 * Parse "ti,dscr-silicon-rev" = <reg shift mask> and derive
 * c6x_silicon_rev as (readl(base + reg) >> shift) & mask.
 */
static void __init dscr_parse_silicon_rev(struct device_node *node,
					  void __iomem *base)
{
	u32 vals[3];
	int err;

	err = of_property_read_u32_array(node, "ti,dscr-silicon-rev", vals, 3);
	if (!err) {
		c6x_silicon_rev = soc_readl(base + vals[0]);
		c6x_silicon_rev >>= vals[1];
		c6x_silicon_rev &= vals[2];
	}
}
/*
* Some SoCs will have a pair of fuse registers which hold
* an ethernet MAC address. The "ti,dscr-mac-fuse-regs"
* property is a mapping from fuse register bytes to MAC
* address bytes. The expected format is:
*
* ti,dscr-mac-fuse-regs = <reg0 b3 b2 b1 b0
* reg1 b3 b2 b1 b0>
*
* reg0 and reg1 are the offsets of the two fuse registers.
* b3-b0 positionally represent bytes within the fuse register.
* b3 is the most significant byte and b0 is the least.
* Allowable values for b3-b0 are:
*
* 0 = fuse register byte not used in MAC address
* 1-6 = index+1 into c6x_fuse_mac[]
*/
/* See the "ti,dscr-mac-fuse-regs" layout description above. */
static void __init dscr_parse_mac_fuse(struct device_node *node,
				       void __iomem *base)
{
	u32 vals[10], fuse;
	int f, i, j, err;

	err = of_property_read_u32_array(node, "ti,dscr-mac-fuse-regs",
					 vals, 10);
	if (err)
		return;

	/* two fuse registers, each described by <reg b3 b2 b1 b0> */
	for (f = 0; f < 2; f++) {
		fuse = soc_readl(base + vals[f * 5]);
		/* i walks the fuse register bytes from MSB (24) to LSB (0) */
		for (j = (f * 5) + 1, i = 24; i >= 0; i -= 8, j++)
			if (vals[j] && vals[j] <= 6)
				c6x_fuse_mac[vals[j] - 1] = fuse >> i;
	}
}
/*
 * Parse "ti,dscr-rmii-resets": a list of <reg mask> pairs, one per
 * EMAC, saved for later use by dscr_rmii_reset().
 */
static void __init dscr_parse_rmii_resets(struct device_node *node,
					  void __iomem *base)
{
	const __be32 *p;
	int i, size;

	/* look for RMII reset registers */
	p = of_get_property(node, "ti,dscr-rmii-resets", &size);
	if (p) {
		/* parse all the reg/mask pairs we can handle */
		size /= (sizeof(*p) * 2);
		if (size > MAX_SOC_EMACS)
			size = MAX_SOC_EMACS;
		for (i = 0; i < size; i++) {
			dscr.rmii_resets[i].reg = be32_to_cpup(p++);
			dscr.rmii_resets[i].mask = be32_to_cpup(p++);
		}
	}
}

/*
 * Parse "ti,dscr-privperm" = <reg value> and write the value to the
 * register at that offset (through the lock-aware dscr_write()).
 */
static void __init dscr_parse_privperm(struct device_node *node,
				       void __iomem *base)
{
	u32 vals[2];
	int err;

	err = of_property_read_u32_array(node, "ti,dscr-privperm", vals, 2);
	if (err)
		return;
	dscr_write(vals[0], vals[1]);
}
/*
* SoCs may have "locked" DSCR registers which can only be written
* after writing a key value to a lock register. These
* registers can be described with the "ti,dscr-locked-regs" property.
* This property provides a list of register descriptions with each
* description consisting of three values.
*
* ti,dscr-locked-regs = <reg0 lockreg0 key0
* ...
* regN lockregN keyN>;
*
* reg is the offset of the locked register
* lockreg is the offset of the lock register
* key is the unlock key written to lockreg
*
*/
/* See the "ti,dscr-locked-regs" layout description above. */
static void __init dscr_parse_locked_regs(struct device_node *node,
					  void __iomem *base)
{
	struct locked_reg *r;
	const __be32 *p;
	int i, size;

	p = of_get_property(node, "ti,dscr-locked-regs", &size);
	if (p) {
		/* parse all the register descriptions we can handle */
		size /= (sizeof(*p) * 3);
		if (size > MAX_LOCKED_REGS)
			size = MAX_LOCKED_REGS;
		for (i = 0; i < size; i++) {
			r = &dscr.locked[i];
			r->reg = be32_to_cpup(p++);
			r->lockreg = be32_to_cpup(p++);
			r->key = be32_to_cpup(p++);
		}
	}
}
/*
* SoCs may have DSCR registers which are only write enabled after
* writing specific key values to two registers. The two key registers
* and the key values can be parsed from a "ti,dscr-kick-regs"
* property with the following layout:
*
* ti,dscr-kick-regs = <kickreg0 key0 kickreg1 key1>
*
* kickreg is the offset of the "kick" register
* key is the value which unlocks writing for protected regs
*/
/*
 * Parse the "ti,dscr-kick-regs" property: two kick (lock) register
 * offsets, each paired with the key that write-enables the protected
 * registers.  On any parse failure the kick state is left untouched
 * (all zeroes, meaning "no kick registers").
 */
static void __init dscr_parse_kick_regs(struct device_node *node,
					void __iomem *base)
{
	u32 v[4];

	if (of_property_read_u32_array(node, "ti,dscr-kick-regs", v, 4))
		return;

	dscr.kick_reg[0] = v[0];
	dscr.kick_key[0] = v[1];
	dscr.kick_reg[1] = v[2];
	dscr.kick_key[1] = v[3];
}
/*
* SoCs may provide controls to enable/disable individual IP blocks. These
* controls in the DSCR usually control pin drivers but also may control
* clocking and or resets. The device tree is used to describe the bitfields
* in registers used to control device state. The number of bits and their
* values may vary even within the same register.
*
* The layout of these bitfields is described by the ti,dscr-devstate-ctl-regs
* property. This property is a list where each element describes a contiguous
* range of control fields with like properties. Each element of the list
* consists of 7 cells with the following values:
*
* start_id num_ids reg enable disable start_bit nbits
*
* start_id is device id for the first device control in the range
* num_ids is the number of device controls in the range
* reg is the offset of the register holding the control bits
* enable is the value to enable a device
* disable is the value to disable a device (0xffffffff if cannot disable)
* start_bit is the bit number of the first bit in the range
* nbits is the number of bits per device control
*/
/* See the "ti,dscr-devstate-ctl-regs" layout description above. */
static void __init dscr_parse_devstate_ctl_regs(struct device_node *node,
						void __iomem *base)
{
	struct devstate_ctl_reg *r;
	const __be32 *p;
	int i, j, size;

	p = of_get_property(node, "ti,dscr-devstate-ctl-regs", &size);
	if (p) {
		/* parse all the ranges we can handle */
		size /= (sizeof(*p) * 7);
		if (size > MAX_DEVCTL_REGS)
			size = MAX_DEVCTL_REGS;
		for (i = 0; i < size; i++) {
			r = &dscr.devctl[i];
			r->start_id = be32_to_cpup(p++);
			r->num_ids = be32_to_cpup(p++);
			r->reg = be32_to_cpup(p++);
			r->enable = be32_to_cpup(p++);
			r->disable = be32_to_cpup(p++);
			/* disable == 0xffffffff marks a write-once control */
			if (r->disable == 0xffffffff)
				r->enable_only = 1;
			r->shift = be32_to_cpup(p++);
			r->nbits = be32_to_cpup(p++);
			/* point every covered device ID at this range.
			 * NOTE(review): no bound check against
			 * MAX_DEVSTATE_IDS; a malformed DT could overrun
			 * devstate_info[] -- confirm DT is trusted. */
			for (j = r->start_id;
			     j < (r->start_id + r->num_ids);
			     j++)
				dscr.devstate_info[j].ctl = r;
		}
	}
}
/*
* SoCs may provide status registers indicating the state (enabled/disabled) of
* devices on the SoC. The device tree is used to describe the bitfields in
* registers used to provide device status. The number of bits and their
* values used to provide status may vary even within the same register.
*
* The layout of these bitfields is described by the ti,dscr-devstate-stat-regs
* property. This property is a list where each element describes a contiguous
* range of status fields with like properties. Each element of the list
* consists of 7 cells with the following values:
*
* start_id num_ids reg enable disable start_bit nbits
*
* start_id is device id for the first device status in the range
* num_ids is the number of devices covered by the range
* reg is the offset of the register holding the status bits
* enable is the value indicating device is enabled
* disable is the value indicating device is disabled
* start_bit is the bit number of the first bit in the range
* nbits is the number of bits per device status
*/
/* See the "ti,dscr-devstate-stat-regs" layout description above. */
static void __init dscr_parse_devstate_stat_regs(struct device_node *node,
						 void __iomem *base)
{
	struct devstate_stat_reg *r;
	const __be32 *p;
	int i, j, size;

	p = of_get_property(node, "ti,dscr-devstate-stat-regs", &size);
	if (p) {
		/* parse all the ranges we can handle */
		size /= (sizeof(*p) * 7);
		if (size > MAX_DEVSTAT_REGS)
			size = MAX_DEVSTAT_REGS;
		for (i = 0; i < size; i++) {
			r = &dscr.devstat[i];
			r->start_id = be32_to_cpup(p++);
			r->num_ids = be32_to_cpup(p++);
			r->reg = be32_to_cpup(p++);
			r->enable = be32_to_cpup(p++);
			r->disable = be32_to_cpup(p++);
			r->shift = be32_to_cpup(p++);
			r->nbits = be32_to_cpup(p++);
			/* NOTE(review): as with the ctl parser, there is
			 * no bound check against MAX_DEVSTATE_IDS here. */
			for (j = r->start_id;
			     j < (r->start_id + r->num_ids);
			     j++)
				dscr.devstate_info[j].stat = r;
		}
	}
}
static struct of_device_id dscr_ids[] __initdata = {
	{ .compatible = "ti,c64x+dscr" },
	{}
};

/*
 * Probe for DSCR area.
 *
 * This has to be done early on in case timer or interrupt controller
 * needs something. e.g. On C6455 SoC, timer must be enabled through
 * DSCR before it is functional.
 */
void __init dscr_probe(void)
{
	struct device_node *node;
	void __iomem *base;

	spin_lock_init(&dscr.lock);

	node = of_find_matching_node(NULL, dscr_ids);
	if (!node)
		return;

	base = of_iomap(node, 0);
	if (!base) {
		of_node_put(node);
		return;
	}
	dscr.base = base;

	/* parse the various optional DSCR descriptions from the DT */
	dscr_parse_devstat(node, base);
	dscr_parse_silicon_rev(node, base);
	dscr_parse_mac_fuse(node, base);
	dscr_parse_rmii_resets(node, base);
	dscr_parse_locked_regs(node, base);
	dscr_parse_kick_regs(node, base);
	dscr_parse_devstate_ctl_regs(node, base);
	dscr_parse_devstate_stat_regs(node, base);
	dscr_parse_privperm(node, base);
	/* NOTE(review): the node reference is kept (no of_node_put) on
	 * the success path -- presumably intentional for this __init
	 * singleton; confirm. */
}

87
arch/c6x/platforms/emif.c Normal file
View file

@ -0,0 +1,87 @@
/*
* External Memory Interface
*
* Copyright (C) 2011 Texas Instruments Incorporated
* Author: Mark Salter <msalter@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <asm/soc.h>
#include <asm/dscr.h>
#define NUM_EMIFA_CHIP_ENABLES 4

/*
 * EMIFA memory-mapped register layout.  Offsets are encoded by the
 * reserved[] padding fields.  Only bprio, cecfg[] and awcc are
 * written by this driver; the remaining names follow the hardware
 * register names (not used here).
 */
struct emifa_regs {
	u32 midr;
	u32 stat;
	u32 reserved1[6];
	u32 bprio;	/* burst priority ("ti,emifa-burst-priority") */
	u32 reserved2[23];
	u32 cecfg[NUM_EMIFA_CHIP_ENABLES];	/* chip-enable config */
	u32 reserved3[4];
	u32 awcc;	/* async wait control ("ti,emifa-async-wait-control") */
	u32 reserved4[7];
	u32 intraw;
	u32 intmsk;
	u32 intmskset;
	u32 intmskclr;
};
static struct of_device_id emifa_match[] __initdata = {
{ .compatible = "ti,c64x+emifa" },
{}
};
/*
 * Parse device tree for existence of an EMIF (External Memory Interface)
 * and initialize it if found.
 *
 * Always returns 0: the absence of an EMIFA node (or failure to map
 * it) is not an error for the rest of the system.
 */
static int __init c6x_emifa_init(void)
{
	struct emifa_regs __iomem *regs;
	struct device_node *node;
	const __be32 *p;
	u32 val;
	int i, len, err;

	node = of_find_matching_node(NULL, emifa_match);
	if (!node)
		return 0;

	regs = of_iomap(node, 0);
	if (!regs) {
		/* fix: the node reference was leaked on this path */
		of_node_put(node);
		return 0;
	}

	/* look for a dscr-based enable for emifa pin buffers */
	err = of_property_read_u32_array(node, "ti,dscr-dev-enable", &val, 1);
	if (!err)
		dscr_set_devstate(val, DSCR_DEVSTATE_ENABLED);

	/* set up the chip enables */
	p = of_get_property(node, "ti,emifa-ce-config", &len);
	if (p) {
		len /= sizeof(u32);
		if (len > NUM_EMIFA_CHIP_ENABLES)
			len = NUM_EMIFA_CHIP_ENABLES;
		/*
		 * fix: was "i <= len", which read one cell past the end
		 * of the property and, when all four chip enables were
		 * given, wrote past cecfg[] into reserved register space.
		 */
		for (i = 0; i < len; i++)
			soc_writel(be32_to_cpup(&p[i]), &regs->cecfg[i]);
	}

	err = of_property_read_u32_array(node, "ti,emifa-burst-priority", &val, 1);
	if (!err)
		soc_writel(val, &regs->bprio);

	err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1);
	if (!err)
		soc_writel(val, &regs->awcc);

	iounmap(regs);
	of_node_put(node);
	return 0;
}
pure_initcall(c6x_emifa_init);

View file

@ -0,0 +1,346 @@
/*
* Support for C64x+ Megamodule Interrupt Controller
*
* Copyright (C) 2010, 2011 Texas Instruments Incorporated
* Contributed by: Mark Salter <msalter@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/soc.h>
#include <asm/megamod-pic.h>
#define NR_COMBINERS 4
#define NR_MUX_OUTPUTS 12

/* Mux slot value meaning "no megamodule event routed here". */
#define IRQ_UNMAPPED 0xffff

/*
 * Megamodule Interrupt Controller register layout
 */
struct megamod_regs {
	u32 evtflag[8];
	u32 evtset[8];
	u32 evtclr[8];
	u32 reserved0[8];
	u32 evtmask[8];
	u32 mevtflag[8];
	u32 expmask[8];
	u32 mexpflag[8];
	u32 intmux_unused;
	u32 intmux[7];
	u32 reserved1[8];
	u32 aegmux[2];
	u32 reserved2[14];
	u32 intxstat;
	u32 intxclr;
	u32 intdmask;
	u32 reserved3[13];
	u32 evtasrt;
};

/* Driver state for one megamodule PIC instance. */
struct megamod_pic {
	struct irq_domain *irqhost;
	struct megamod_regs __iomem *regs;
	raw_spinlock_t lock;	/* protects evtmask read-modify-write */
	/* hw mux mapping */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};

/* Singleton instance -- presumably assigned by the init code below. */
static struct megamod_pic *mm_pic;

/* Handler data for each cascaded combined-event interrupt. */
struct megamod_cascade_data {
	struct megamod_pic *pic;
	int index;	/* combiner number (0 .. NR_COMBINERS-1) */
};
static struct megamod_cascade_data cascade_data[NR_COMBINERS];
/* irq_chip .irq_mask: set the event's bit in its EVTMASK register. */
static void mask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	/* 32 events per mask register */
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

/* irq_chip .irq_unmask: clear the event's bit in its EVTMASK register. */
static void unmask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

static struct irq_chip megamod_chip = {
	.name = "megamod",
	.irq_mask = mask_megamod,
	.irq_unmask = unmask_megamod,
};
/*
 * Chained handler for one combined-event input: drain all pending
 * events in this combiner's MEVTFLAG register, acking each in EVTCLR
 * and dispatching its mapped virq.
 */
static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);
	pic = cascade->pic;
	idx = cascade->index;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);
		/* hwirq number is combiner*32 + bit position */
		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);
		/* ack the event in hardware before handling it */
		soc_writel(1 << n, &pic->regs->evtclr[idx]);
		generic_handle_irq(irq);
	}
}
/*
 * irq_domain .map: bind a virq to a megamodule event (hwirq).
 * Refuses (returns -1 for) hwirqs that are muxed straight to the core
 * controller, since those are not handled through this domain.
 */
static int megamod_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct megamod_pic *pic = h->host_data;
	int i;

	/* We shouldn't see a hwirq which is muxed to core controller */
	for (i = 0; i < NR_MUX_OUTPUTS; i++)
		if (pic->output_to_irq[i] == hw)
			return -1;

	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops megamod_domain_ops = {
	.map = megamod_map,
	.xlate = irq_domain_xlate_onecell,
};
/*
 * Route megamodule event @src to core interrupt output @output.
 * An out-of-range @src instead marks the output as unmapped.
 */
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
{
	int index, offset;
	u32 val;

	if (src < 0 || src >= (NR_COMBINERS * 32)) {
		pic->output_to_irq[output] = IRQ_UNMAPPED;
		return;
	}

	/* four mappings per mux register */
	index = output / 4;
	offset = (output & 3) * 8;

	/* replace the 8-bit source field for this output */
	val = soc_readl(&pic->regs->intmux[index]);
	val &= ~(0xff << offset);
	val |= src << offset;
	soc_writel(val, &pic->regs->intmux[index]);
}
/*
* Parse the MUX mapping, if one exists.
*
* The MUX map is an array of up to 12 cells; one for each usable core priority
* interrupt. The value of a given cell is the megamodule interrupt source
* which is to be MUXed to the output corresponding to the cell position
* within the array. The first cell in the array corresponds to priority
* 4 and the last (12th) cell corresponds to priority 15. The allowed
* values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
* sources (0 - 3) are not allowed to be mapped through this property. They
* are handled through the "interrupts" property. This allows us to use a
* value of zero as a "do not map" placeholder.
*/
/*
 * Parse the optional "ti,c64x+megamod-pic-mux" property into @mapping.
 * @mapping has @size entries pre-filled with IRQ_UNMAPPED; each valid
 * cell overwrites the entry for its priority output.
 *
 * Fix: the old code only rejected sources below 4 (`val && val >= 4`,
 * where the `val &&` part was redundant for an unsigned value); sources
 * beyond the last megamodule event were accepted even though the
 * documented range is 4 .. (NR_COMBINERS * 32) - 1.  Enforce the upper
 * bound as well so a bad DT cannot program a bogus INTMUX source.
 */
static void __init parse_priority_map(struct megamod_pic *pic,
				      int *mapping, int size)
{
	struct device_node *np = pic->irqhost->of_node;
	const __be32 *map;
	int i, maplen;
	u32 val;

	map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
	if (map) {
		/* property length is in bytes; one u32 cell per output */
		maplen /= 4;
		if (maplen > size)
			maplen = size;

		for (i = 0; i < maplen; i++) {
			val = be32_to_cpup(map);
			/* 0 is "do not map"; 1-3 are combined sources */
			if (val >= 4 && val < (NR_COMBINERS * 32))
				mapping[i] = val;
			++map;
		}
	}
}
/*
 * Probe one megamodule PIC from device node @np: allocate state, create
 * the linear irq domain, map the controller registers, parse the
 * optional priority-mux map, and hook the combined-event cascades up to
 * the core interrupt controller.
 *
 * Returns the PIC instance, or NULL on failure.
 *
 * Fix: the original jumped to error_free when of_iomap() failed, which
 * freed the pic but leaked the irq domain created just above.  Dispose
 * of the domain on that path.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_domain;
	}

	/* Initialize MUX map: everything unmapped until DT says otherwise */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		struct irq_data *irq_data;
		irq_hw_number_t hwirq;

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		irq_data = irq_get_irq_data(irq);
		if (!irq_data) {
			pr_err("%s: combiner-%d no irq_data for virq %d!\n",
			       np->full_name, i, irq);
			continue;
		}

		hwirq = irq_data->hwirq;

		/*
		 * Check that device tree provided something in the range
		 * of the core priority interrupts (4 - 15).
		 */
		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d core irq %ld out of range!\n",
			       np->full_name, i, hwirq);
			continue;
		}

		/* record the mapping so megamod_map() can refuse it */
		mapping[hwirq - 4] = i;

		pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
			 np->full_name, i, hwirq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_domain:
	/* previously leaked: dispose the domain if register mapping fails */
	irq_domain_remove(pic->irqhost);
error_free:
	kfree(pic);

	return NULL;
}
/*
 * Return next active event after ACK'ing it.
 * Return -1 if no events active.
 */
static int get_exception(void)
{
	int i, bit;
	u32 mask;

	/* scan each combiner's masked exception flags, lowest bank first */
	for (i = 0; i < NR_COMBINERS; i++) {
		mask = soc_readl(&mm_pic->regs->mexpflag[i]);
		if (mask) {
			bit = __ffs(mask);
			/* ACK (clear) the event before reporting it */
			soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
			return (i * 32) + bit;
		}
	}
	return -1;
}
/* Software-assert megamodule event @val via the event-assert register. */
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}
/*
 * Find the megamodule PIC node, initialize the PIC, and install the
 * SoC exception hooks.
 *
 * Fix: the original installed soc_ops.get_exception/assert_event even
 * when init_megamod_pic() failed and returned NULL, so a later call to
 * either hook would dereference a NULL mm_pic.  Bail out instead.
 */
void __init megamod_pic_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
	if (!np)
		return;

	mm_pic = init_megamod_pic(np);
	of_node_put(np);

	/* don't hook up exception handling to a PIC that failed to init */
	if (!mm_pic)
		return;

	soc_ops.get_exception = get_exception;
	soc_ops.assert_event = assert_event;
}

View file

@ -0,0 +1,17 @@
/*
* Copyright 2011 Texas Instruments Incorporated
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
/*
 * Populate platform devices from the flattened device tree so drivers
 * can bind to them.  Runs once at core_initcall time.
 */
static int __init c6x_device_probe(void)
{
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
core_initcall(c6x_device_probe);

444
arch/c6x/platforms/pll.c Normal file
View file

@ -0,0 +1,444 @@
/*
* Clock and PLL control for C64x+ devices
*
* Copyright (C) 2010, 2011 Texas Instruments.
* Contributed by: Mark Salter <msalter@redhat.com>
*
* Copied heavily from arm/mach-davinci/clock.c, so:
*
* Copyright (C) 2006-2007 Texas Instruments.
* Copyright (C) 2008-2009 Deep Root Systems, LLC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <asm/clock.h>
#include <asm/soc.h>
/*
 * Global clock list and its locks: clocks_mutex protects list topology
 * (register/unregister/reparent), clockfw_lock protects rates/usecounts.
 */
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
/*
 * Bump the usecount of @clk and every ancestor up the parent chain.
 * Caller holds clockfw_lock.
 */
static void __clk_enable(struct clk *clk)
{
	struct clk *c;

	for (c = clk; c; c = c->parent)
		c->usecount++;
}
/*
 * Drop the usecount of @clk and every ancestor up the parent chain,
 * warning (and stopping) on any clock already at zero.  Caller holds
 * clockfw_lock.
 */
static void __clk_disable(struct clk *clk)
{
	struct clk *c;

	for (c = clk; c; c = c->parent) {
		if (WARN_ON(c->usecount == 0))
			return;
		--c->usecount;
	}
}
/**
 * clk_enable - enable a clock (and, via usecounts, its ancestors)
 * @clk: clock to enable
 *
 * Returns 0 on success, -EINVAL for a NULL or error pointer.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);
/**
 * clk_disable - disable a clock enabled with clk_enable()
 * @clk: clock to disable (NULL/error pointers are silently ignored)
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
/**
 * clk_get_rate - return the cached rate of a clock in Hz
 * @clk: clock of interest
 *
 * Returns -EINVAL (as an unsigned long) for a NULL/error pointer.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
/**
 * clk_round_rate - round a requested rate to what the clock supports
 * @clk: clock of interest
 * @rate: requested rate in Hz
 *
 * Defers to the clock's own ->round_rate hook when present; clocks
 * without one are fixed at their current rate.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	return clk->round_rate ? clk->round_rate(clk, rate) : clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);
/*
 * Propagate rate to children: recompute each child's cached rate
 * (depth-first) after the parent's rate changed.  Callers hold
 * clockfw_lock (see clk_set_rate/clk_set_parent).
 */
static void propagate_rate(struct clk *root)
{
	struct clk *clk;

	list_for_each_entry(clk, &root->children, childnode) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
}
/*
 * Set a clock's rate through its ->set_rate hook, then refresh the
 * cached rate and propagate the change to all children.
 *
 * Note: the ->set_rate hook is deliberately called outside
 * clockfw_lock; only the cache update and propagation run under it.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);
/**
 * clk_set_parent - re-parent a (disabled) clock and refresh its rate
 * @clk: clock to re-parent
 * @parent: new parent clock
 *
 * Returns 0 on success, -EINVAL for bad pointers or an enabled clock.
 *
 * Fix: the original accepted a NULL @parent and then dereferenced
 * clk->parent->children below, crashing.  Reject NULL/error parents
 * up front.  Also use list_move() instead of list_del_init()+list_add().
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* a NULL parent would be dereferenced when re-linking below */
	if (parent == NULL || IS_ERR(parent))
		return -EINVAL;

	/* Cannot change parent on enabled clock */
	if (WARN_ON(clk->usecount))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	clk->parent = parent;
	list_move(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->recalc)
		clk->rate = clk->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);
/*
 * Add a clock to the global list and link it under its parent.
 * The initial rate comes from, in order: a preset ->rate, the
 * ->recalc hook, or the parent's rate.  Returns -EINVAL for bad
 * pointers or a parent that has no rate yet.
 */
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (WARN(clk->parent && !clk->parent->rate,
		 "CLK: %s parent %s has no rate!\n",
		 clk->name, clk->parent->name))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	list_add_tail(&clk->node, &clocks);
	if (clk->parent)
		list_add_tail(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	/* If rate is already set, use it */
	if (clk->rate)
		return 0;

	/* Else, see if there is a way to calculate it */
	if (clk->recalc)
		clk->rate = clk->recalc(clk);

	/* Otherwise, default to parent rate */
	else if (clk->parent)
		clk->rate = clk->parent->rate;

	return 0;
}
EXPORT_SYMBOL(clk_register);
/**
 * clk_unregister - remove a clock from the global list and its parent
 * @clk: clock to remove (NULL/error pointers are silently ignored)
 */
void clk_unregister(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	list_del(&clk->childnode);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
/* Read the PLL controller register at byte offset @reg. */
static u32 pll_read(struct pll_data *pll, int reg)
{
	return soc_readl(pll->base + reg);
}
/*
 * Recalculate a PLL-derived sysclk: start from the parent PLL's output
 * (or the raw PLL input for PRE_PLL clocks) and apply either a fixed
 * divider or the divider read from the PLLDIV register at offset ->div.
 */
static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv = 0;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	if (!clk->div) {
		pr_debug("%s: (no divider) rate = %lu KHz\n",
			 clk->name, rate / 1000);
		return rate;
	}

	if (clk->flags & FIXED_DIV_PLL) {
		rate /= clk->div;
		pr_debug("%s: (fixed divide by %d) rate = %lu KHz\n",
			 clk->name, clk->div, rate / 1000);
		return rate;
	}

	/* ->div holds a register offset; a disabled divider means /1 */
	v = pll_read(pll, clk->div);
	if (v & PLLDIV_EN)
		plldiv = (v & PLLDIV_RATIO_MASK) + 1;

	if (plldiv == 0)
		plldiv = 1;

	rate /= plldiv;

	pr_debug("%s: (divide by %d) rate = %lu KHz\n",
		 clk->name, plldiv, rate / 1000);

	return rate;
}
/* Leaf (PSC) clocks simply track their parent's rate. */
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
	struct clk *parent = clk->parent;

	if (WARN_ON(!parent))
		return clk->rate;

	pr_debug("%s: (parent %s) rate = %lu KHz\n",
		 clk->name, parent->name, parent->rate / 1000);

	return parent->rate;
}
/*
 * Recalculate a PLL output clock from the hardware: apply the
 * pre-divider, multiplier and post-divider where present and enabled.
 * If the PLL is in bypass (PLLEN clear in PLLCTL), the input rate
 * passes straight through.
 */
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 0, prediv = 0, postdiv = 0;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	if (clk->flags & FIXED_RATE_PLL)
		return rate;

	ctrl = pll_read(pll, PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN)
		bypass = 0;
	else
		bypass = 1;

	if (pll->flags & PLL_HAS_MUL) {
		mult = pll_read(pll, PLLM);
		mult = (mult & PLLM_PLLM_MASK) + 1;
	}

	if (pll->flags & PLL_HAS_PRE) {
		prediv = pll_read(pll, PLLPRE);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
		else
			prediv = 0;	/* disabled pre-divider is skipped */
	}

	if (pll->flags & PLL_HAS_POST) {
		postdiv = pll_read(pll, PLLPOST);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		if (prediv)
			rate /= prediv;
		if (mult)
			rate *= mult;
		if (postdiv)
			rate /= postdiv;

		pr_debug("PLL%d: input = %luMHz, pre[%d] mul[%d] post[%d] "
			 "--> %luMHz output.\n",
			 pll->num, clk->parent->rate / 1000000,
			 prediv, mult, postdiv, rate / 1000000);
	} else
		pr_debug("PLL%d: input = %luMHz, bypass mode.\n",
			 pll->num, clk->parent->rate / 1000000);

	return rate;
}
/*
 * One-time init of a clock's list heads, plus a default ->recalc hook
 * picked by what kind of clock it is (PLL, PLL-derived, or leaf).
 */
static void __init __init_clk(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->node);
	INIT_LIST_HEAD(&clk->children);
	INIT_LIST_HEAD(&clk->childnode);

	if (!clk->recalc) {

		/* Check if clock is a PLL */
		if (clk->pll_data)
			clk->recalc = clk_pllclk_recalc;

		/* Else, if it is a PLL-derived clock */
		else if (clk->flags & CLK_PLL)
			clk->recalc = clk_sysclk_recalc;

		/* Otherwise, it is a leaf clock (PSC clock) */
		else if (clk->parent)
			clk->recalc = clk_leafclk_recalc;
	}
}
/*
 * Register a NULL-terminated clk_lookup table: initialize and register
 * each clock, enable the ALWAYS_ENABLED ones, then add the whole table
 * to the clkdev lookup machinery.
 */
void __init c6x_clks_init(struct clk_lookup *clocks)
{
	struct clk_lookup *c;
	struct clk *clk;
	size_t num_clocks = 0;

	for (c = clocks; c->clk; c++) {
		clk = c->clk;

		__init_clk(clk);
		clk_register(clk);
		num_clocks++;

		/* Turn on clocks that Linux doesn't otherwise manage */
		if (clk->flags & ALWAYS_ENABLED)
			clk_enable(clk);
	}

	clkdev_add_table(clocks, num_clocks);
}
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#define CLKNAME_MAX 10 /* longest clock name */
#define NEST_DELTA 2
#define NEST_MAX 4
/*
 * Print one clock (indented @nest spaces) followed by its children,
 * recursively, for the debugfs clock-tree dump.
 */
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char *state;
	char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk *clk;
	unsigned i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
	       min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));

	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
/* seq_file show hook: dump every root clock (and its subtree). */
static int c6x_ck_show(struct seq_file *m, void *v)
{
	struct clk *clk;

	/*
	 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
	 */
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, node)
		if (!clk->parent)
			dump_clock(m, 0, clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
/* debugfs open hook for the c6x_clocks file. */
static int c6x_ck_open(struct inode *inode, struct file *file)
{
	return single_open(file, c6x_ck_show, NULL);
}

/* File ops for /sys/kernel/debug/c6x_clocks. */
static const struct file_operations c6x_ck_operations = {
	.open = c6x_ck_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Create the read-only debugfs clock-tree file at device_initcall time. */
static int __init c6x_clk_debugfs_init(void)
{
	debugfs_create_file("c6x_clocks", S_IFREG | S_IRUGO, NULL, NULL,
			    &c6x_ck_operations);

	return 0;
}
device_initcall(c6x_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

View file

@ -0,0 +1,469 @@
/*
* Port on Texas Instruments TMS320C6x architecture
*
* Copyright (C) 2011 Texas Instruments Incorporated
* Author: Mark Salter <msalter@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/clock.h>
#include <asm/setup.h>
#include <asm/irq.h>
/*
* Common SoC clock support.
*/
/* Default input for PLL1 */
/* Default input for PLL1; .rate is filled in from the device tree by
 * c64x_setup_clocks(). */
struct clk clkin1 = {
	.name = "clkin1",
	.node = LIST_HEAD_INIT(clkin1.node),
	.children = LIST_HEAD_INIT(clkin1.children),
	.childnode = LIST_HEAD_INIT(clkin1.childnode),
};
/*
 * PLL1 and its sysclk outputs.  sysclks[0] is the PLL itself (parented
 * on clkin1); sysclks[1..16] are its derived outputs.  Per-SoC setup
 * code below fills in flags and dividers for the outputs it uses.
 */
struct pll_data c6x_soc_pll1 = {
	.num = 1,
	.sysclks = {
		{
			.name = "pll1",
			.parent = &clkin1,
			.pll_data = &c6x_soc_pll1,
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk1",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk2",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk3",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk4",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk5",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk6",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk7",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk8",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk9",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk10",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk11",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk12",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk13",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk14",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk15",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
		{
			.name = "pll1_sysclk16",
			.parent = &c6x_soc_pll1.sysclks[0],
			.flags = CLK_PLL,
		},
	},
};
/* CPU core clock; parent is assigned by the per-SoC setup functions. */
struct clk c6x_core_clk = {
	.name = "core",
};

/* miscellaneous IO clocks; parents likewise assigned per SoC */
struct clk c6x_i2c_clk = {
	.name = "i2c",
};

struct clk c6x_watchdog_clk = {
	.name = "watchdog",
};

struct clk c6x_mcbsp1_clk = {
	.name = "mcbsp1",
};

struct clk c6x_mcbsp2_clk = {
	.name = "mcbsp2",
};

struct clk c6x_mdio_clk = {
	.name = "mdio",
};
#ifdef CONFIG_SOC_TMS320C6455
/* clkdev lookup table for the TMS320C6455. */
static struct clk_lookup c6455_clks[] = {
	CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
	CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
	CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
	CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
	CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
	CLK(NULL, "core", &c6x_core_clk),
	CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
	CLK("watchdog", NULL, &c6x_watchdog_clk),
	CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
	CLK("", NULL, NULL)
};

/*
 * C6455 clock tree: fixed /3 and /6 taps plus two register-controlled
 * dividers; the core runs straight from the PLL output.
 */
static void __init c6455_setup_clocks(struct device_node *node)
{
	struct pll_data *pll = &c6x_soc_pll1;
	struct clk *sysclks = pll->sysclks;

	pll->flags = PLL_HAS_PRE | PLL_HAS_MUL;

	sysclks[2].flags |= FIXED_DIV_PLL;
	sysclks[2].div = 3;
	sysclks[3].flags |= FIXED_DIV_PLL;
	sysclks[3].div = 6;

	sysclks[4].div = PLLDIV4;
	sysclks[5].div = PLLDIV5;

	c6x_core_clk.parent = &sysclks[0];
	c6x_i2c_clk.parent = &sysclks[3];
	c6x_watchdog_clk.parent = &sysclks[3];
	c6x_mdio_clk.parent = &sysclks[3];

	c6x_clks_init(c6455_clks);
}
#endif /* CONFIG_SOC_TMS320C6455 */
#ifdef CONFIG_SOC_TMS320C6457
/* clkdev lookup table for the TMS320C6457. */
static struct clk_lookup c6457_clks[] = {
	CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
	CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
	CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
	CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
	CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
	CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
	CLK(NULL, "core", &c6x_core_clk),
	CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
	CLK("watchdog", NULL, &c6x_watchdog_clk),
	CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
	CLK("", NULL, NULL)
};

/*
 * C6457 clock tree: fixed /1, /3 and /6 taps plus two register-
 * controlled dividers; the core runs from sysclk1.
 */
static void __init c6457_setup_clocks(struct device_node *node)
{
	struct pll_data *pll = &c6x_soc_pll1;
	struct clk *sysclks = pll->sysclks;

	pll->flags = PLL_HAS_MUL | PLL_HAS_POST;

	sysclks[1].flags |= FIXED_DIV_PLL;
	sysclks[1].div = 1;
	sysclks[2].flags |= FIXED_DIV_PLL;
	sysclks[2].div = 3;
	sysclks[3].flags |= FIXED_DIV_PLL;
	sysclks[3].div = 6;

	sysclks[4].div = PLLDIV4;
	sysclks[5].div = PLLDIV5;

	c6x_core_clk.parent = &sysclks[1];
	c6x_i2c_clk.parent = &sysclks[3];
	c6x_watchdog_clk.parent = &sysclks[5];
	c6x_mdio_clk.parent = &sysclks[5];

	c6x_clks_init(c6457_clks);
}
#endif /* CONFIG_SOC_TMS320C6457 */
#ifdef CONFIG_SOC_TMS320C6472
/* clkdev lookup table for the TMS320C6472. */
static struct clk_lookup c6472_clks[] = {
	CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
	CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
	CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
	CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
	CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
	CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
	CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
	CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
	CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
	CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
	CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
	CLK(NULL, "core", &c6x_core_clk),
	CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
	CLK("watchdog", NULL, &c6x_watchdog_clk),
	CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
	CLK("", NULL, NULL)
};

/* assumptions used for delay loop calculations */
#define MIN_CLKIN1_KHz 15625
#define MAX_CORE_KHz 700000
#define MIN_PLLOUT_KHz MIN_CLKIN1_KHz

/*
 * C6472 clock tree: sysclk1-6 are /1 taps (one per core; this core's
 * clock is picked by coreid), plus fixed /3, /6 and /2 taps and one
 * register-controlled divider.
 */
static void __init c6472_setup_clocks(struct device_node *node)
{
	struct pll_data *pll = &c6x_soc_pll1;
	struct clk *sysclks = pll->sysclks;
	int i;

	pll->flags = PLL_HAS_MUL;

	for (i = 1; i <= 6; i++) {
		sysclks[i].flags |= FIXED_DIV_PLL;
		sysclks[i].div = 1;
	}

	sysclks[7].flags |= FIXED_DIV_PLL;
	sysclks[7].div = 3;
	sysclks[8].flags |= FIXED_DIV_PLL;
	sysclks[8].div = 6;
	sysclks[9].flags |= FIXED_DIV_PLL;
	sysclks[9].div = 2;
	sysclks[10].div = PLLDIV10;

	/* each core runs from its own per-core sysclk */
	c6x_core_clk.parent = &sysclks[get_coreid() + 1];
	c6x_i2c_clk.parent = &sysclks[8];
	c6x_watchdog_clk.parent = &sysclks[8];
	c6x_mdio_clk.parent = &sysclks[5];

	c6x_clks_init(c6472_clks);
}
#endif /* CONFIG_SOC_TMS320C6472 */
#ifdef CONFIG_SOC_TMS320C6474
/* clkdev lookup table for the TMS320C6474. */
static struct clk_lookup c6474_clks[] = {
	CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
	CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
	CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
	CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
	CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
	CLK(NULL, "pll1_sysclk12", &c6x_soc_pll1.sysclks[12]),
	CLK(NULL, "pll1_sysclk13", &c6x_soc_pll1.sysclks[13]),
	CLK(NULL, "core", &c6x_core_clk),
	CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
	CLK("mcbsp.1", NULL, &c6x_mcbsp1_clk),
	CLK("mcbsp.2", NULL, &c6x_mcbsp2_clk),
	CLK("watchdog", NULL, &c6x_watchdog_clk),
	CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
	CLK("", NULL, NULL)
};

/*
 * C6474 clock tree: core from the /1 tap (sysclk7); peripherals from
 * the /6 tap (sysclk10); two register-controlled dividers.
 */
static void __init c6474_setup_clocks(struct device_node *node)
{
	struct pll_data *pll = &c6x_soc_pll1;
	struct clk *sysclks = pll->sysclks;

	pll->flags = PLL_HAS_MUL;

	sysclks[7].flags |= FIXED_DIV_PLL;
	sysclks[7].div = 1;
	sysclks[9].flags |= FIXED_DIV_PLL;
	sysclks[9].div = 3;
	sysclks[10].flags |= FIXED_DIV_PLL;
	sysclks[10].div = 6;

	sysclks[11].div = PLLDIV11;

	sysclks[12].flags |= FIXED_DIV_PLL;
	sysclks[12].div = 2;

	sysclks[13].div = PLLDIV13;

	c6x_core_clk.parent = &sysclks[7];
	c6x_i2c_clk.parent = &sysclks[10];
	c6x_watchdog_clk.parent = &sysclks[10];
	c6x_mcbsp1_clk.parent = &sysclks[10];
	c6x_mcbsp2_clk.parent = &sysclks[10];

	c6x_clks_init(c6474_clks);
}
#endif /* CONFIG_SOC_TMS320C6474 */
#ifdef CONFIG_SOC_TMS320C6678
/* clkdev lookup table for the TMS320C6678. */
static struct clk_lookup c6678_clks[] = {
	CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
	CLK(NULL, "pll1_refclk", &c6x_soc_pll1.sysclks[1]),
	CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
	CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
	CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
	CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
	CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
	CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
	CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
	CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
	CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
	CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
	CLK(NULL, "core", &c6x_core_clk),
	CLK("", NULL, NULL)
};

/*
 * C6678 clock tree: mostly fixed dividers plus three register-
 * controlled ones; the core runs straight from the PLL output.
 */
static void __init c6678_setup_clocks(struct device_node *node)
{
	struct pll_data *pll = &c6x_soc_pll1;
	struct clk *sysclks = pll->sysclks;

	pll->flags = PLL_HAS_MUL;

	sysclks[1].flags |= FIXED_DIV_PLL;
	sysclks[1].div = 1;

	sysclks[2].div = PLLDIV2;

	sysclks[3].flags |= FIXED_DIV_PLL;
	sysclks[3].div = 2;

	sysclks[4].flags |= FIXED_DIV_PLL;
	sysclks[4].div = 3;

	sysclks[5].div = PLLDIV5;

	sysclks[6].flags |= FIXED_DIV_PLL;
	sysclks[6].div = 64;

	sysclks[7].flags |= FIXED_DIV_PLL;
	sysclks[7].div = 6;

	sysclks[8].div = PLLDIV8;

	sysclks[9].flags |= FIXED_DIV_PLL;
	sysclks[9].div = 12;

	sysclks[10].flags |= FIXED_DIV_PLL;
	sysclks[10].div = 3;

	sysclks[11].flags |= FIXED_DIV_PLL;
	sysclks[11].div = 6;

	c6x_core_clk.parent = &sysclks[0];
	c6x_i2c_clk.parent = &sysclks[7];

	c6x_clks_init(c6678_clks);
}
#endif /* CONFIG_SOC_TMS320C6678 */
/*
 * Match table for the PLL controller node; .data points at the
 * SoC-specific clock setup routine (generic "ti,c64x+pll" has none).
 */
static struct of_device_id c6x_clkc_match[] __initdata = {
#ifdef CONFIG_SOC_TMS320C6455
	{ .compatible = "ti,c6455-pll", .data = c6455_setup_clocks },
#endif
#ifdef CONFIG_SOC_TMS320C6457
	{ .compatible = "ti,c6457-pll", .data = c6457_setup_clocks },
#endif
#ifdef CONFIG_SOC_TMS320C6472
	{ .compatible = "ti,c6472-pll", .data = c6472_setup_clocks },
#endif
#ifdef CONFIG_SOC_TMS320C6474
	{ .compatible = "ti,c6474-pll", .data = c6474_setup_clocks },
#endif
#ifdef CONFIG_SOC_TMS320C6678
	{ .compatible = "ti,c6678-pll", .data = c6678_setup_clocks },
#endif
	{ .compatible = "ti,c64x+pll" },
	{}
};
/*
 * Locate the PLL controller in the device tree, map it, read the input
 * clock frequency and the PLL delay tunables, then run the SoC-specific
 * clock setup (if any) via the match table's .data pointer.
 *
 * Fix: when "clock-frequency" was missing/zero, the old code logged
 * "Using %dMHz" with the stale/zero value *before* assigning the
 * 25MHz fallback, so the message reported 0MHz while 25MHz was used.
 * Assign the fallback first so the log matches reality.
 */
void __init c64x_setup_clocks(void)
{
	void (*__setup_clocks)(struct device_node *np);
	struct pll_data *pll = &c6x_soc_pll1;
	struct device_node *node;
	const struct of_device_id *id;
	int err;
	u32 val;

	node = of_find_matching_node(NULL, c6x_clkc_match);
	if (!node)
		return;

	pll->base = of_iomap(node, 0);
	if (!pll->base)
		goto out;

	err = of_property_read_u32(node, "clock-frequency", &val);
	if (err || val == 0) {
		val = 25000000;
		pr_err("%s: no clock-frequency found! Using %dMHz\n",
		       node->full_name, (int)val / 1000000);
	}
	clkin1.rate = val;

	err = of_property_read_u32(node, "ti,c64x+pll-bypass-delay", &val);
	if (err)
		val = 5000;
	pll->bypass_delay = val;

	err = of_property_read_u32(node, "ti,c64x+pll-reset-delay", &val);
	if (err)
		val = 30000;
	pll->reset_delay = val;

	err = of_property_read_u32(node, "ti,c64x+pll-lock-delay", &val);
	if (err)
		val = 30000;
	pll->lock_delay = val;

	/* id->data is a pointer to SoC-specific setup */
	id = of_match_node(c6x_clkc_match, node);
	if (id && id->data) {
		__setup_clocks = id->data;
		__setup_clocks(node);
	}

out:
	of_node_put(node);
}

View file

@ -0,0 +1,245 @@
/*
* Copyright (C) 2010, 2011 Texas Instruments Incorporated
* Contributed by: Mark Salter (msalter@redhat.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <asm/soc.h>
#include <asm/dscr.h>
#include <asm/special_insns.h>
#include <asm/timer64.h>
/* Register layout of one timer64 block. */
struct timer_regs {
	u32 reserved0;
	u32 emumgt;	/* emulation management; bits 16-19 hold the clock divisor */
	u32 reserved1;
	u32 reserved2;
	u32 cntlo;	/* counter, low half */
	u32 cnthi;	/* counter, high half */
	u32 prdlo;	/* period, low half */
	u32 prdhi;	/* period, high half */
	u32 tcr;	/* timer control */
	u32 tgcr;	/* timer global control */
	u32 wdtcr;	/* watchdog control */
};

/* MMIO mapping of the timer block chosen by timer64_init(). */
static struct timer_regs __iomem *timer;
#define TCR_TSTATLO 0x001
#define TCR_INVOUTPLO 0x002
#define TCR_INVINPLO 0x004
#define TCR_CPLO 0x008
#define TCR_ENAMODELO_ONCE 0x040
#define TCR_ENAMODELO_CONT 0x080
#define TCR_ENAMODELO_MASK 0x0c0
#define TCR_PWIDLO_MASK 0x030
#define TCR_CLKSRCLO 0x100
#define TCR_TIENLO 0x200
#define TCR_TSTATHI (0x001 << 16)
#define TCR_INVOUTPHI (0x002 << 16)
#define TCR_CPHI (0x008 << 16)
#define TCR_PWIDHI_MASK (0x030 << 16)
#define TCR_ENAMODEHI_ONCE (0x040 << 16)
#define TCR_ENAMODEHI_CONT (0x080 << 16)
#define TCR_ENAMODEHI_MASK (0x0c0 << 16)
#define TGCR_TIMLORS 0x001
#define TGCR_TIMHIRS 0x002
#define TGCR_TIMMODE_UD32 0x004
#define TGCR_TIMMODE_WDT64 0x008
#define TGCR_TIMMODE_CD32 0x00c
#define TGCR_TIMMODE_MASK 0x00c
#define TGCR_PSCHI_MASK (0x00f << 8)
#define TGCR_TDDRHI_MASK (0x00f << 12)
/*
* Timer clocks are divided down from the CPU clock
* The divisor is in the EMUMGTCLKSPD register
*/
#define TIMER_DIVISOR \
((soc_readl(&timer->emumgt) & (0xf << 16)) >> 16)
#define TIMER64_RATE (c6x_core_freq / TIMER_DIVISOR)
#define TIMER64_MODE_DISABLED 0
#define TIMER64_MODE_ONE_SHOT TCR_ENAMODELO_ONCE
#define TIMER64_MODE_PERIODIC TCR_ENAMODELO_CONT
static int timer64_mode;
static int timer64_devstate_id = -1;
/*
 * Program a new period and restart the count.  The timer is stopped
 * (enable-mode bits cleared) while the period register is rewritten,
 * then restarted in the currently selected mode.
 */
static void timer64_config(unsigned long period)
{
	u32 tcr = soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK;

	soc_writel(tcr, &timer->tcr);		/* stop the timer */
	soc_writel(period - 1, &timer->prdlo);	/* load new period */
	soc_writel(0, &timer->cntlo);		/* reset the count */
	tcr |= timer64_mode;			/* one-shot or periodic */
	soc_writel(tcr, &timer->tcr);		/* restart */
}
/*
 * Power up (via DSCR when available) and configure the timer block:
 * internal clock source, 1-cycle pulse width, dual 32-bit unchained
 * mode with only the "lo" half taken out of reset.
 */
static void timer64_enable(void)
{
	u32 val;

	if (timer64_devstate_id >= 0)
		dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);

	/* disable timer, reset count */
	soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
	soc_writel(0, &timer->prdlo);

	/* use internal clock and 1 cycle pulse width */
	val = soc_readl(&timer->tcr);
	soc_writel(val & ~(TCR_CLKSRCLO | TCR_PWIDLO_MASK), &timer->tcr);

	/* dual 32-bit unchained mode */
	val = soc_readl(&timer->tgcr) & ~TGCR_TIMMODE_MASK;
	soc_writel(val, &timer->tgcr);
	soc_writel(val | (TGCR_TIMLORS | TGCR_TIMMODE_UD32), &timer->tgcr);
}
/* Stop the timer and, when a DSCR id is known, power the block down. */
static void timer64_disable(void)
{
	/* disable timer, reset count */
	soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
	soc_writel(0, &timer->prdlo);

	if (timer64_devstate_id >= 0)
		dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_DISABLED);
}
/* clockevents set_next_event hook: arm the timer @delta ticks out. */
static int next_event(unsigned long delta,
		      struct clock_event_device *evt)
{
	timer64_config(delta);
	return 0;
}
/*
 * clockevents set_mode hook: (re)configure the timer hardware for the
 * requested mode.  Periodic mode is programmed immediately; one-shot
 * waits for next_event() to supply the delta.
 */
static void set_clock_mode(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		timer64_enable();
		timer64_mode = TIMER64_MODE_PERIODIC;
		timer64_config(TIMER64_RATE / HZ);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		timer64_enable();
		timer64_mode = TIMER64_MODE_ONE_SHOT;
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		timer64_mode = TIMER64_MODE_DISABLED;
		timer64_disable();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
/* Clock event device backed by the 32-bit "lo" half of the timer64. */
static struct clock_event_device t64_clockevent_device = {
	.name		= "TIMER64_EVT32_TIMER",
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.rating		= 200,
	.set_mode	= set_clock_mode,
	.set_next_event	= next_event,
};
/* Timer IRQ handler: forward the tick to the clockevents core. */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *cd = &t64_clockevent_device;

	cd->event_handler(cd);

	return IRQ_HANDLED;
}
/* irqaction installed by timer64_init() for the timer interrupt. */
static struct irqaction timer_iact = {
	.name		= "timer",
	.flags		= IRQF_TIMER,
	.handler	= timer_interrupt,
	.dev_id		= &t64_clockevent_device,
};
/*
 * Find this core's timer64 node in the device tree, map its registers
 * and interrupt, optionally enable it through the DSCR, and register
 * the clock event device.
 */
void __init timer64_init(void)
{
	struct clock_event_device *cd = &t64_clockevent_device;
	struct device_node *np, *first = NULL;
	u32 val;
	int err, found = 0;

	/*
	 * Prefer a node whose ti,core-mask includes this core; remember
	 * the first node without a core mask as a fallback.
	 */
	for_each_compatible_node(np, NULL, "ti,c64x+timer64") {
		err = of_property_read_u32(np, "ti,core-mask", &val);
		if (!err) {
			if (val & (1 << get_coreid())) {
				found = 1;
				break;
			}
		} else if (!first)
			/*
			 * NOTE(review): 'first' is stored without taking a
			 * reference; the loop's implicit of_node_put() on
			 * the next iteration may release it before the
			 * of_node_get(first) below — confirm refcounting.
			 */
			first = np;
	}
	if (!found) {
		/* try first one with no core-mask */
		if (first)
			np = of_node_get(first);
		else {
			pr_debug("Cannot find ti,c64x+timer64 timer.\n");
			return;
		}
	}

	timer = of_iomap(np, 0);
	if (!timer) {
		pr_debug("%s: Cannot map timer registers.\n", np->full_name);
		goto out;
	}
	pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);

	cd->irq	= irq_of_parse_and_map(np, 0);
	if (cd->irq == NO_IRQ) {
		pr_debug("%s: Cannot find interrupt.\n", np->full_name);
		iounmap(timer);
		goto out;
	}

	/* If there is a device state control, save the ID. */
	err = of_property_read_u32(np, "ti,dscr-dev-enable", &val);
	if (!err) {
		timer64_devstate_id = val;

		/*
		 * It is necessary to enable the timer block here because
		 * the TIMER_DIVISOR macro needs to read a timer register
		 * to get the divisor.
		 */
		dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
	}

	pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);

	clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);

	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(250, cd);

	cd->cpumask		= cpumask_of(smp_processor_id());

	clockevents_register_device(cd);
	setup_irq(cd->irq, &timer_iact);

out:
	of_node_put(np);
	return;
}