author     Jesper Nilsson <jespern@stork.se.axis.com>   2007-11-29 17:11:23 +0100
committer  Jesper Nilsson <jesper.nilsson@axis.com>     2008-02-08 11:06:23 +0100
commit     035e111f9a9b29843bc899f03d56f19d94bebb53 (patch)
tree       cb46b3c0eb6d9f2cc915522153454d3acaa7fcda /arch/cris/arch-v32/mach-a3
parent     6107c61fd3e6b47106b078db1726ad814564efef (diff)
CRIS v32: Add new machine dependent files for Etrax-FS and Artpec-3.
The two chips are somewhat different and need different handling. Adds handling of the DMA, DRAM initialization, hardware settings, I/O, memory arbiter and pinmux. Also moves the DMA, DRAM initialization and I/O code out of the CRIS v32 common files.
Diffstat (limited to 'arch/cris/arch-v32/mach-a3')
-rw-r--r--  arch/cris/arch-v32/mach-a3/Kconfig        110
-rw-r--r--  arch/cris/arch-v32/mach-a3/Makefile        11
-rw-r--r--  arch/cris/arch-v32/mach-a3/arbiter.c      634
-rw-r--r--  arch/cris/arch-v32/mach-a3/cpufreq.c      153
-rw-r--r--  arch/cris/arch-v32/mach-a3/dma.c          185
-rw-r--r--  arch/cris/arch-v32/mach-a3/dram_init.S    104
-rw-r--r--  arch/cris/arch-v32/mach-a3/hw_settings.S   51
-rw-r--r--  arch/cris/arch-v32/mach-a3/io.c           150
-rw-r--r--  arch/cris/arch-v32/mach-a3/pinmux.c       386
-rw-r--r--  arch/cris/arch-v32/mach-a3/vcs_hook.c     103
-rw-r--r--  arch/cris/arch-v32/mach-a3/vcs_hook.h      58
11 files changed, 1945 insertions, 0 deletions
diff --git a/arch/cris/arch-v32/mach-a3/Kconfig b/arch/cris/arch-v32/mach-a3/Kconfig
new file mode 100644
index 000000000000..a4df06d5997a
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/Kconfig
@@ -0,0 +1,110 @@
+if CRIS_MACH_ARTPEC3
+
+menu "Artpec-3 options"
+ depends on CRIS_MACH_ARTPEC3
+
+config ETRAX_DRAM_VIRTUAL_BASE
+ hex
+ default "c0000000"
+
+config ETRAX_L2CACHE
+ bool
+ default y
+
+config ETRAX_SERIAL_PORTS
+ int
+ default 5
+
+config ETRAX_DDR
+ bool
+ default y
+
+config ETRAX_DDR2_MRS
+ hex "DDR2 MRS"
+ default "0"
+
+config ETRAX_DDR2_TIMING
+ hex "DDR2 SDRAM timing"
+ default "0"
+ help
+ SDRAM timing parameters.
+
+config ETRAX_DDR2_CONFIG
+ hex "DDR2 config"
+ default "0"
+
+config ETRAX_PIO_CE0_CFG
+ hex "PIO CE0 configuration"
+ default "0"
+
+config ETRAX_PIO_CE1_CFG
+ hex "PIO CE1 configuration"
+ default "0"
+
+config ETRAX_PIO_CE2_CFG
+ hex "PIO CE2 configuration"
+ default "0"
+
+config ETRAX_DEF_GIO_PA_OE
+ hex "GIO_PA_OE"
+ default "00000000"
+ help
+ Configures the direction of general port A bits. 1 is out, 0 is in.
+ This is often totally different depending on the product used.
+ There are some guidelines though - if you know that only LEDs are
+ connected to port PA, then they are usually connected to bits 2-4
+ and you can therefore use 1c. On other boards which don't have the
+ LEDs at the general ports, these bits are used for all kinds of
+ stuff. If you don't know what to use, it is always safe to configure
+ them all as inputs, although floating inputs aren't good.
+
+config ETRAX_DEF_GIO_PA_OUT
+ hex "GIO_PA_OUT"
+ default "00000000"
+ help
+ Configures the initial data for the general port A bits. Most
+ products should use 00 here.
+
+config ETRAX_DEF_GIO_PB_OE
+ hex "GIO_PB_OE"
+ default "000000000"
+ help
+ Configures the direction of general port B bits. 1 is out, 0 is in.
+ This is often totally different depending on the product used.
+ There are some guidelines though - if you know that only LEDs are
+ connected to port PB, then they are usually connected to bits 2-4
+ and you can therefore use 1c. On other boards which don't have the
+ LEDs at the general ports, these bits are used for all kinds of
+ stuff. If you don't know what to use, it is always safe to configure
+ them all as inputs, although floating inputs aren't good.
+
+config ETRAX_DEF_GIO_PB_OUT
+ hex "GIO_PB_OUT"
+ default "000000000"
+ help
+ Configures the initial data for the general port B bits. Most
+ products should use 00000 here.
+
+config ETRAX_DEF_GIO_PC_OE
+ hex "GIO_PC_OE"
+ default "00000"
+ help
+ Configures the direction of general port C bits. 1 is out, 0 is in.
+ This is often totally different depending on the product used.
+ There are some guidelines though - if you know that only LEDs are
+ connected to port PC, then they are usually connected to bits 2-4
+ and you can therefore use 1c. On other boards which don't have the
+ LEDs at the general ports, these bits are used for all kinds of
+ stuff. If you don't know what to use, it is always safe to configure
+ them all as inputs, although floating inputs aren't good.
+
+config ETRAX_DEF_GIO_PC_OUT
+ hex "GIO_PC_OUT"
+ default "00000"
+ help
+ Configures the initial data for the general port C bits. Most
+ products should use 00000 here.
+
+endmenu
+
+endif
diff --git a/arch/cris/arch-v32/mach-a3/Makefile b/arch/cris/arch-v32/mach-a3/Makefile
new file mode 100644
index 000000000000..41fa6a6893a9
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/Makefile
@@ -0,0 +1,11 @@
+# $Id: Makefile,v 1.3 2007/03/13 11:57:46 starvik Exp $
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := dma.o pinmux.o io.o arbiter.o
+obj-$(CONFIG_ETRAX_VCS_SIM) += vcs_hook.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+
+clean:
+
diff --git a/arch/cris/arch-v32/mach-a3/arbiter.c b/arch/cris/arch-v32/mach-a3/arbiter.c
new file mode 100644
index 000000000000..b92fd7eed3c4
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/arbiter.c
@@ -0,0 +1,634 @@
+/*
+ * Memory arbiter functions. Allocates bandwidth through the
+ * arbiter and sets up arbiter breakpoints.
+ *
+ * The algorithm first assigns slots to the clients that have specified
+ * bandwidth (e.g. Ethernet) and then divides the remaining slots
+ * among all the active clients.
+ *
+ * Copyright (c) 2004-2007 Axis Communications AB.
+ *
+ * The artpec-3 has two arbiters. The memory hierarchy looks like this:
+ *
+ *
+ * CPU DMAs
+ * | |
+ * | |
+ * -------------- ------------------
+ * | foo arbiter|----| Internal memory|
+ * -------------- ------------------
+ * |
+ * --------------
+ * | L2 cache |
+ * --------------
+ * |
+ * h264 etc |
+ * | |
+ * | |
+ * --------------
+ * | bar arbiter|
+ * --------------
+ * |
+ * ---------
+ * | SDRAM |
+ * ---------
+ *
+ */
+
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/marb_foo_defs.h>
+#include <hwregs/marb_bar_defs.h>
+#include <arbiter.h>
+#include <hwregs/intr_vect.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/irq_regs.h>
+
+#define D(x)
+
+struct crisv32_watch_entry {
+ unsigned long instance;
+ watch_callback *cb;
+ unsigned long start;
+ unsigned long end;
+ int used;
+};
+
+#define NUMBER_OF_BP 4
+#define SDRAM_BANDWIDTH 400000000
+#define INTMEM_BANDWIDTH 400000000
+#define NBR_OF_SLOTS 64
+#define NBR_OF_REGIONS 2
+#define NBR_OF_CLIENTS 15
+#define ARBITERS 2
+#define UNASSIGNED 100
+
+struct arbiter {
+ unsigned long instance;
+ int nbr_regions;
+ int nbr_clients;
+ int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
+ int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
+};
+
+static struct crisv32_watch_entry watches[ARBITERS][NUMBER_OF_BP] =
+{
+ {
+ {regi_marb_foo_bp0},
+ {regi_marb_foo_bp1},
+ {regi_marb_foo_bp2},
+ {regi_marb_foo_bp3}
+ },
+ {
+ {regi_marb_bar_bp0},
+ {regi_marb_bar_bp1},
+ {regi_marb_bar_bp2},
+ {regi_marb_bar_bp3}
+ }
+};
+
+struct arbiter arbiters[ARBITERS] =
+{
+ { /* L2 cache arbiter */
+ .instance = regi_marb_foo,
+ .nbr_regions = 2,
+ .nbr_clients = 15
+ },
+ { /* DDR2 arbiter */
+ .instance = regi_marb_bar,
+ .nbr_regions = 1,
+ .nbr_clients = 9
+ }
+};
+
+static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH};
+
+DEFINE_SPINLOCK(arbiter_lock);
+
+static irqreturn_t
+crisv32_foo_arbiter_irq(int irq, void *dev_id);
+static irqreturn_t
+crisv32_bar_arbiter_irq(int irq, void *dev_id);
+
+/*
+ * "I'm the arbiter, I know the score.
+ * From square one I'll be watching all 64."
+ * (memory arbiter slots, that is)
+ *
+ * Or in other words:
+ * Program the memory arbiter slots for "region" according to what's
+ * in requested_slots[] and active_clients[], while minimizing
+ * latency. A caller may pass a non-zero positive amount for
+ * "unused_slots", which must then be the unallocated, remaining
+ * number of slots, free to hand out to any client.
+ */
+
+static void crisv32_arbiter_config(int arbiter, int region, int unused_slots)
+{
+ int slot;
+ int client;
+ int interval = 0;
+
+ /*
+ * This vector corresponds to the hardware arbiter slots (see
+ * the hardware documentation for semantics). We initialize
+ * each slot with a suitable sentinel value outside the valid
+ * range {0 .. NBR_OF_CLIENTS - 1} and replace them with
+ * client indexes. Then it's fed to the hardware.
+ */
+ s8 val[NBR_OF_SLOTS];
+
+ for (slot = 0; slot < NBR_OF_SLOTS; slot++)
+ val[slot] = -1;
+
+ for (client = 0; client < arbiters[arbiter].nbr_clients; client++) {
+ int pos;
+ /* Allocate the requested non-zero number of slots, but
+ * also give clients with zero-requests one slot each
+ * while stocks last. We do the latter here, in client
+ * order. This makes sure zero-request clients are the
+ * first to get to any spare slots, else those slots
+ * could, when bandwidth is allocated close to the limit,
+ * all be allocated to low-index non-zero-request clients
+ * in the default-fill loop below. Another positive but
+ * secondary effect is a somewhat better spread of the
+ * zero-bandwidth clients in the vector, avoiding some of
+ * the latency that could otherwise be caused by the
+ * partitioning of non-zero-bandwidth clients at low
+ * indexes and zero-bandwidth clients at high
+ * indexes. (Note that this spreading can only affect the
+ * unallocated bandwidth.) All the above only matters for
+ * memory-intensive situations, of course.
+ */
+ if (!arbiters[arbiter].requested_slots[region][client]) {
+ /*
+ * Skip inactive clients. Also skip zero-slot
+ * allocations in this pass when there are no known
+ * free slots.
+ */
+ if (!arbiters[arbiter].active_clients[region][client] ||
+ unused_slots <= 0)
+ continue;
+
+ unused_slots--;
+
+ /* Only allocate one slot for this client. */
+ interval = NBR_OF_SLOTS;
+ } else
+ interval = NBR_OF_SLOTS /
+ arbiters[arbiter].requested_slots[region][client];
+
+ pos = 0;
+ while (pos < NBR_OF_SLOTS) {
+ if (val[pos] >= 0)
+ pos++;
+ else {
+ val[pos] = client;
+ pos += interval;
+ }
+ }
+ }
+
+ client = 0;
+ for (slot = 0; slot < NBR_OF_SLOTS; slot++) {
+ /*
+ * Allocate remaining slots in round-robin
+ * client-number order for active clients. For this
+ * pass, we ignore requested bandwidth and previous
+ * allocations.
+ */
+ if (val[slot] < 0) {
+ int first = client;
+ while (!arbiters[arbiter].active_clients[region][client]) {
+ client = (client + 1) %
+ arbiters[arbiter].nbr_clients;
+ if (client == first)
+ break;
+ }
+ val[slot] = client;
+ client = (client + 1) % arbiters[arbiter].nbr_clients;
+ }
+ if (arbiter == 0) {
+ if (region == EXT_REGION)
+ REG_WR_INT_VECT(marb_foo, regi_marb_foo,
+ rw_l2_slots, slot, val[slot]);
+ else if (region == INT_REGION)
+ REG_WR_INT_VECT(marb_foo, regi_marb_foo,
+ rw_intm_slots, slot, val[slot]);
+ } else {
+ REG_WR_INT_VECT(marb_bar, regi_marb_bar,
+ rw_ddr2_slots, slot, val[slot]);
+ }
+ }
+}
+
+extern char _stext, _etext;
+
+static void crisv32_arbiter_init(void)
+{
+ static int initialized;
+
+ if (initialized)
+ return;
+
+ initialized = 1;
+
+ /*
+ * CPU caches are always set to active, but with zero
+ * bandwidth allocated. It should be ok to allocate zero
+ * bandwidth for the caches, because DMA for other channels
+ * will supposedly finish, once their programmed amount is
+ * done, and then the caches will get access according to the
+ * "fixed scheme" for unclaimed slots. Though, if for some
+ * use-case somewhere, there's a maximum CPU latency for
+ * e.g. some interrupt, we have to start allocating specific
+ * bandwidth for the CPU caches too.
+ */
+ arbiters[0].active_clients[EXT_REGION][11] = 1;
+ arbiters[0].active_clients[EXT_REGION][12] = 1;
+ crisv32_arbiter_config(0, EXT_REGION, 0);
+ crisv32_arbiter_config(0, INT_REGION, 0);
+ crisv32_arbiter_config(1, EXT_REGION, 0);
+
+ if (request_irq(MEMARB_FOO_INTR_VECT, crisv32_foo_arbiter_irq,
+ IRQF_DISABLED, "arbiter", NULL))
+ printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");
+
+ if (request_irq(MEMARB_BAR_INTR_VECT, crisv32_bar_arbiter_irq,
+ IRQF_DISABLED, "arbiter", NULL))
+ printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");
+
+#ifndef CONFIG_ETRAX_KGDB
+ /* Global watch for writes to kernel text segment. */
+ crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+ MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
+ arbiter_all_write, NULL);
+#endif
+
+ /* Set up max burst sizes by default */
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_rd_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_wr_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_ccd_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_wr_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_rd_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_rd_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_vout_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_fifo_burst, 3);
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_l2cache_burst, 3);
+}
+
+int crisv32_arbiter_allocate_bandwith(int client, int region,
+ unsigned long bandwidth)
+{
+ int i;
+ int total_assigned = 0;
+ int total_clients = 0;
+ int req;
+ int arbiter = 0;
+
+ crisv32_arbiter_init();
+
+ if (client & 0xffff0000) {
+ arbiter = 1;
+ client >>= 16;
+ }
+
+ for (i = 0; i < arbiters[arbiter].nbr_clients; i++) {
+ total_assigned += arbiters[arbiter].requested_slots[region][i];
+ total_clients += arbiters[arbiter].active_clients[region][i];
+ }
+
+ /* Avoid division by 0 for 0-bandwidth requests. */
+ req = bandwidth == 0
+ ? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);
+
+ /*
+ * We make sure that there are enough slots only for non-zero
+ * requests. Requesting 0 bandwidth *may* allocate slots,
+ * though if all bandwidth is allocated, such a client won't
+ * get any and will have to rely on getting memory access
+ * according to the fixed scheme that's the default when one
+ * of the slot-allocated clients doesn't claim their slot.
+ */
+ if (total_assigned + req > NBR_OF_SLOTS)
+ return -ENOMEM;
+
+ arbiters[arbiter].active_clients[region][client] = 1;
+ arbiters[arbiter].requested_slots[region][client] = req;
+ crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
+
+ /* Propagate allocation from foo to bar */
+ if (arbiter == 0)
+ crisv32_arbiter_allocate_bandwith(8 << 16,
+ EXT_REGION, bandwidth);
+ return 0;
+}
+
+/*
+ * Main entry for bandwidth deallocation.
+ *
+ * Strictly speaking, for a somewhat constant set of clients where
+ * each client gets a constant bandwidth and is just enabled or
+ * disabled (somewhat dynamically), no action is necessary here to
+ * avoid starvation for non-zero-allocation clients, as the allocated
+ * slots will just be unused. However, handing out those unused slots
+ * to active clients avoids needless latency if the "fixed scheme"
+ * would give unclaimed slots to an eager low-index client.
+ */
+
+void crisv32_arbiter_deallocate_bandwidth(int client, int region)
+{
+ int i;
+ int total_assigned = 0;
+ int arbiter = 0;
+
+ if (client & 0xffff0000)
+ arbiter = 1;
+
+ arbiters[arbiter].requested_slots[region][client] = 0;
+ arbiters[arbiter].active_clients[region][client] = 0;
+
+ for (i = 0; i < arbiters[arbiter].nbr_clients; i++)
+ total_assigned += arbiters[arbiter].requested_slots[region][i];
+
+ crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
+}
+
+int crisv32_arbiter_watch(unsigned long start, unsigned long size,
+ unsigned long clients, unsigned long accesses,
+ watch_callback *cb)
+{
+ int i;
+ int arbiter;
+ int used[2] = { 0, 0 };
+ int ret = 0;
+
+ crisv32_arbiter_init();
+
+ if (start > 0x80000000) {
+ printk(KERN_ERR "Arbiter: %lX doesn't look like a "
+ "physical address", start);
+ return -EFAULT;
+ }
+
+ spin_lock(&arbiter_lock);
+
+ if (clients & 0xffff)
+ used[0] = 1;
+ if (clients & 0xffff0000)
+ used[1] = 1;
+
+ for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
+ if (!used[arbiter])
+ continue;
+
+ for (i = 0; i < NUMBER_OF_BP; i++) {
+ if (!watches[arbiter][i].used) {
+ unsigned intr_mask;
+ if (arbiter)
+ intr_mask = REG_RD_INT(marb_bar,
+ regi_marb_bar, rw_intr_mask);
+ else
+ intr_mask = REG_RD_INT(marb_foo,
+ regi_marb_foo, rw_intr_mask);
+
+ watches[arbiter][i].used = 1;
+ watches[arbiter][i].start = start;
+ watches[arbiter][i].end = start + size;
+ watches[arbiter][i].cb = cb;
+
+ ret |= (i + 1) << (arbiter + 8);
+ if (arbiter) {
+ REG_WR_INT(marb_bar_bp,
+ watches[arbiter][i].instance,
+ rw_first_addr,
+ watches[arbiter][i].start);
+ REG_WR_INT(marb_bar_bp,
+ watches[arbiter][i].instance,
+ rw_last_addr,
+ watches[arbiter][i].end);
+ REG_WR_INT(marb_bar_bp,
+ watches[arbiter][i].instance,
+ rw_op, accesses);
+ REG_WR_INT(marb_bar_bp,
+ watches[arbiter][i].instance,
+ rw_clients,
+ clients & 0xffff);
+ } else {
+ REG_WR_INT(marb_foo_bp,
+ watches[arbiter][i].instance,
+ rw_first_addr,
+ watches[arbiter][i].start);
+ REG_WR_INT(marb_foo_bp,
+ watches[arbiter][i].instance,
+ rw_last_addr,
+ watches[arbiter][i].end);
+ REG_WR_INT(marb_foo_bp,
+ watches[arbiter][i].instance,
+ rw_op, accesses);
+ REG_WR_INT(marb_foo_bp,
+ watches[arbiter][i].instance,
+ rw_clients, clients >> 16);
+ }
+
+ if (i == 0)
+ intr_mask |= 1;
+ else if (i == 1)
+ intr_mask |= 2;
+ else if (i == 2)
+ intr_mask |= 4;
+ else if (i == 3)
+ intr_mask |= 8;
+
+ if (arbiter)
+ REG_WR_INT(marb_bar, regi_marb_bar,
+ rw_intr_mask, intr_mask);
+ else
+ REG_WR_INT(marb_foo, regi_marb_foo,
+ rw_intr_mask, intr_mask);
+
+ spin_unlock(&arbiter_lock);
+
+ break;
+ }
+ }
+ }
+ spin_unlock(&arbiter_lock);
+ if (ret)
+ return ret;
+ else
+ return -ENOMEM;
+}
+
+int crisv32_arbiter_unwatch(int id)
+{
+ int arbiter;
+ int intr_mask;
+
+ crisv32_arbiter_init();
+
+ spin_lock(&arbiter_lock);
+
+ for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
+ int id2;
+
+ if (arbiter)
+ intr_mask = REG_RD_INT(marb_bar, regi_marb_bar,
+ rw_intr_mask);
+ else
+ intr_mask = REG_RD_INT(marb_foo, regi_marb_foo,
+ rw_intr_mask);
+
+ id2 = (id & (0xff << (arbiter + 8))) >> (arbiter + 8);
+ if (id2 == 0)
+ continue;
+ id2--;
+ if ((id2 >= NUMBER_OF_BP) || (!watches[arbiter][id2].used)) {
+ spin_unlock(&arbiter_lock);
+ return -EINVAL;
+ }
+
+ memset(&watches[arbiter][id2], 0,
+ sizeof(struct crisv32_watch_entry));
+
+ if (id2 == 0)
+ intr_mask &= ~1;
+ else if (id2 == 1)
+ intr_mask &= ~2;
+ else if (id2 == 2)
+ intr_mask &= ~4;
+ else if (id2 == 3)
+ intr_mask &= ~8;
+
+ if (arbiter)
+ REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask,
+ intr_mask);
+ else
+ REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask,
+ intr_mask);
+ }
+
+ spin_unlock(&arbiter_lock);
+ return 0;
+}
+
+extern void show_registers(struct pt_regs *regs);
+
+
+static irqreturn_t
+crisv32_foo_arbiter_irq(int irq, void *dev_id)
+{
+ reg_marb_foo_r_masked_intr masked_intr =
+ REG_RD(marb_foo, regi_marb_foo, r_masked_intr);
+ reg_marb_foo_bp_r_brk_clients r_clients;
+ reg_marb_foo_bp_r_brk_addr r_addr;
+ reg_marb_foo_bp_r_brk_op r_op;
+ reg_marb_foo_bp_r_brk_first_client r_first;
+ reg_marb_foo_bp_r_brk_size r_size;
+ reg_marb_foo_bp_rw_ack ack = {0};
+ reg_marb_foo_rw_ack_intr ack_intr = {
+ .bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
+ };
+ struct crisv32_watch_entry *watch;
+ unsigned arbiter = (unsigned)dev_id;
+
+ masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr);
+
+ if (masked_intr.bp0)
+ watch = &watches[arbiter][0];
+ else if (masked_intr.bp1)
+ watch = &watches[arbiter][1];
+ else if (masked_intr.bp2)
+ watch = &watches[arbiter][2];
+ else if (masked_intr.bp3)
+ watch = &watches[arbiter][3];
+ else
+ return IRQ_NONE;
+
+ /* Retrieve all useful information and print it. */
+ r_clients = REG_RD(marb_foo_bp, watch->instance, r_brk_clients);
+ r_addr = REG_RD(marb_foo_bp, watch->instance, r_brk_addr);
+ r_op = REG_RD(marb_foo_bp, watch->instance, r_brk_op);
+ r_first = REG_RD(marb_foo_bp, watch->instance, r_brk_first_client);
+ r_size = REG_RD(marb_foo_bp, watch->instance, r_brk_size);
+
+ printk(KERN_DEBUG "Arbiter IRQ\n");
+ printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
+ REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_clients, r_clients),
+ REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_addr, r_addr),
+ REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_op, r_op),
+ REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_first_client, r_first),
+ REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_size, r_size));
+
+ REG_WR(marb_foo_bp, watch->instance, rw_ack, ack);
+ REG_WR(marb_foo, regi_marb_foo, rw_ack_intr, ack_intr);
+
+ printk(KERN_DEBUG "IRQ occured at %X\n", (unsigned)get_irq_regs());
+
+ if (watch->cb)
+ watch->cb();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+crisv32_bar_arbiter_irq(int irq, void *dev_id)
+{
+ reg_marb_bar_r_masked_intr masked_intr =
+ REG_RD(marb_bar, regi_marb_bar, r_masked_intr);
+ reg_marb_bar_bp_r_brk_clients r_clients;
+ reg_marb_bar_bp_r_brk_addr r_addr;
+ reg_marb_bar_bp_r_brk_op r_op;
+ reg_marb_bar_bp_r_brk_first_client r_first;
+ reg_marb_bar_bp_r_brk_size r_size;
+ reg_marb_bar_bp_rw_ack ack = {0};
+ reg_marb_bar_rw_ack_intr ack_intr = {
+ .bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
+ };
+ struct crisv32_watch_entry *watch;
+ unsigned arbiter = (unsigned)dev_id;
+
+ masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr);
+
+ if (masked_intr.bp0)
+ watch = &watches[arbiter][0];
+ else if (masked_intr.bp1)
+ watch = &watches[arbiter][1];
+ else if (masked_intr.bp2)
+ watch = &watches[arbiter][2];
+ else if (masked_intr.bp3)
+ watch = &watches[arbiter][3];
+ else
+ return IRQ_NONE;
+
+ /* Retrieve all useful information and print it. */
+ r_clients = REG_RD(marb_bar_bp, watch->instance, r_brk_clients);
+ r_addr = REG_RD(marb_bar_bp, watch->instance, r_brk_addr);
+ r_op = REG_RD(marb_bar_bp, watch->instance, r_brk_op);
+ r_first = REG_RD(marb_bar_bp, watch->instance, r_brk_first_client);
+ r_size = REG_RD(marb_bar_bp, watch->instance, r_brk_size);
+
+ printk(KERN_DEBUG "Arbiter IRQ\n");
+ printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
+ REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_clients, r_clients),
+ REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_addr, r_addr),
+ REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_op, r_op),
+ REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_first_client, r_first),
+ REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_size, r_size));
+
+ REG_WR(marb_bar_bp, watch->instance, rw_ack, ack);
+ REG_WR(marb_bar, regi_marb_bar, rw_ack_intr, ack_intr);
+
+ printk(KERN_DEBUG "IRQ occured at %X\n", (unsigned)get_irq_regs()->erp);
+
+ if (watch->cb)
+ watch->cb();
+
+ return IRQ_HANDLED;
+}
+
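
For reference, a minimal sketch of how a client driver might use the arbiter interface above. The client number, bandwidth figure and physical address are placeholders; EXT_REGION, MARB_CLIENTS(), arbiter_all_clients, arbiter_bar_all_clients and arbiter_all_write are assumed to come from arbiter.h, just as they are used elsewhere in this patch.

#include <arbiter.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical breakpoint callback: just log that the watched range was hit. */
static void my_watch_cb(void)
{
	printk(KERN_INFO "watched range accessed\n");
}

static int my_client_init(void)
{
	int id;

	/* Reserve ~10 Mbyte/s of DDR2 bandwidth for client 0 (note that the
	 * exported function name really is spelled "bandwith"). */
	if (crisv32_arbiter_allocate_bandwith(0, EXT_REGION, 10000000))
		return -ENOMEM;

	/* Trap all writes to a 4 KB physical window; the return value is the
	 * handle later passed to crisv32_arbiter_unwatch(). */
	id = crisv32_arbiter_watch(0x40000000, 4096,
				   MARB_CLIENTS(arbiter_all_clients,
						arbiter_bar_all_clients),
				   arbiter_all_write, my_watch_cb);
	if (id < 0)
		return id;

	/* ... and tear everything down again when done. */
	crisv32_arbiter_unwatch(id);
	crisv32_arbiter_deallocate_bandwidth(0, EXT_REGION);
	return 0;
}
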
diff --git a/arch/cris/arch-v32/mach-a3/cpufreq.c b/arch/cris/arch-v32/mach-a3/cpufreq.c
new file mode 100644
index 000000000000..8e5a3cab8ad7
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/cpufreq.c
@@ -0,0 +1,153 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/clkgen_defs.h>
+#include <hwregs/ddr2_defs.h>
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data);
+
+static struct notifier_block cris_sdram_freq_notifier_block = {
+ .notifier_call = cris_sdram_freq_notifier
+};
+
+static struct cpufreq_frequency_table cris_freq_table[] = {
+ {0x01, 6000},
+ {0x02, 200000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
+{
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+ return clk_ctrl.pll ? 200000 : 6000;
+}
+
+static void cris_freq_set_cpu_state(unsigned int state)
+{
+ int i = 0;
+ struct cpufreq_freqs freqs;
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+
+#ifdef CONFIG_SMP
+ for_each_present_cpu(i)
+#endif
+ {
+ freqs.old = cris_freq_get_cpu_frequency(i);
+ freqs.new = cris_freq_table[state].frequency;
+ freqs.cpu = i;
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ local_irq_disable();
+
+ /* Even though we may be SMP they will share the same clock
+ * so all settings are made on CPU0. */
+ if (cris_freq_table[state].frequency == 200000)
+ clk_ctrl.pll = 1;
+ else
+ clk_ctrl.pll = 0;
+ REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
+
+ local_irq_enable();
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+};
+
+static int cris_freq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
+}
+
+static int cris_freq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int newstate = 0;
+
+ if (cpufreq_frequency_table_target(policy, cris_freq_table,
+ target_freq, relation, &newstate))
+ return -EINVAL;
+
+ cris_freq_set_cpu_state(newstate);
+
+ return 0;
+}
+
+static int cris_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ /* cpuinfo and default policy values */
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ policy->cpuinfo.transition_latency = 1000000; /* 1ms */
+ policy->cur = cris_freq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
+ if (result)
+ return (result);
+
+ cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
+
+ return 0;
+}
+
+
+static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+
+static struct freq_attr *cris_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver cris_freq_driver = {
+ .get = cris_freq_get_cpu_frequency,
+ .verify = cris_freq_verify,
+ .target = cris_freq_target,
+ .init = cris_freq_cpu_init,
+ .exit = cris_freq_cpu_exit,
+ .name = "cris_freq",
+ .owner = THIS_MODULE,
+ .attr = cris_freq_attr,
+};
+
+static int __init cris_freq_init(void)
+{
+ int ret;
+ ret = cpufreq_register_driver(&cris_freq_driver);
+ cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return ret;
+}
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ int i;
+ struct cpufreq_freqs *freqs = data;
+ if (val == CPUFREQ_PRECHANGE) {
+ reg_ddr2_rw_cfg cfg =
+ REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
+ cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
+
+ if (freqs->new == 200000)
+ for (i = 0; i < 50000; i++);
+ REG_WR(ddr2, regi_ddr2_ctrl, rw_cfg, cfg);
+ }
+ return 0;
+}
+
+
+module_init(cris_freq_init);
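
The two ref_interval values in the notifier above track the DDR2 refresh requirement across the clock switch: DRAM rows must be refreshed roughly every 7.8 us (the usual DDR2 tREFI figure, an assumption here rather than something stated in the patch), which is about 1560 controller cycles at 200 MHz and about 46 cycles at 6 MHz. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const double trefi_us = 7.8;            /* assumed DDR2 refresh interval */
	const double clk_mhz[] = { 200.0, 6.0 };
	int i;

	for (i = 0; i < 2; i++)
		/* refresh interval expressed in controller clock cycles */
		printf("%5.0f MHz -> %4.0f cycles\n",
		       clk_mhz[i], trefi_us * clk_mhz[i]);
	return 0;	/* prints 1560 and 47; the driver rounds down to 46 */
}
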
diff --git a/arch/cris/arch-v32/mach-a3/dma.c b/arch/cris/arch-v32/mach-a3/dma.c
new file mode 100644
index 000000000000..0c19fede0e65
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/dma.c
@@ -0,0 +1,185 @@
+/* Wrapper for DMA channel allocator that starts clocks etc */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <asm/arch/mach/dma.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/marb_defs.h>
+#include <hwregs/clkgen_defs.h>
+#include <hwregs/strmux_defs.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <arbiter.h>
+
+static char used_dma_channels[MAX_DMA_CHANNELS];
+static const char *used_dma_channels_users[MAX_DMA_CHANNELS];
+
+static DEFINE_SPINLOCK(dma_lock);
+
+int crisv32_request_dma(unsigned int dmanr, const char *device_id,
+ unsigned options, unsigned int bandwidth, enum dma_owner owner)
+{
+ unsigned long flags;
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ reg_strmux_rw_cfg strmux_cfg;
+
+ if (crisv32_arbiter_allocate_bandwith(dmanr,
+ options & DMA_INT_MEM ? INT_REGION : EXT_REGION,
+ bandwidth))
+ return -ENOMEM;
+
+ spin_lock_irqsave(&dma_lock, flags);
+
+ if (used_dma_channels[dmanr]) {
+ spin_unlock_irqrestore(&dma_lock, flags);
+ if (options & DMA_VERBOSE_ON_ERROR)
+ printk(KERN_ERR "Failed to request DMA %i for %s, "
+ "already allocated by %s\n",
+ dmanr,
+ device_id,
+ used_dma_channels_users[dmanr]);
+
+ if (options & DMA_PANIC_ON_ERROR)
+ panic("request_dma error!");
+ return -EBUSY;
+ }
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+ strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
+
+ switch (dmanr) {
+ case 0:
+ case 1:
+ clk_ctrl.dma0_1_eth = 1;
+ break;
+ case 2:
+ case 3:
+ clk_ctrl.dma2_3_strcop = 1;
+ break;
+ case 4:
+ case 5:
+ clk_ctrl.dma4_5_iop = 1;
+ break;
+ case 6:
+ case 7:
+ clk_ctrl.sser_ser_dma6_7 = 1;
+ break;
+ case 9:
+ case 11:
+ clk_ctrl.dma9_11 = 1;
+ break;
+#if MAX_DMA_CHANNELS-1 != 11
+#error Check dma.c
+#endif
+ default:
+ spin_unlock_irqrestore(&dma_lock, flags);
+ if (options & DMA_VERBOSE_ON_ERROR)
+ printk(KERN_ERR "Failed to request DMA %i for %s, "
+ "only 0-%i valid)\n",
+ dmanr, device_id, MAX_DMA_CHANNELS-1);
+
+ if (options & DMA_PANIC_ON_ERROR)
+ panic("request_dma error!");
+ return -EINVAL;
+ }
+
+ switch (owner) {
+ case dma_eth:
+ if (dmanr == 0)
+ strmux_cfg.dma0 = regk_strmux_eth;
+ else if (dmanr == 1)
+ strmux_cfg.dma1 = regk_strmux_eth;
+ else
+ panic("Invalid DMA channel for eth\n");
+ break;
+ case dma_ser0:
+ if (dmanr == 0)
+ strmux_cfg.dma0 = regk_strmux_ser0;
+ else if (dmanr == 1)
+ strmux_cfg.dma1 = regk_strmux_ser0;
+ else
+ panic("Invalid DMA channel for ser0\n");
+ break;
+ case dma_ser3:
+ if (dmanr == 2)
+ strmux_cfg.dma2 = regk_strmux_ser3;
+ else if (dmanr == 3)
+ strmux_cfg.dma3 = regk_strmux_ser3;
+ else
+ panic("Invalid DMA channel for ser3\n");
+ break;
+ case dma_strp:
+ if (dmanr == 2)
+ strmux_cfg.dma2 = regk_strmux_strcop;
+ else if (dmanr == 3)
+ strmux_cfg.dma3 = regk_strmux_strcop;
+ else
+ panic("Invalid DMA channel for strp\n");
+ break;
+ case dma_ser1:
+ if (dmanr == 4)
+ strmux_cfg.dma4 = regk_strmux_ser1;
+ else if (dmanr == 5)
+ strmux_cfg.dma5 = regk_strmux_ser1;
+ else
+ panic("Invalid DMA channel for ser1\n");
+ break;
+ case dma_iop:
+ if (dmanr == 4)
+ strmux_cfg.dma4 = regk_strmux_iop;
+ else if (dmanr == 5)
+ strmux_cfg.dma5 = regk_strmux_iop;
+ else
+ panic("Invalid DMA channel for iop\n");
+ break;
+ case dma_ser2:
+ if (dmanr == 6)
+ strmux_cfg.dma6 = regk_strmux_ser2;
+ else if (dmanr == 7)
+ strmux_cfg.dma7 = regk_strmux_ser2;
+ else
+ panic("Invalid DMA channel for ser2\n");
+ break;
+ case dma_sser:
+ if (dmanr == 6)
+ strmux_cfg.dma6 = regk_strmux_sser;
+ else if (dmanr == 7)
+ strmux_cfg.dma7 = regk_strmux_sser;
+ else
+ panic("Invalid DMA channel for sser\n");
+ break;
+ case dma_ser4:
+ if (dmanr == 9)
+ strmux_cfg.dma9 = regk_strmux_ser4;
+ else
+ panic("Invalid DMA channel for ser4\n");
+ break;
+ case dma_jpeg:
+ if (dmanr == 9)
+ strmux_cfg.dma9 = regk_strmux_jpeg;
+ else
+ panic("Invalid DMA channel for JPEG\n");
+ break;
+ case dma_h264:
+ if (dmanr == 11)
+ strmux_cfg.dma11 = regk_strmux_h264;
+ else
+ panic("Invalid DMA channel for H264\n");
+ break;
+ }
+
+ used_dma_channels[dmanr] = 1;
+ used_dma_channels_users[dmanr] = device_id;
+ REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
+ REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
+ spin_unlock_irqrestore(&dma_lock, flags);
+ return 0;
+}
+
+void crisv32_free_dma(unsigned int dmanr)
+{
+ spin_lock(&dma_lock);
+ used_dma_channels[dmanr] = 0;
+ spin_unlock(&dma_lock);
+}
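
A short sketch of a driver claiming one of these channels; the device name is a placeholder, while the option flag and dma_owner value are ones this file already handles. Channel 9 is used because, as the switch above shows, it is the only channel the stream multiplexer can route to ser4.

#include <asm/arch/mach/dma.h>
#include <linux/errno.h>

static int my_ser4_dma_init(void)
{
	int err = crisv32_request_dma(9, "my-ser4-driver",
				      DMA_VERBOSE_ON_ERROR, 0, dma_ser4);
	if (err)
		return err;

	/* ... program DMA descriptors and run the transfer ... */

	crisv32_free_dma(9);
	return 0;
}
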
diff --git a/arch/cris/arch-v32/mach-a3/dram_init.S b/arch/cris/arch-v32/mach-a3/dram_init.S
new file mode 100644
index 000000000000..94d6b41cb299
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/dram_init.S
@@ -0,0 +1,104 @@
+/*
+ * DDR SDRAM initialization - alter with care
+ * This file is intended to be included from other assembler files
+ *
+ * Note: This file may not modify r8 or r9 because they are used to
+ * carry information from the decompressor to the kernel
+ *
+ * Copyright (C) 2005-2007 Axis Communications AB
+ *
+ * Authors: Mikael Starvik <starvik@axis.com>
+ */
+
+/* Just to be certain the config file is included, we include it here
+ * explicitly instead of depending on it being included in the file that
+ * uses this code.
+ */
+
+#include <hwregs/asm/reg_map_asm.h>
+#include <hwregs/asm/ddr2_defs_asm.h>
+
+ ;; WARNING! The registers r8 and r9 are used as parameters carrying
+ ;; information from the decompressor (if the kernel was compressed).
+ ;; They should not be used in the code below.
+
+ ;; Refer to ddr2 MDS for initialization sequence
+
+ ; Start clock
+ move.d REG_ADDR(ddr2, regi_ddr2_ctrl, rw_phy_cfg), $r0
+ move.d REG_STATE(ddr2, rw_phy_cfg, en, yes), $r1
+ move.d $r1, [$r0]
+
+ ; Reset phy and start calibration
+ move.d REG_ADDR(ddr2, regi_ddr2_ctrl, rw_phy_ctrl), $r0
+ move.d REG_STATE(ddr2, rw_phy_ctrl, rst, yes) | \
+ REG_STATE(ddr2, rw_phy_ctrl, cal_rst, yes), $r1
+ move.d $r1, [$r0]
+ move.d REG_STATE(ddr2, rw_phy_ctrl, cal_start, yes), $r1
+ move.d $r1, [$r0]
+
+ ; 2. Wait 200us
+ move.d 10000, $r2
+1: bne 1b
+ subq 1, $r2
+
+ ; Issue commands
+ move.d REG_ADDR(ddr2, regi_ddr2_ctrl, rw_ctrl), $r0
+ move.d sdram_commands_start, $r2
+command_loop:
+ movu.b [$r2+], $r1
+ movu.w [$r2+], $r3
+do_cmd:
+ lslq 16, $r1
+ or.d $r3, $r1
+ move.d $r1, [$r0]
+ cmp.d sdram_commands_end, $r2
+ blo command_loop
+ nop
+
+ ; Set timing
+ move.d REG_ADDR(ddr2, regi_ddr2_ctrl, rw_timing), $r0
+ move.d CONFIG_ETRAX_DDR2_TIMING, $r1
+ move.d $r1, [$r0]
+
+ ; Set latency
+ move.d REG_ADDR(ddr2, regi_ddr2_ctrl, rw_latency), $r0
+ move.d 0x13, $r1
+ move.d $r1, [$r0]
+
+ ; Set configuration
+ move.d REG_ADDR(ddr2, regi_ddr2_ctrl, rw_cfg), $r0
+ move.d CONFIG_ETRAX_DDR2_CONFIG, $r1
+ move.d $r1, [$r0]
+
+ ba after_sdram_commands
+ nop
+
+sdram_commands_start:
+ .byte regk_ddr2_deselect
+ .word 0
+ .byte regk_ddr2_pre
+ .word regk_ddr2_pre_all
+ .byte regk_ddr2_emrs2
+ .word 0
+ .byte regk_ddr2_emrs3
+ .word 0
+ .byte regk_ddr2_emrs
+ .word regk_ddr2_dll_en
+ .byte regk_ddr2_mrs
+ .word regk_ddr2_dll_rst
+ .byte regk_ddr2_pre
+ .word regk_ddr2_pre_all
+ .byte regk_ddr2_ref
+ .word 0
+ .byte regk_ddr2_ref
+ .word 0
+ .byte regk_ddr2_mrs
+ .word CONFIG_ETRAX_DDR2_MRS & 0xffff
+ .byte regk_ddr2_emrs
+ .word regk_ddr2_ocd_default | regk_ddr2_dll_en
+ .byte regk_ddr2_emrs
+ .word regk_ddr2_ocd_exit | regk_ddr2_dll_en | (CONFIG_ETRAX_DDR2_MRS >> 16)
+sdram_commands_end:
+ .align 1
+after_sdram_commands:
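
The command table above stores one byte of DDR2 command followed by a 16-bit argument per entry; the loop shifts the command up 16 bits and ORs in the argument before writing the result to rw_ctrl. The same encoding written out in C, purely as an illustration of the packing (the field layout is inferred from the lslq/or.d sequence above):

#include <stdint.h>

/* Value written to rw_ctrl for one table entry:
 * bits 31..16 hold the command, bits 15..0 hold the argument. */
static inline uint32_t ddr2_ctrl_word(uint8_t cmd, uint16_t arg)
{
	return ((uint32_t)cmd << 16) | arg;
}
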
diff --git a/arch/cris/arch-v32/mach-a3/hw_settings.S b/arch/cris/arch-v32/mach-a3/hw_settings.S
new file mode 100644
index 000000000000..258a6329cd4a
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/hw_settings.S
@@ -0,0 +1,51 @@
+/*
+ * This table is used by some tools to extract hardware parameters.
+ * The table should be included in the kernel and the decompressor.
+ * Don't forget to update the tools if you change this table.
+ *
+ * Copyright (C) 2001-2007 Axis Communications AB
+ *
+ * Authors: Mikael Starvik <starvik@axis.com>
+ */
+
+#include <hwregs/asm/reg_map_asm.h>
+#include <hwregs/asm/ddr2_defs_asm.h>
+#include <hwregs/asm/gio_defs_asm.h>
+
+ .ascii "HW_PARAM_MAGIC" ; Magic number
+ .dword 0xc0004000 ; Kernel start address
+
+ ; Debug port
+#ifdef CONFIG_ETRAX_DEBUG_PORT0
+ .dword 0
+#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
+ .dword 1
+#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
+ .dword 2
+#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
+ .dword 3
+#else
+ .dword 4 ; No debug
+#endif
+
+ ; Register values
+ .dword REG_ADDR(ddr2, regi_ddr2_ctrl, rw_cfg)
+ .dword CONFIG_ETRAX_DDR2_CONFIG
+ .dword REG_ADDR(ddr2, regi_ddr2_ctrl, rw_timing)
+ .dword CONFIG_ETRAX_DDR2_TIMING
+ .dword CONFIG_ETRAX_DDR2_MRS
+
+ .dword REG_ADDR(gio, regi_gio, rw_pa_dout)
+ .dword CONFIG_ETRAX_DEF_GIO_PA_OUT
+ .dword REG_ADDR(gio, regi_gio, rw_pa_oe)
+ .dword CONFIG_ETRAX_DEF_GIO_PA_OE
+ .dword REG_ADDR(gio, regi_gio, rw_pb_dout)
+ .dword CONFIG_ETRAX_DEF_GIO_PB_OUT
+ .dword REG_ADDR(gio, regi_gio, rw_pb_oe)
+ .dword CONFIG_ETRAX_DEF_GIO_PB_OE
+ .dword REG_ADDR(gio, regi_gio, rw_pc_dout)
+ .dword CONFIG_ETRAX_DEF_GIO_PC_OUT
+ .dword REG_ADDR(gio, regi_gio, rw_pc_oe)
+ .dword CONFIG_ETRAX_DEF_GIO_PC_OE
+
+ .dword 0 ; No more register values
diff --git a/arch/cris/arch-v32/mach-a3/io.c b/arch/cris/arch-v32/mach-a3/io.c
new file mode 100644
index 000000000000..5e6c1aad8b4f
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/io.c
@@ -0,0 +1,150 @@
+/*
+ * Helper functions for I/O pins.
+ *
+ * Copyright (c) 2005-2007 Axis Communications AB.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/arch/mach/pinmux.h>
+#include <hwregs/gio_defs.h>
+
+struct crisv32_ioport crisv32_ioports[] = {
+ {
+ (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_oe),
+ (unsigned long *)REG_ADDR(gio, regi_gio, rw_pa_dout),
+ (unsigned long *)REG_ADDR(gio, regi_gio, r_pa_din),
+ 32
+ },
+ {
+ (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_oe),
+ (unsigned long *)REG_ADDR(gio, regi_gio, rw_pb_dout),
+ (unsigned long *)REG_ADDR(gio, regi_gio, r_pb_din),
+ 32
+ },
+ {
+ (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_oe),
+ (unsigned long *)REG_ADDR(gio, regi_gio, rw_pc_dout),
+ (unsigned long *)REG_ADDR(gio, regi_gio, r_pc_din),
+ 16
+ },
+};
+
+#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport)
+
+struct crisv32_iopin crisv32_led_net0_green;
+struct crisv32_iopin crisv32_led_net0_red;
+struct crisv32_iopin crisv32_led2_green;
+struct crisv32_iopin crisv32_led2_red;
+struct crisv32_iopin crisv32_led3_green;
+struct crisv32_iopin crisv32_led3_red;
+
+/* Dummy port used when green LED and red LED is on the same bit */
+static unsigned long io_dummy;
+static struct crisv32_ioport dummy_port = {
+ &io_dummy,
+ &io_dummy,
+ &io_dummy,
+ 32
+};
+static struct crisv32_iopin dummy_led = {
+ &dummy_port,
+ 0
+};
+
+static int __init crisv32_io_init(void)
+{
+ int ret = 0;
+
+ u32 i;
+
+ /* Locks *should* be dynamically initialized. */
+ for (i = 0; i < ARRAY_SIZE(crisv32_ioports); i++)
+ spin_lock_init(&crisv32_ioports[i].lock);
+ spin_lock_init(&dummy_port.lock);
+
+ /* Initialize LEDs */
+#if (defined(CONFIG_ETRAX_NBR_LED_GRP_ONE) || defined(CONFIG_ETRAX_NBR_LED_GRP_TWO))
+ ret += crisv32_io_get_name(&crisv32_led_net0_green,
+ CONFIG_ETRAX_LED_G_NET0);
+ crisv32_io_set_dir(&crisv32_led_net0_green, crisv32_io_dir_out);
+ if (strcmp(CONFIG_ETRAX_LED_G_NET0, CONFIG_ETRAX_LED_R_NET0)) {
+ ret += crisv32_io_get_name(&crisv32_led_net0_red,
+ CONFIG_ETRAX_LED_R_NET0);
+ crisv32_io_set_dir(&crisv32_led_net0_red, crisv32_io_dir_out);
+ } else
+ crisv32_led_net0_red = dummy_led;
+#endif
+
+ ret += crisv32_io_get_name(&crisv32_led2_green, CONFIG_ETRAX_V32_LED2G);
+ ret += crisv32_io_get_name(&crisv32_led2_red, CONFIG_ETRAX_V32_LED2R);
+ ret += crisv32_io_get_name(&crisv32_led3_green, CONFIG_ETRAX_V32_LED3G);
+ ret += crisv32_io_get_name(&crisv32_led3_red, CONFIG_ETRAX_V32_LED3R);
+
+ crisv32_io_set_dir(&crisv32_led2_green, crisv32_io_dir_out);
+ crisv32_io_set_dir(&crisv32_led2_red, crisv32_io_dir_out);
+ crisv32_io_set_dir(&crisv32_led3_green, crisv32_io_dir_out);
+ crisv32_io_set_dir(&crisv32_led3_red, crisv32_io_dir_out);
+
+ return ret;
+}
+
+__initcall(crisv32_io_init);
+
+int crisv32_io_get(struct crisv32_iopin *iopin,
+ unsigned int port, unsigned int pin)
+{
+ if (port >= NBR_OF_PORTS)
+ return -EINVAL;
+ if (pin >= crisv32_ioports[port].pin_count)
+ return -EINVAL;
+
+ iopin->bit = 1 << pin;
+ iopin->port = &crisv32_ioports[port];
+
+ if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
+ return -EIO;
+
+ return 0;
+}
+
+int crisv32_io_get_name(struct crisv32_iopin *iopin,
+ const char *name)
+{
+ int port;
+ int pin;
+
+ if (toupper(*name) == 'P')
+ name++;
+
+ if (toupper(*name) < 'A' || toupper(*name) > 'C')
+ return -EINVAL;
+
+ port = toupper(*name) - 'A';
+ name++;
+ pin = simple_strtoul(name, NULL, 10);
+
+ if (pin < 0 || pin >= crisv32_ioports[port].pin_count)
+ return -EINVAL;
+
+ iopin->bit = 1 << pin;
+ iopin->port = &crisv32_ioports[port];
+
+ if (crisv32_pinmux_alloc(port, pin, pin, pinmux_gpio))
+ return -EIO;
+
+ return 0;
+}
+
+#ifdef CONFIG_PCI
+/* PCI I/O access stuff */
+struct cris_io_operations *cris_iops = NULL;
+EXPORT_SYMBOL(cris_iops);
+#endif
+
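
The intended use of these helpers, as a small sketch: look a pin up by name (the leading 'p' is optional, as the parser above shows), make it an output and drive it. The pin name is made up, and crisv32_io_set_dir()/crisv32_io_set(), crisv32_io_dir_out and struct crisv32_iopin are assumed to be declared in the mach io/pinmux headers; only crisv32_io_get() and crisv32_io_get_name() are defined in this file.

#include <asm/arch/mach/pinmux.h>
#include <linux/errno.h>

static int my_led_setup(void)
{
	struct crisv32_iopin led;

	/* "pa4" is a made-up pin name; any port/pin the parser accepts works. */
	if (crisv32_io_get_name(&led, "pa4"))
		return -EIO;

	crisv32_io_set_dir(&led, crisv32_io_dir_out);
	crisv32_io_set(&led, 1);	/* drive the pin high */
	return 0;
}
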
diff --git a/arch/cris/arch-v32/mach-a3/pinmux.c b/arch/cris/arch-v32/mach-a3/pinmux.c
new file mode 100644
index 000000000000..0a28c9bedfb7
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/pinmux.c
@@ -0,0 +1,386 @@
+/*
+ * Allocator for I/O pins. All pins are allocated to GPIO at bootup.
+ * Unassigned pins and GPIO pins can be allocated to a fixed interface
+ * or the I/O processor instead.
+ *
+ * Copyright (c) 2005-2007 Axis Communications AB.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <pinmux.h>
+#include <hwregs/pinmux_defs.h>
+#include <hwregs/clkgen_defs.h>
+
+#undef DEBUG
+
+#define PINS 80
+#define PORT_PINS 32
+#define PORTS 3
+
+static char pins[PINS];
+static DEFINE_SPINLOCK(pinmux_lock);
+
+static void crisv32_pinmux_set(int port);
+
+int
+crisv32_pinmux_init(void)
+{
+ static int initialized;
+
+ if (!initialized) {
+ initialized = 1;
+ REG_WR_INT(pinmux, regi_pinmux, rw_hwprot, 0);
+ crisv32_pinmux_alloc(PORT_A, 0, 31, pinmux_gpio);
+ crisv32_pinmux_alloc(PORT_B, 0, 31, pinmux_gpio);
+ crisv32_pinmux_alloc(PORT_C, 0, 15, pinmux_gpio);
+ }
+
+ return 0;
+}
+
+int
+crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
+{
+ int i;
+ unsigned long flags;
+
+ crisv32_pinmux_init();
+
+ if (port >= PORTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pinmux_lock, flags);
+
+ for (i = first_pin; i <= last_pin; i++) {
+ if ((pins[port * PORT_PINS + i] != pinmux_none) &&
+ (pins[port * PORT_PINS + i] != pinmux_gpio) &&
+ (pins[port * PORT_PINS + i] != mode)) {
+ spin_unlock_irqrestore(&pinmux_lock, flags);
+#ifdef DEBUG
+ panic("Pinmux alloc failed!\n");
+#endif
+ return -EPERM;
+ }
+ }
+
+ for (i = first_pin; i <= last_pin; i++)
+ pins[port * PORT_PINS + i] = mode;
+
+ crisv32_pinmux_set(port);
+
+ spin_unlock_irqrestore(&pinmux_lock, flags);
+
+ return 0;
+}
+
+int
+crisv32_pinmux_alloc_fixed(enum fixed_function function)
+{
+ int ret = -EINVAL;
+ char saved[sizeof pins];
+ unsigned long flags;
+
+ spin_lock_irqsave(&pinmux_lock, flags);
+
+ /* Save internal data for recovery */
+ memcpy(saved, pins, sizeof pins);
+
+ crisv32_pinmux_init(); /* must be done before we read rw_hwprot */
+
+ reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
+ reg_clkgen_rw_clk_ctrl clk_ctrl = REG_RD(clkgen, regi_clkgen,
+ rw_clk_ctrl);
+
+ switch (function) {
+ case pinmux_eth:
+ clk_ctrl.eth = regk_clkgen_yes;
+ clk_ctrl.dma0_1_eth = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_B, 8, 23, pinmux_fixed);
+ ret |= crisv32_pinmux_alloc(PORT_B, 24, 25, pinmux_fixed);
+ hwprot.eth = hwprot.eth_mdio = regk_pinmux_yes;
+ break;
+ case pinmux_geth:
+ ret = crisv32_pinmux_alloc(PORT_B, 0, 7, pinmux_fixed);
+ hwprot.geth = regk_pinmux_yes;
+ break;
+ case pinmux_tg_cmos:
+ clk_ctrl.ccd_tg_100 = clk_ctrl.ccd_tg_200 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_B, 27, 29, pinmux_fixed);
+ hwprot.tg_clk = regk_pinmux_yes;
+ break;
+ case pinmux_tg_ccd:
+ clk_ctrl.ccd_tg_100 = clk_ctrl.ccd_tg_200 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_B, 27, 31, pinmux_fixed);
+ ret |= crisv32_pinmux_alloc(PORT_C, 0, 15, pinmux_fixed);
+ hwprot.tg = hwprot.tg_clk = regk_pinmux_yes;
+ break;
+ case pinmux_vout:
+ clk_ctrl.strdma0_2_video = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_A, 8, 18, pinmux_fixed);
+ hwprot.vout = hwprot.vout_sync = regk_pinmux_yes;
+ break;
+ case pinmux_ser1:
+ clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_A, 24, 25, pinmux_fixed);
+ hwprot.ser1 = regk_pinmux_yes;
+ break;
+ case pinmux_ser2:
+ clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_A, 26, 27, pinmux_fixed);
+ hwprot.ser2 = regk_pinmux_yes;
+ break;
+ case pinmux_ser3:
+ clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_A, 28, 29, pinmux_fixed);
+ hwprot.ser3 = regk_pinmux_yes;
+ break;
+ case pinmux_ser4:
+ clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_A, 30, 31, pinmux_fixed);
+ hwprot.ser4 = regk_pinmux_yes;
+ break;
+ case pinmux_sser:
+ clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes;
+ ret = crisv32_pinmux_alloc(PORT_A, 19, 23, pinmux_fixed);
+ hwprot.sser = regk_pinmux_yes;
+ break;
+ case pinmux_pio:
+ hwprot.pio = regk_pinmux_yes;
+ ret = 0;
+ break;
+ case pinmux_pwm0:
+ ret = crisv32_pinmux_alloc(PORT_A, 30, 30, pinmux_fixed);
+ hwprot.pwm0 = regk_pinmux_yes;
+ break;
+ case pinmux_pwm1:
+ ret = crisv32_pinmux_alloc(PORT_A, 31, 31, pinmux_fixed);
+ hwprot.pwm1 = regk_pinmux_yes;
+ break;
+ case pinmux_pwm2:
+ ret = crisv32_pinmux_alloc(PORT_B, 26, 26, pinmux_fixed);
+ hwprot.pwm2 = regk_pinmux_yes;
+ break;
+ case pinmux_i2c0:
+ ret = crisv32_pinmux_alloc(PORT_A, 0, 1, pinmux_fixed);
+ hwprot.i2c0 = regk_pinmux_yes;
+ break;
+ case pinmux_i2c1:
+ ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed);
+ hwprot.i2c1 = regk_pinmux_yes;
+ break;
+ case pinmux_i2c1_3wire:
+ ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed);
+ ret |= crisv32_pinmux_alloc(PORT_A, 7, 7, pinmux_fixed);
+ hwprot.i2c1 = hwprot.i2c1_sen = regk_pinmux_yes;
+ break;
+ case pinmux_i2c1_sda1:
+ ret = crisv32_pinmux_alloc(PORT_A, 2, 4, pinmux_fixed);
+ hwprot.i2c1 = hwprot.i2c1_sda1 = regk_pinmux_yes;
+ break;
+ case pinmux_i2c1_sda2:
+ ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed);
+ ret |= crisv32_pinmux_alloc(PORT_A, 5, 5, pinmux_fixed);
+ hwprot.i2c1 = hwprot.i2c1_sda2 = regk_pinmux_yes;
+ break;
+ case pinmux_i2c1_sda3:
+ ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed);
+ ret |= crisv32_pinmux_alloc(PORT_A, 6, 6, pinmux_fixed);
+ hwprot.i2c1 = hwprot.i2c1_sda3 = regk_pinmux_yes;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
+ REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
+ } else
+ memcpy(pins, saved, sizeof pins);
+
+ spin_unlock_irqrestore(&pinmux_lock, flags);
+
+ return ret;
+}
+
+void
+crisv32_pinmux_set(int port)
+{
+ int i;
+ int gpio_val = 0;
+ int iop_val = 0;
+ int pin = port * PORT_PINS;
+
+ for (i = 0; (i < PORT_PINS) && (pin < PINS); i++, pin++) {
+ if (pins[pin] == pinmux_gpio)
+ gpio_val |= (1 << i);
+ else if (pins[pin] == pinmux_iop)
+ iop_val |= (1 << i);
+ }
+
+ REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_gio_pa + 4 * port,
+ gpio_val);
+ REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_iop_pa + 4 * port,
+ iop_val);
+
+#ifdef DEBUG
+ crisv32_pinmux_dump();
+#endif
+}
+
+int
+crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+{
+ int i;
+ unsigned long flags;
+
+ crisv32_pinmux_init();
+
+ if (port >= PORTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pinmux_lock, flags);
+
+ for (i = first_pin; i <= last_pin; i++)
+ pins[port * PORT_PINS + i] = pinmux_none;
+
+ crisv32_pinmux_set(port);
+ spin_unlock_irqrestore(&pinmux_lock, flags);
+
+ return 0;
+}
+
+int
+crisv32_pinmux_dealloc_fixed(enum fixed_function function)
+{
+ int ret = -EINVAL;
+ char saved[sizeof pins];
+ unsigned long flags;
+
+ spin_lock_irqsave(&pinmux_lock, flags);
+
+ /* Save internal data for recovery */
+ memcpy(saved, pins, sizeof pins);
+
+ crisv32_pinmux_init(); /* must be done before we read rw_hwprot */
+
+ reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
+
+ switch (function) {
+ case pinmux_eth:
+ ret = crisv32_pinmux_dealloc(PORT_B, 8, 23);
+ ret |= crisv32_pinmux_dealloc(PORT_B, 24, 25);
+ ret |= crisv32_pinmux_dealloc(PORT_B, 0, 7);
+ hwprot.eth = hwprot.eth_mdio = hwprot.geth = regk_pinmux_no;
+ break;
+ case pinmux_tg_cmos:
+ ret = crisv32_pinmux_dealloc(PORT_B, 27, 29);
+ hwprot.tg_clk = regk_pinmux_no;
+ break;
+ case pinmux_tg_ccd:
+ ret = crisv32_pinmux_dealloc(PORT_B, 27, 31);
+ ret |= crisv32_pinmux_dealloc(PORT_C, 0, 15);
+ hwprot.tg = hwprot.tg_clk = regk_pinmux_no;
+ break;
+ case pinmux_vout:
+ ret = crisv32_pinmux_dealloc(PORT_A, 8, 18);
+ hwprot.vout = hwprot.vout_sync = regk_pinmux_no;
+ break;
+ case pinmux_ser1:
+ ret = crisv32_pinmux_dealloc(PORT_A, 24, 25);
+ hwprot.ser1 = regk_pinmux_no;
+ break;
+ case pinmux_ser2:
+ ret = crisv32_pinmux_dealloc(PORT_A, 26, 27);
+ hwprot.ser2 = regk_pinmux_no;
+ break;
+ case pinmux_ser3:
+ ret = crisv32_pinmux_dealloc(PORT_A, 28, 29);
+ hwprot.ser3 = regk_pinmux_no;
+ break;
+ case pinmux_ser4:
+ ret = crisv32_pinmux_dealloc(PORT_A, 30, 31);
+ hwprot.ser4 = regk_pinmux_no;
+ break;
+ case pinmux_sser:
+ ret = crisv32_pinmux_dealloc(PORT_A, 19, 23);
+ hwprot.sser = regk_pinmux_no;
+ break;
+ case pinmux_pwm0:
+ ret = crisv32_pinmux_dealloc(PORT_A, 30, 30);
+ hwprot.pwm0 = regk_pinmux_no;
+ break;
+ case pinmux_pwm1:
+ ret = crisv32_pinmux_dealloc(PORT_A, 31, 31);
+ hwprot.pwm1 = regk_pinmux_no;
+ break;
+ case pinmux_pwm2:
+ ret = crisv32_pinmux_dealloc(PORT_B, 26, 26);
+ hwprot.pwm2 = regk_pinmux_no;
+ break;
+ case pinmux_i2c0:
+ ret = crisv32_pinmux_dealloc(PORT_A, 0, 1);
+ hwprot.i2c0 = regk_pinmux_no;
+ break;
+ case pinmux_i2c1:
+ ret = crisv32_pinmux_dealloc(PORT_A, 2, 3);
+ hwprot.i2c1 = regk_pinmux_no;
+ break;
+ case pinmux_i2c1_3wire:
+ ret = crisv32_pinmux_dealloc(PORT_A, 2, 3);
+ ret |= crisv32_pinmux_dealloc(PORT_A, 7, 7);
+ hwprot.i2c1 = hwprot.i2c1_sen = regk_pinmux_no;
+ break;
+ case pinmux_i2c1_sda1:
+ ret = crisv32_pinmux_dealloc(PORT_A, 2, 4);
+ hwprot.i2c1_sda1 = regk_pinmux_no;
+ break;
+ case pinmux_i2c1_sda2:
+ ret = crisv32_pinmux_dealloc(PORT_A, 2, 3);
+ ret |= crisv32_pinmux_dealloc(PORT_A, 5, 5);
+ hwprot.i2c1_sda2 = regk_pinmux_no;
+ break;
+ case pinmux_i2c1_sda3:
+ ret = crisv32_pinmux_dealloc(PORT_A, 2, 3);
+ ret |= crisv32_pinmux_dealloc(PORT_A, 6, 6);
+ hwprot.i2c1_sda3 = regk_pinmux_no;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
+ else
+ memcpy(pins, saved, sizeof pins);
+
+ spin_unlock_irqrestore(&pinmux_lock, flags);
+
+ return ret;
+}
+
+void
+crisv32_pinmux_dump(void)
+{
+ int i, j;
+ int pin = 0;
+
+ crisv32_pinmux_init();
+
+ for (i = 0; i < PORTS; i++) {
+ pin++;
+ printk(KERN_DEBUG "Port %c\n", 'A'+i);
+ for (j = 0; (j < PORT_PINS) && (pin < PINS); j++, pin++)
+ printk(KERN_DEBUG
+ " Pin %d = %d\n", j, pins[i * PORT_PINS + j]);
+ }
+}
+
+__initcall(crisv32_pinmux_init);
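
A sketch of how board code would claim pins through this allocator, either as a fixed hardware interface or as a plain GPIO range; the constants are the ones this file already uses and the error handling is illustrative only.

#include <pinmux.h>
#include <linux/errno.h>

static int my_board_pin_setup(void)
{
	/* Route serial port 1 onto its fixed pins (PA24-PA25 above) and let
	 * the pinmux hardware protect them. */
	if (crisv32_pinmux_alloc_fixed(pinmux_ser1))
		return -EPERM;

	/* Claim PB26 as a plain GPIO; this fails if it was already handed
	 * out as the fixed PWM2 function. */
	if (crisv32_pinmux_alloc(PORT_B, 26, 26, pinmux_gpio)) {
		crisv32_pinmux_dealloc_fixed(pinmux_ser1);
		return -EPERM;
	}
	return 0;
}
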
diff --git a/arch/cris/arch-v32/mach-a3/vcs_hook.c b/arch/cris/arch-v32/mach-a3/vcs_hook.c
new file mode 100644
index 000000000000..58b1a5469fd7
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/vcs_hook.c
@@ -0,0 +1,103 @@
+/*
+ * Simulator hook mechanism
+ */
+
+#include "vcs_hook.h"
+#include <asm/io.h>
+#include <stdarg.h>
+
+#define HOOK_TRIG_ADDR 0xb7000000
+#define HOOK_MEM_BASE_ADDR 0xce000000
+
+static volatile unsigned *hook_base;
+
+#define HOOK_DATA(offset) hook_base[offset]
+#define VHOOK_DATA(offset) hook_base[offset]
+#define HOOK_TRIG(funcid) \
+ do { \
+ *((unsigned *) HOOK_TRIG_ADDR) = funcid; \
+ } while (0)
+#define HOOK_DATA_BYTE(offset) ((unsigned char *)hook_base)[offset]
+
+static void hook_init(void)
+{
+ static int first = 1;
+ if (first) {
+ first = 0;
+ hook_base = ioremap(HOOK_MEM_BASE_ADDR, 8192);
+ }
+}
+
+static unsigned hook_trig(unsigned id)
+{
+ unsigned ret;
+
+ /* preempt_disable(); */
+
+ /* Dummy read from mem to make sure data has propagated to memory
+ * before trigging */
+ ret = *hook_base;
+
+ /* trigger hook */
+ HOOK_TRIG(id);
+
+ /* wait for call to finish */
+ while (VHOOK_DATA(0) > 0) ;
+
+ /* extract return value */
+
+ ret = VHOOK_DATA(1);
+
+ return ret;
+}
+
+int hook_call(unsigned id, unsigned pcnt, ...)
+{
+ va_list ap;
+ int i;
+ unsigned ret;
+
+ hook_init();
+
+ HOOK_DATA(0) = id;
+
+ va_start(ap, pcnt);
+ for (i = 1; i <= pcnt; i++)
+ HOOK_DATA(i) = va_arg(ap, unsigned);
+ va_end(ap);
+
+ ret = hook_trig(id);
+
+ return ret;
+}
+
+int hook_call_str(unsigned id, unsigned size, const char *str)
+{
+ int i;
+ unsigned ret;
+
+ hook_init();
+
+ HOOK_DATA(0) = id;
+ HOOK_DATA(1) = size;
+
+ for (i = 0; i < size; i++)
+ HOOK_DATA_BYTE(8 + i) = str[i];
+ HOOK_DATA_BYTE(8 + i) = 0;
+
+ ret = hook_trig(id);
+
+ return ret;
+}
+
+void print_str(const char *str)
+{
+ int i;
+ /* find null at end of string */
+ for (i = 1; str[i]; i++) ;
+ hook_call(hook_print_str, i, str);
+}
+
+void CPU_WATCHDOG_TIMEOUT(unsigned t)
+{
+}
diff --git a/arch/cris/arch-v32/mach-a3/vcs_hook.h b/arch/cris/arch-v32/mach-a3/vcs_hook.h
new file mode 100644
index 000000000000..8b73d0e8392d
--- /dev/null
+++ b/arch/cris/arch-v32/mach-a3/vcs_hook.h
@@ -0,0 +1,58 @@
+/*
+ * Simulator hook call mechanism
+ */
+
+#ifndef __hook_h__
+#define __hook_h__
+
+int hook_call(unsigned id, unsigned pcnt, ...);
+int hook_call_str(unsigned id, unsigned size, const char *str);
+
+enum hook_ids {
+ hook_debug_on = 1,
+ hook_debug_off,
+ hook_stop_sim_ok,
+ hook_stop_sim_fail,
+ hook_alloc_shared,
+ hook_ptr_shared,
+ hook_free_shared,
+ hook_file2shared,
+ hook_cmp_shared,
+ hook_print_params,
+ hook_sim_time,
+ hook_stop_sim,
+ hook_kick_dog,
+ hook_dog_timeout,
+ hook_rand,
+ hook_srand,
+ hook_rand_range,
+ hook_print_str,
+ hook_print_hex,
+ hook_cmp_offset_shared,
+ hook_fill_random_shared,
+ hook_alloc_random_data,
+ hook_calloc_random_data,
+ hook_print_int,
+ hook_print_uint,
+ hook_fputc,
+ hook_init_fd,
+ hook_sbrk,
+ hook_print_context_descr,
+ hook_print_data_descr,
+ hook_print_group_descr,
+ hook_fill_shared,
+ hook_sl_srand,
+ hook_sl_rand_irange,
+ hook_sl_rand_urange,
+ hook_sl_sh_malloc_aligned,
+ hook_sl_sh_calloc_aligned,
+ hook_sl_sh_alloc_random_data,
+ hook_sl_sh_file2mem,
+ hook_sl_vera_mbox_handle,
+ hook_sl_vera_mbox_put,
+ hook_sl_vera_mbox_get,
+ hook_sl_system,
+ hook_sl_sh_hexdump
+};
+
+#endif
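
A small sketch of calling the simulator hooks declared above from test code running under the VCS simulator: print a message and then stop the simulation with a pass/fail status. The function is illustrative; the hook ids are the ones in the enum above, and how hook_print_str expects its payload (via hook_call_str() rather than hook_call()) is an assumption about the simulator protocol.

#include "vcs_hook.h"
#include <linux/string.h>

static void report_and_stop(int ok)
{
	const char *msg = ok ? "test passed" : "test failed";

	/* hook_call_str() copies the message bytes into the shared buffer. */
	hook_call_str(hook_print_str, strlen(msg), msg);

	/* hook_call(id, pcnt, ...) passes pcnt word-sized arguments. */
	hook_call(ok ? hook_stop_sim_ok : hook_stop_sim_fail, 0);
}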