mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
83
drivers/firewire/Kconfig
Normal file
83
drivers/firewire/Kconfig
Normal file
|
@ -0,0 +1,83 @@
|
|||
menu "IEEE 1394 (FireWire) support"
|
||||
depends on HAS_DMA
|
||||
depends on PCI || COMPILE_TEST
|
||||
# firewire-core does not depend on PCI but is
|
||||
# not useful without PCI controller driver
|
||||
|
||||
config FIREWIRE
|
||||
tristate "FireWire driver stack"
|
||||
select CRC_ITU_T
|
||||
help
|
||||
This is the new-generation IEEE 1394 (FireWire) driver stack
|
||||
a.k.a. Juju, a new implementation designed for robustness and
|
||||
simplicity.
|
||||
See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
|
||||
for information about migration from the older Linux 1394 stack
|
||||
to the new driver stack.
|
||||
|
||||
To compile this driver as a module, say M here: the module will be
|
||||
called firewire-core.
|
||||
|
||||
config FIREWIRE_OHCI
|
||||
tristate "OHCI-1394 controllers"
|
||||
depends on PCI && FIREWIRE && MMU
|
||||
help
|
||||
Enable this driver if you have a FireWire controller based
|
||||
on the OHCI specification. For all practical purposes, this
|
||||
is the only chipset in use, so say Y here.
|
||||
|
||||
To compile this driver as a module, say M here: The module will be
|
||||
called firewire-ohci.
|
||||
|
||||
config FIREWIRE_SBP2
|
||||
tristate "Storage devices (SBP-2 protocol)"
|
||||
depends on FIREWIRE && SCSI
|
||||
help
|
||||
This option enables you to use SBP-2 devices connected to a
|
||||
FireWire bus. SBP-2 devices include storage devices like
|
||||
harddisks and DVD drives, also some other FireWire devices
|
||||
like scanners.
|
||||
|
||||
To compile this driver as a module, say M here: The module will be
|
||||
called firewire-sbp2.
|
||||
|
||||
You should also enable support for disks, CD-ROMs, etc. in the SCSI
|
||||
configuration section.
|
||||
|
||||
config FIREWIRE_NET
|
||||
tristate "IP networking over 1394"
|
||||
depends on FIREWIRE && INET
|
||||
help
|
||||
This enables IPv4/IPv6 over IEEE 1394, providing IP connectivity
|
||||
with other implementations of RFC 2734/3146 as found on several
|
||||
operating systems. Multicast support is currently limited.
|
||||
|
||||
To compile this driver as a module, say M here: The module will be
|
||||
called firewire-net.
|
||||
|
||||
config FIREWIRE_NOSY
|
||||
tristate "Nosy - a FireWire traffic sniffer for PCILynx cards"
|
||||
depends on PCI
|
||||
help
|
||||
Nosy is an IEEE 1394 packet sniffer that is used for protocol
|
||||
analysis and in development of IEEE 1394 drivers, applications,
|
||||
or firmwares.
|
||||
|
||||
This driver lets you use a Texas Instruments PCILynx 1394 to PCI
|
||||
link layer controller TSB12LV21/A/B as a low-budget bus analyzer.
|
||||
PCILynx is a nowadays very rare IEEE 1394 controller which is
|
||||
not OHCI 1394 compliant.
|
||||
|
||||
The following cards are known to be based on PCILynx or PCILynx-2:
|
||||
IOI IOI-1394TT (PCI card), Unibrain Fireboard 400 PCI Lynx-2
|
||||
(PCI card), Newer Technology FireWire 2 Go (CardBus card),
|
||||
Apple Power Mac G3 blue & white and G4 with PCI graphics
|
||||
(onboard controller).
|
||||
|
||||
To compile this driver as a module, say M here: The module will be
|
||||
called nosy. Source code of a userspace interface to nosy, called
|
||||
nosy-dump, can be found in tools/firewire/ of the kernel sources.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
endmenu
|
16
drivers/firewire/Makefile
Normal file
16
drivers/firewire/Makefile
Normal file
|
@ -0,0 +1,16 @@
|
|||
#
|
||||
# Makefile for the Linux IEEE 1394 implementation
|
||||
#
|
||||
|
||||
firewire-core-y += core-card.o core-cdev.o core-device.o \
|
||||
core-iso.o core-topology.o core-transaction.o
|
||||
firewire-ohci-y += ohci.o
|
||||
firewire-sbp2-y += sbp2.o
|
||||
firewire-net-y += net.o
|
||||
|
||||
obj-$(CONFIG_FIREWIRE) += firewire-core.o
|
||||
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
|
||||
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
|
||||
obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
|
||||
obj-$(CONFIG_FIREWIRE_NOSY) += nosy.o
|
||||
obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
|
706
drivers/firewire/core-card.c
Normal file
706
drivers/firewire/core-card.c
Normal file
|
@ -0,0 +1,706 @@
|
|||
/*
|
||||
* Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/crc-itu-t.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/firewire.h>
|
||||
#include <linux/firewire-constants.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#include "core.h"
|
||||
|
||||
#define define_fw_printk_level(func, kern_level) \
|
||||
void func(const struct fw_card *card, const char *fmt, ...) \
|
||||
{ \
|
||||
struct va_format vaf; \
|
||||
va_list args; \
|
||||
\
|
||||
va_start(args, fmt); \
|
||||
vaf.fmt = fmt; \
|
||||
vaf.va = &args; \
|
||||
printk(kern_level KBUILD_MODNAME " %s: %pV", \
|
||||
dev_name(card->device), &vaf); \
|
||||
va_end(args); \
|
||||
}
|
||||
define_fw_printk_level(fw_err, KERN_ERR);
|
||||
define_fw_printk_level(fw_notice, KERN_NOTICE);
|
||||
|
||||
/*
 * Compute and store the CRC-16 (ITU-T) of one config-ROM block.
 *
 * block[0] is the block header whose upper bytes carry the block length in
 * quadlets (bits 16..23); the CRC is calculated over the 'length' quadlets
 * that follow and is OR-ed into the low 16 bits of the header, which the
 * caller is expected to have left zero (see BIB_CRC(0) in
 * generate_config_rom()).
 *
 * Returns the block length in quadlets so the caller can step to the next
 * block.
 */
int fw_compute_block_crc(__be32 *block)
{
	int length;
	u16 crc;

	/* Length field: bits 16..23 of the big-endian header quadlet. */
	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
	/* OR, not assign: preserves the rest of the header quadlet. */
	*block |= cpu_to_be32(crc);

	return length;
}
|
||||
|
||||
static DEFINE_MUTEX(card_mutex);
|
||||
static LIST_HEAD(card_list);
|
||||
|
||||
static LIST_HEAD(descriptor_list);
|
||||
static int descriptor_count;
|
||||
|
||||
static __be32 tmp_config_rom[256];
|
||||
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
|
||||
static size_t config_rom_length = 1 + 4 + 1 + 1;
|
||||
|
||||
#define BIB_CRC(v) ((v) << 0)
|
||||
#define BIB_CRC_LENGTH(v) ((v) << 16)
|
||||
#define BIB_INFO_LENGTH(v) ((v) << 24)
|
||||
#define BIB_BUS_NAME 0x31333934 /* "1394" */
|
||||
#define BIB_LINK_SPEED(v) ((v) << 0)
|
||||
#define BIB_GENERATION(v) ((v) << 4)
|
||||
#define BIB_MAX_ROM(v) ((v) << 8)
|
||||
#define BIB_MAX_RECEIVE(v) ((v) << 12)
|
||||
#define BIB_CYC_CLK_ACC(v) ((v) << 16)
|
||||
#define BIB_PMC ((1) << 27)
|
||||
#define BIB_BMC ((1) << 28)
|
||||
#define BIB_ISC ((1) << 29)
|
||||
#define BIB_CMC ((1) << 30)
|
||||
#define BIB_IRMC ((1) << 31)
|
||||
#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
|
||||
|
||||
/*
|
||||
* IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
|
||||
* but we have to make it longer because there are many devices whose firmware
|
||||
* is just too slow for that.
|
||||
*/
|
||||
#define DEFAULT_SPLIT_TIMEOUT (2 * 8000)
|
||||
|
||||
#define CANON_OUI 0x000085
|
||||
|
||||
/*
 * Build the card's config ROM image (bus info block + root directory +
 * descriptor blocks) into config_rom, then fill in the per-block CRCs.
 * Caller must hold card_mutex (protects descriptor_list/descriptor_count
 * and the shared tmp_config_rom buffer used by all callers in this file).
 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
	struct fw_descriptor *desc;
	int i, j, k, length;

	/*
	 * Initialize contents of config rom buffer.  On the OHCI
	 * controller, block reads to the config rom accesses the host
	 * memory, but quadlet read access the hardware bus info block
	 * registers.  That's just crack, but it means we should make
	 * sure the contents of bus info block in host memory matches
	 * the version stored in the OHCI registers.
	 */

	config_rom[0] = cpu_to_be32(
		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
	/* Generation cycles through 2..15; 0/1 have special meaning per OHCI. */
	config_rom[2] = cpu_to_be32(
		BIB_LINK_SPEED(card->link_speed) |
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
	config_rom[3] = cpu_to_be32(card->guid >> 32);
	config_rom[4] = cpu_to_be32(card->guid);

	/* Generate root directory. */
	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
	i = 7;				/* next root-directory entry */
	j = 7 + descriptor_count;	/* where descriptor blocks start */

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = cpu_to_be32(desc->immediate);
		/* Leaf/directory entry: key plus offset to the block at j. */
		config_rom[i] = cpu_to_be32(desc->key | (j - i));
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		for (k = 0; k < desc->length; k++)
			config_rom[i + k] = cpu_to_be32(desc->data[k]);
		i += desc->length;
	}

	/* Calculate CRCs for all blocks in the config rom.  This
	 * assumes that CRC length and info length are identical for
	 * the bus info block, which is always the case for this
	 * implementation. */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	/* j must equal the bookkeeping done by fw_core_add/remove_descriptor. */
	WARN_ON(j != config_rom_length);
}
|
||||
|
||||
static void update_config_roms(void)
|
||||
{
|
||||
struct fw_card *card;
|
||||
|
||||
list_for_each_entry (card, &card_list, link) {
|
||||
generate_config_rom(card, tmp_config_rom);
|
||||
card->driver->set_config_rom(card, tmp_config_rom,
|
||||
config_rom_length);
|
||||
}
|
||||
}
|
||||
|
||||
static size_t required_space(struct fw_descriptor *desc)
|
||||
{
|
||||
/* descriptor + entry into root dir + optional immediate entry */
|
||||
return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
/*
 * Add a unit/vendor descriptor to every card's config ROM.
 *
 * Validates that the descriptor's internal block lengths add up to
 * desc->length, then links it in and regenerates all config ROMs.
 *
 * Returns 0 on success, -EINVAL if the descriptor is malformed, or
 * -EBUSY if it would overflow the 256-quadlet config ROM.
 */
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
	size_t i;
	int ret;

	/*
	 * Check descriptor is valid; the length of all blocks in the
	 * descriptor has to add up to exactly the length of the
	 * block.
	 */
	i = 0;
	while (i < desc->length)
		/* Each sub-block's length lives in its header's upper bits. */
		i += (desc->data[i] >> 16) + 1;

	if (i != desc->length)
		return -EINVAL;

	mutex_lock(&card_mutex);

	if (config_rom_length + required_space(desc) > 256) {
		ret = -EBUSY;
	} else {
		list_add_tail(&desc->link, &descriptor_list);
		config_rom_length += required_space(desc);
		descriptor_count++;
		/* An immediate value adds a second root-directory entry. */
		if (desc->immediate > 0)
			descriptor_count++;
		update_config_roms();
		ret = 0;
	}

	mutex_unlock(&card_mutex);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
|
||||
|
||||
/*
 * Remove a descriptor previously added with fw_core_add_descriptor()
 * and regenerate all cards' config ROMs.  Mirrors the bookkeeping done
 * on the add path under the same card_mutex.
 */
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
	mutex_lock(&card_mutex);

	list_del(&desc->link);
	config_rom_length -= required_space(desc);
	descriptor_count--;
	/* Undo the extra count for the immediate root-directory entry. */
	if (desc->immediate > 0)
		descriptor_count--;
	update_config_roms();

	mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
|
||||
|
||||
/*
 * Trigger a bus reset through the PHY: a short (arbitrated) reset via
 * PHY register 5, or a long reset via register 1.  Returns the driver's
 * update_phy_reg() result.
 */
static int reset_bus(struct fw_card *card, bool short_reset)
{
	int reg, bit;

	if (short_reset) {
		reg = 5;
		bit = PHY_BUS_SHORT_RESET;
	} else {
		reg = 1;
		bit = PHY_BUS_RESET;
	}

	return card->driver->update_phy_reg(card, reg, 0, bit);
}
|
||||
|
||||
/*
 * Schedule a bus reset, optionally after a short delay so that several
 * requests arriving close together collapse into one reset.  Takes a
 * card reference for the queued work; if the work was already queued
 * (queue_delayed_work() returns false) the extra reference is dropped
 * immediately.
 */
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
	/* We don't try hard to sort out requests of long vs. short resets. */
	card->br_short = short_reset;

	/* Use an arbitrary short delay to combine multiple reset requests. */
	fw_card_get(card);
	if (!queue_delayed_work(fw_workqueue, &card->br_work,
				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
		fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
|
||||
|
||||
/*
 * Delayed-work handler that performs the actual bus reset scheduled by
 * fw_schedule_bus_reset().  Defers itself if the last reset was less
 * than 2 s ago, otherwise sends a PHY config packet and resets the bus.
 * Drops the card reference taken when the work was queued.
 */
static void br_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, br_work.work);

	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
	if (card->reset_jiffies != 0 &&
	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
		/* Requeue; keep the reference unless already queued. */
		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
			fw_card_put(card);
		return;
	}

	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
	reset_bus(card, card->br_short);
	fw_card_put(card);
}
|
||||
|
||||
/*
 * As IRM, claim isochronous channel 31 (the broadcast channel) if not
 * already allocated, then propagate the broadcast-channel availability
 * to all child devices for the given bus generation.
 */
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
	int channel, bandwidth = 0;

	if (!card->broadcast_channel_allocated) {
		/* Request exactly channel 31 (bit 31 of the channel mask). */
		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true);
		if (channel != 31) {
			fw_notice(card, "failed to allocate broadcast channel\n");
			return;
		}
		card->broadcast_channel_allocated = true;
	}

	/* Generation is smuggled through the void* callback argument. */
	device_for_each_child(card->device, (void *)(long)generation,
			      fw_device_set_broadcast_channel);
}
|
||||
|
||||
/*
 * Gap count indexed by the maximum number of hops between any two nodes;
 * taken from IEEE 1394a table E-1 (see the comment at the pick_me label
 * in bm_work()).  Entry 0 (63) is the unoptimized default.
 */
static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
|
||||
|
||||
/*
 * Schedule the bus-manager work after 'delay' jiffies.  Takes a card
 * reference for the queued work and drops it again if the work was
 * already pending (schedule_delayed_work() returned false).
 */
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
	fw_card_get(card);
	if (!schedule_delayed_work(&card->bm_work, delay))
		fw_card_put(card);
}
|
||||
|
||||
/*
 * Bus-manager work: after a bus reset, try to become bus manager via a
 * compare-swap lock on the IRM's BUS_MANAGER_ID register, then (as BM)
 * make sure there is a usable cycle master and optimize the gap count,
 * resetting the bus if root or gap count needs to change.
 *
 * Locking: card->lock guards the card/topology fields; it is dropped
 * around the blocking fw_run_transaction() calls and re-taken, with
 * 'generation' re-checked where it matters.  Drops the card reference
 * taken by fw_schedule_bm_work().
 */
static void bm_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
	struct fw_device *root_device, *irm_device;
	struct fw_node *root_node;
	int root_id, new_root_id, irm_id, bm_id, local_id;
	int gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	bool irm_is_1394_1995_only;
	bool keep_this_irm;
	__be32 transaction_data[2];

	spin_lock_irq(&card->lock);

	/* Card is being shut down (no local node); nothing to manage. */
	if (card->local_node == NULL) {
		spin_unlock_irq(&card->lock);
		goto out_put_card;
	}

	generation = card->generation;

	root_node = card->root_node;
	fw_node_get(root_node);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;

	/* A 1394-1995 IRM advertises no 1394a capability nibble in quadlet 2. */
	irm_device = card->irm_node->data;
	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
			(irm_device->config_rom[2] & 0x000000f0) == 0;

	/* Canon MV5i works unreliably if it is not root node. */
	keep_this_irm = irm_device && irm_device->config_rom &&
			irm_device->config_rom[3] >> 8 == CANON_OUI;

	root_id  = root_node->node_id;
	irm_id   = card->irm_node->node_id;
	local_id = card->local_node->node_id;

	/* True once ~125 ms have passed since the bus reset. */
	grace = time_after64(get_jiffies_64(),
			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if ((is_next_generation(generation, card->bm_generation) &&
	     !card->bm_abdicate) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * This first step is to figure out who is IRM and
		 * then try to become bus manager.  If the IRM is not
		 * well defined (e.g. does not have an active link
		 * layer or does not responds to our lock request, we
		 * will have to do a little vigilante bus management.
		 * In that case, we do a goto into the gap count logic
		 * so that when we do the reset, we still optimize the
		 * gap count.  That could well save a reset in the
		 * next generation.
		 */

		if (!card->irm_node->link_on) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM has link off", new_root_id);
			goto pick_me;
		}

		if (irm_is_1394_1995_only && !keep_this_irm) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM is not 1394a compliant", new_root_id);
			goto pick_me;
		}

		/* Compare-swap: expect "no BM" (0x3f), propose ourselves. */
		transaction_data[0] = cpu_to_be32(0x3f);
		transaction_data[1] = cpu_to_be32(local_id);

		spin_unlock_irq(&card->lock);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				transaction_data, 8);

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		/* Old register value: the node id of the winning BM. */
		bm_id = be32_to_cpu(transaction_data[0]);

		spin_lock_irq(&card->lock);
		if (rcode == RCODE_COMPLETE && generation == card->generation)
			card->bm_node_id =
			    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
		spin_unlock_irq(&card->lock);

		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
			/* Somebody else is BM.  Only act as IRM. */
			if (local_id == irm_id)
				allocate_broadcast_channel(card, generation);

			goto out;
		}

		if (rcode == RCODE_SEND_ERROR) {
			/*
			 * We have been unable to send the lock request due to
			 * some local problem.  Let's try again later and hope
			 * that the problem has gone away by then.
			 */
			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
			goto out;
		}

		spin_lock_irq(&card->lock);

		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_id;
			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
				  fw_rcode_string(rcode), new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125ms ago.  Reschedule this job.
		 */
		spin_unlock_irq(&card->lock);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irq(&card->lock);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * We will send out a force root packet for this
		 * node as part of the gap count optimization.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * Current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */
	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irq(&card->lock);

	if (do_reset) {
		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
			  new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		reset_bus(card, true);
		/* Will allocate broadcast channel after the reset. */
		goto out;
	}

	if (root_device_is_cmc) {
		/*
		 * Make sure that the cycle master sends cycle start packets.
		 */
		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				root_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_STATE_SET,
				transaction_data, 4);
		if (rcode == RCODE_GENERATION)
			goto out;
	}

	if (local_id == irm_id)
		allocate_broadcast_channel(card, generation);

 out:
	fw_node_put(root_node);
 out_put_card:
	fw_card_put(card);
}
|
||||
|
||||
/*
 * One-time initialization of an fw_card before it is added with
 * fw_card_add(): assigns a unique index, installs the controller driver
 * ops, sets default split-transaction timeouts, and initializes refcount,
 * lists, lock and the bus-reset / bus-manager delayed work items.
 */
void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	/* Monotonic card index, shared across all cards in the system. */
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	/* Split timeout split into CSR hi (seconds) / lo (fraction) parts. */
	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
	card->split_timeout_jiffies =
			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	INIT_LIST_HEAD(&card->phy_receiver_list);
	spin_lock_init(&card->lock);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->br_work, br_work);
	INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
|
||||
|
||||
/*
 * Register an initialized card: record its link parameters and GUID,
 * generate its config ROM, enable it via the controller driver and, on
 * success, add it to the global card_list.  Returns the driver's
 * enable() result (0 on success).
 */
int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid)
{
	int ret;

	card->max_receive = max_receive;
	card->link_speed = link_speed;
	card->guid = guid;

	mutex_lock(&card_mutex);

	/* card_mutex also serializes access to tmp_config_rom. */
	generate_config_rom(card, tmp_config_rom);
	ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
	if (ret == 0)
		list_add_tail(&card->link, &card_list);

	mutex_unlock(&card_mutex);

	return ret;
}
EXPORT_SYMBOL(fw_card_add);
|
||||
|
||||
/*
|
||||
* The next few functions implement a dummy driver that is used once a card
|
||||
* driver shuts down an fw_card. This allows the driver to cleanly unload,
|
||||
* as all IO to the card will be handled (and failed) by the dummy driver
|
||||
* instead of calling into the module. Only functions for iso context
|
||||
* shutdown still need to be provided by the card driver.
|
||||
*
|
||||
* .read/write_csr() should never be called anymore after the dummy driver
|
||||
* was bound since they are only used within request handler context.
|
||||
* .set_config_rom() is never called since the card is taken out of card_list
|
||||
* before switching to the dummy driver.
|
||||
*/
|
||||
|
||||
/* Dummy driver op: PHY registers are gone once the card is shut down. */
static int dummy_read_phy_reg(struct fw_card *card, int address)
{
	return -ENODEV;
}

/* Dummy driver op: refuse PHY register updates after shutdown. */
static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}

/* Dummy driver op: fail outbound requests immediately via the callback. */
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

/* Dummy driver op: fail outbound responses immediately via the callback. */
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

/* Dummy driver op: nothing is queued anymore, so nothing to cancel. */
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}

/* Dummy driver op: physical DMA cannot be enabled on a dead card. */
static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}
|
||||
|
||||
/* Dummy driver op: no new iso contexts on a shut-down card. */
static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	return ERR_PTR(-ENODEV);
}

/* Dummy driver op: starting iso transmission/reception is impossible. */
static int dummy_start_iso(struct fw_iso_context *ctx,
			   s32 cycle, u32 sync, u32 tags)
{
	return -ENODEV;
}

/* Dummy driver op: channel mask changes are rejected. */
static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return -ENODEV;
}

/* Dummy driver op: queueing iso packets is rejected. */
static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
			   struct fw_iso_buffer *buffer, unsigned long payload)
{
	return -ENODEV;
}

/* Dummy driver op: nothing queued, nothing to flush. */
static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}

/* Dummy driver op: no completions can be pending. */
static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
	return -ENODEV;
}
|
||||
|
||||
/*
 * Driver-ops template installed in fw_core_remove_card().  Note that
 * .free_iso_context and .stop_iso are intentionally absent here; they
 * are copied over from the real driver before the switch (see
 * fw_core_remove_card()).
 */
static const struct fw_card_driver dummy_driver_template = {
	.read_phy_reg		= dummy_read_phy_reg,
	.update_phy_reg		= dummy_update_phy_reg,
	.send_request		= dummy_send_request,
	.send_response		= dummy_send_response,
	.cancel_packet		= dummy_cancel_packet,
	.enable_phys_dma	= dummy_enable_phys_dma,
	.allocate_iso_context	= dummy_allocate_iso_context,
	.start_iso		= dummy_start_iso,
	.set_iso_channels	= dummy_set_iso_channels,
	.queue_iso		= dummy_queue_iso,
	.flush_queue_iso	= dummy_flush_queue_iso,
	.flush_iso_completions	= dummy_flush_iso_completions,
};
|
||||
|
||||
/*
 * kref release callback: the last reference is gone, so wake up
 * fw_core_remove_card(), which is blocked in wait_for_completion().
 */
void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);
|
||||
|
||||
/*
 * Tear down a card: deactivate the PHY link, force a final bus reset,
 * unlink the card, switch its driver ops to the dummy driver so all
 * further I/O fails cleanly, destroy the node tree, then drop our
 * reference and wait until every other user has dropped theirs.
 */
void fw_core_remove_card(struct fw_card *card)
{
	/* On-stack copy is fine: we block below until all users are gone. */
	struct fw_card_driver dummy_driver = dummy_driver_template;

	/* Clear LCtrl/Contender in PHY register 4 and reset the bus. */
	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_schedule_bus_reset(card, false, true);

	mutex_lock(&card_mutex);
	list_del_init(&card->link);
	mutex_unlock(&card_mutex);

	/* Switch off most of the card driver interface. */
	dummy_driver.free_iso_context	= card->driver->free_iso_context;
	dummy_driver.stop_iso		= card->driver->stop_iso;
	card->driver = &dummy_driver;

	fw_destroy_nodes(card);

	/* Wait for all users, especially device workqueue jobs, to finish. */
	fw_card_put(card);
	wait_for_completion(&card->done);

	WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
|
1815
drivers/firewire/core-cdev.c
Normal file
1815
drivers/firewire/core-cdev.c
Normal file
File diff suppressed because it is too large
Load diff
1325
drivers/firewire/core-device.c
Normal file
1325
drivers/firewire/core-device.c
Normal file
File diff suppressed because it is too large
Load diff
403
drivers/firewire/core-iso.c
Normal file
403
drivers/firewire/core-iso.c
Normal file
|
@ -0,0 +1,403 @@
|
|||
/*
|
||||
* Isochronous I/O functionality:
|
||||
* - Isochronous DMA context management
|
||||
* - Isochronous bus resource management (channels, bandwidth), client side
|
||||
*
|
||||
* Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/firewire.h>
|
||||
#include <linux/firewire-constants.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#include "core.h"
|
||||
|
||||
/*
|
||||
* Isochronous DMA context management
|
||||
*/
|
||||
|
||||
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
|
||||
{
|
||||
int i;
|
||||
|
||||
buffer->page_count = 0;
|
||||
buffer->page_count_mapped = 0;
|
||||
buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
|
||||
GFP_KERNEL);
|
||||
if (buffer->pages == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < page_count; i++) {
|
||||
buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
|
||||
if (buffer->pages[i] == NULL)
|
||||
break;
|
||||
}
|
||||
buffer->page_count = i;
|
||||
if (i < page_count) {
|
||||
fw_iso_buffer_destroy(buffer, NULL);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * DMA-map every page of a previously allocated iso buffer for the given
 * direction, stashing each page's bus address in its page_private.
 * page_count_mapped records how many mappings succeeded so that
 * fw_iso_buffer_destroy() unmaps exactly those.
 *
 * Returns 0 if all pages mapped, -ENOMEM on a mapping failure (partial
 * mappings are left for the caller to clean up via destroy()).
 */
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
			  enum dma_data_direction direction)
{
	dma_addr_t address;
	int i;

	buffer->direction = direction;

	for (i = 0; i < buffer->page_count; i++) {
		address = dma_map_page(card->device, buffer->pages[i],
				       0, PAGE_SIZE, direction);
		if (dma_mapping_error(card->device, address))
			break;

		/* Bus address is kept in the page itself. */
		set_page_private(buffer->pages[i], address);
	}
	buffer->page_count_mapped = i;
	if (i < buffer->page_count)
		return -ENOMEM;

	return 0;
}
|
||||
|
||||
/*
 * Convenience wrapper: allocate an iso buffer and DMA-map it in one go.
 * On mapping failure the allocation is rolled back, so the caller never
 * sees a half-initialized buffer.  Returns 0 or a negative errno.
 */
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction)
{
	int ret;

	ret = fw_iso_buffer_alloc(buffer, page_count);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map_dma(buffer, card, direction);
	if (ret < 0)
		fw_iso_buffer_destroy(buffer, card);

	return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
|
||||
|
||||
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
unsigned long uaddr;
|
||||
int i, err;
|
||||
|
||||
uaddr = vma->vm_start;
|
||||
for (i = 0; i < buffer->page_count; i++) {
|
||||
err = vm_insert_page(vma, uaddr, buffer->pages[i]);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
uaddr += PAGE_SIZE;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Undo fw_iso_buffer_init()/fw_iso_buffer_alloc(): unmap the pages that
 * were actually DMA-mapped (page_count_mapped may be smaller than
 * page_count, or zero — in which case @card may be NULL), free all
 * allocated pages, and reset the buffer to its empty state so it can be
 * destroyed again harmlessly.
 */
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
			   struct fw_card *card)
{
	int i;
	dma_addr_t address;

	for (i = 0; i < buffer->page_count_mapped; i++) {
		/* The DMA address was stored in the page's private field. */
		address = page_private(buffer->pages[i]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, buffer->direction);
	}
	for (i = 0; i < buffer->page_count; i++)
		__free_page(buffer->pages[i]);

	kfree(buffer->pages);
	buffer->pages = NULL;
	buffer->page_count = 0;
	buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
|
||||
|
||||
/*
 * Convert DMA address to offset into virtually contiguous buffer.
 *
 * Scans the per-page DMA addresses for the page containing @completed.
 * Returns 0 if no page matches.
 *
 * NOTE(review): the (0, PAGE_SIZE] window (rather than [0, PAGE_SIZE))
 * suggests @completed points one past the last completed byte, so a
 * value equal to a page's base address belongs to the previous page —
 * confirm against the controller driver that supplies @completed.
 */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
	size_t i;
	dma_addr_t address;
	ssize_t offset;

	for (i = 0; i < buffer->page_count; i++) {
		address = page_private(buffer->pages[i]);
		offset = (ssize_t)completed - (ssize_t)address;
		if (offset > 0 && offset <= PAGE_SIZE)
			return (i << PAGE_SHIFT) + offset;
	}

	return 0;
}
|
||||
|
||||
/*
 * Ask the card driver to allocate a hardware isochronous context of the
 * given @type on @channel, then fill in the generic bookkeeping fields.
 * Returns the context or an ERR_PTR() from the driver on failure.
 */
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = card->driver->allocate_iso_context(card,
						 type, channel, header_size);
	if (IS_ERR(ctx))
		return ctx;

	ctx->card = card;
	ctx->type = type;
	ctx->channel = channel;
	ctx->speed = speed;
	ctx->header_size = header_size;
	ctx->callback.sc = callback;
	ctx->callback_data = callback_data;

	return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
|
||||
|
||||
/* Release a context previously obtained from fw_iso_context_create(). */
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
	ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
|
||||
|
||||
/* Thin dispatch to the card driver's start_iso hook. */
int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags)
{
	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
|
||||
|
||||
/* Thin dispatch to the card driver's set_iso_channels hook. */
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return ctx->card->driver->set_iso_channels(ctx, channels);
}
|
||||
|
||||
/*
 * Queue one iso packet on the context; @payload is an offset into
 * @buffer.  Thin dispatch to the card driver's queue_iso hook.
 */
int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload)
{
	return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
|
||||
|
||||
/* Thin dispatch to the card driver's flush_queue_iso hook. */
void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
	ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
|
||||
|
||||
/* Thin dispatch to the card driver's flush_iso_completions hook. */
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
	return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
|
||||
|
||||
/* Thin dispatch to the card driver's stop_iso hook. */
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
	return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
|
||||
|
||||
/*
|
||||
* Isochronous bus resource management (channels, bandwidth), client side
|
||||
*/
|
||||
|
||||
/*
 * Atomically adjust the IRM's BANDWIDTH_AVAILABLE register with a
 * compare-swap lock transaction.  @allocate selects whether @bandwidth
 * allocation units are subtracted from or added back to the register.
 *
 * Returns the number of units transferred, or a negative error:
 * -EBUSY if the register would under-/overflow, -EAGAIN when a bus
 * reset invalidated @generation during an allocation, -EIO after too
 * many failed lock attempts.
 */
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
			    int bandwidth, bool allocate)
{
	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
	__be32 data[2];

	/*
	 * On a 1394a IRM with low contention, try < 1 is enough.
	 * On a 1394-1995 IRM, we need at least try < 2.
	 * Let's just do try < 5.
	 */
	for (try = 0; try < 5; try++) {
		new = allocate ? old - bandwidth : old + bandwidth;
		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
			return -EBUSY;

		data[0] = cpu_to_be32(old);	/* expected register value */
		data[1] = cpu_to_be32(new);	/* replacement value */
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
				data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all bandwidth. */
			return allocate ? -EAGAIN : bandwidth;

		case RCODE_COMPLETE:
			/* The lock succeeded iff the IRM returned the value
			 * we expected; otherwise retry starting from the
			 * value actually in the register now. */
			if (be32_to_cpup(data) == old)
				return bandwidth;

			old = be32_to_cpup(data);
			/* Fall through. */
		}
		/* Any other rcode: simply retry the lock transaction. */
	}

	return -EIO;
}
|
||||
|
||||
/*
 * Allocate or deallocate one channel out of @channels_mask (bits 0..31,
 * LSB = lowest-numbered channel of this register half) in the IRM's
 * CHANNELS_AVAILABLE register at @offset, using compare-swap lock
 * transactions.
 *
 * Returns the channel number on success, -EAGAIN when a bus reset
 * invalidated @generation during an allocation, -EBUSY if no requested
 * channel is free, -EIO after exhausting the retry budget.
 */
static int manage_channel(struct fw_card *card, int irm_id, int generation,
			  u32 channels_mask, u64 offset, bool allocate)
{
	__be32 bit, all, old;
	__be32 data[2];
	int channel, ret = -EIO, retry = 5;

	/* When allocating we look for register bits still set (free);
	 * when deallocating, for bits cleared (in use). */
	old = all = allocate ? cpu_to_be32(~0) : 0;

	for (channel = 0; channel < 32; channel++) {
		if (!(channels_mask & 1 << channel))
			continue;

		ret = -EBUSY;

		/* The register is a big-endian bitfield, MSB = channel 0. */
		bit = cpu_to_be32(1 << (31 - channel));
		/* Skip channels our cached copy says are unavailable. */
		if ((old & bit) != (all & bit))
			continue;

		data[0] = old;		/* expected register value */
		data[1] = old ^ bit;	/* toggle this channel's bit */
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
					   irm_id, generation, SCODE_100,
					   offset, data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all channels. */
			return allocate ? -EAGAIN : channel;

		case RCODE_COMPLETE:
			if (data[0] == old)
				return channel;

			/* Lost the race; refresh our cached register copy. */
			old = data[0];

			/* Is the IRM 1394a-2000 compliant? */
			if ((data[0] & bit) == (data[1] & bit))
				continue;

			/* 1394-1995 IRM, fall through to retry. */
		default:
			if (retry) {
				retry--;
				/* Re-run the loop body for this channel. */
				channel--;
			} else {
				ret = -EIO;
			}
		}
	}

	return ret;
}
|
||||
|
||||
static void deallocate_channel(struct fw_card *card, int irm_id,
|
||||
int generation, int channel)
|
||||
{
|
||||
u32 mask;
|
||||
u64 offset;
|
||||
|
||||
mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
|
||||
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
|
||||
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
|
||||
|
||||
manage_channel(card, irm_id, generation, mask, offset, false);
|
||||
}
|
||||
|
||||
/**
 * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate)
{
	u32 channels_hi = channels_mask;	/* channels 31...0 */
	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
	int irm_id, ret, c = -EINVAL;

	/* Snapshot the IRM's node id under the lock. */
	spin_lock_irq(&card->lock);
	irm_id = card->irm_node->node_id;
	spin_unlock_irq(&card->lock);

	if (channels_hi)
		c = manage_channel(card, irm_id, generation, channels_hi,
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
				allocate);
	if (channels_lo && c < 0) {
		c = manage_channel(card, irm_id, generation, channels_lo,
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
				allocate);
		if (c >= 0)
			c += 32;	/* map back to the 0..63 numbering */
	}
	*channel = c;

	/* Channel allocation failed: skip the bandwidth allocation too. */
	if (allocate && channels_mask != 0 && c < 0)
		*bandwidth = 0;

	if (*bandwidth == 0)
		return;

	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
	if (ret < 0)
		*bandwidth = 0;

	/* Bandwidth allocation failed: roll back the channel allocation. */
	if (allocate && ret < 0) {
		if (c >= 0)
			deallocate_channel(card, irm_id, generation, c);
		*channel = ret;
	}
}
EXPORT_SYMBOL(fw_iso_resource_manage);
|
569
drivers/firewire/core-topology.c
Normal file
569
drivers/firewire/core-topology.c
Normal file
|
@ -0,0 +1,569 @@
|
|||
/*
|
||||
* Incremental bus scan, based on bus topology
|
||||
*
|
||||
* Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/firewire.h>
|
||||
#include <linux/firewire-constants.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#include "core.h"
|
||||
|
||||
/* Field accessors for (the first quadlet of) a self ID packet. */
#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

/* Sequence number of an extended self ID packet. */
#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

/* Two-bit port status codes within a self ID packet. */
#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0
|
||||
|
||||
/*
 * Parse one node's self ID packet, including any extended self ID
 * packets that follow it, and count its total and child ports.
 *
 * Returns a pointer to the first quadlet after this node's self ID
 * sequence, or NULL if the extended packets are inconsistent.
 */
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	shift = 6;	/* first port field sits at bits 7..6 */
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			/* fall through - a child port is also a port */
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
			/* fall through */
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			/* Quadlet exhausted; done unless "more" is set. */
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;	/* extended quadlets carry 8 ports */
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */

			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}
|
||||
|
||||
/*
 * Return the 2-bit port status field for @port_index from a node's self
 * ID quadlet sequence.  Port fields are packed 8 per quadlet at bit
 * positions 16, 14, ..., 2; the first quadlet's leading 5 positions are
 * occupied by non-port fields, hence the "+ 5" bias: ports 0-2 live in
 * quadlet 0, ports 3-10 in quadlet 1, and so on.
 */
static int get_port_type(u32 *sid, int port_index)
{
	int index, shift;

	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) & 7) * 2;
	return (sid[index] >> shift) & 0x03;
}
|
||||
|
||||
/*
 * Allocate and initialize one topology node from the first quadlet of
 * its self ID packet.  The node starts with one reference held by the
 * caller.  Returns NULL on allocation failure.
 *
 * NOTE(review): GFP_ATOMIC suggests callers run in atomic context
 * (fw_core_handle_bus_reset holds card->lock) — keep it that way.
 */
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	/* ports[] is a trailing variable-length array. */
	node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
		       GFP_ATOMIC);
	if (node == NULL)
		return NULL;

	node->color = color;
	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on = SELF_ID_LINK_ON(sid);
	node->phy_speed = SELF_ID_PHY_SPEED(sid);
	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
	node->port_count = port_count;

	atomic_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}
|
||||
|
||||
/*
 * Compute the maximum hop count for this node and it's children. The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node. We need this for
 * setting the gap count. As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, ie the number of hops
 * to the furthest leaf. Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case
 * the hop count is the sum of the two biggest child depths plus 2.
 * Or it could be the case that the max hop path is entirely
 * containted in a child tree, in which case the max hop count is just
 * the max hop count of this child.
 */
static void update_hop_count(struct fw_node *node)
{
	/* depths[0] >= depths[1] are the two largest child depths. */
	int depths[2] = { -1, -1 };
	int max_child_hops = 0;
	int i;

	for (i = 0; i < node->port_count; i++) {
		if (node->ports[i] == NULL)
			continue;

		if (node->ports[i]->max_hops > max_child_hops)
			max_child_hops = node->ports[i]->max_hops;

		if (node->ports[i]->max_depth > depths[0]) {
			depths[1] = depths[0];
			depths[0] = node->ports[i]->max_depth;
		} else if (node->ports[i]->max_depth > depths[1])
			depths[1] = node->ports[i]->max_depth;
	}

	node->max_depth = depths[0] + 1;
	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}
|
||||
|
||||
/* Convert an embedded list_head back into its containing fw_node. */
static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}
|
||||
|
||||
/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset. During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent. On success this function returns the
 * fw_node corresponding to the local card otherwise NULL.
 *
 * Self IDs arrive ordered by ascending phy_id, children before their
 * parent, so the tree is built bottom-up using a stack of subtrees that
 * have not yet been attached to a parent.
 */
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		q = *sid;
		/* Self IDs must arrive in ascending phy_id order. */
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		/* A node cannot claim more children than subtrees exist. */
		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		/* The highest contender seen so far becomes the IRM. */
		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * Who's your daddy? We dont know the
				 * parent node at this time, so we
				 * temporarily abuse node->color for
				 * remembering the entry in the
				 * node->ports array where the parent
				 * node should be. Later, when we
				 * handle the parent node, we fix up
				 * the reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: "
			       "parent_count=%d\n", phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	/* The last node processed is the root of the tree. */
	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}
|
||||
|
||||
/* Per-node visitor; @parent is NULL for the traversal's root node. */
typedef void (*fw_node_callback_t)(struct fw_card * card,
				   struct fw_node * node,
				   struct fw_node * parent);

/*
 * Breadth-first traversal of the tree rooted at @root, invoking
 * @callback(card, node, parent) for every node.  Marks visited nodes
 * with the current card->color; an adjacent node already carrying that
 * color is recognized as the parent rather than re-queued.  Each queued
 * node holds an extra reference which is dropped when the traversal's
 * work list is torn down at the end.
 *
 * NOTE(review): relies on card->color having been bumped before the
 * call so no node starts out with the current color — confirm at each
 * call site (fw_destroy_nodes and fw_core_handle_bus_reset do this).
 */
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	/* Drop the per-node references taken while queueing. */
	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}
|
||||
|
||||
/*
 * for_each_fw_node() callback for subtrees that disappeared: emit
 * FW_NODE_DESTROYED and drop a node reference.  @parent is unused but
 * required by the fw_node_callback_t signature.
 */
static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}
|
||||
|
||||
/*
 * for_each_fw_node() callback for newly appeared nodes: derive the
 * node's effective speed (capped by the slowest hop on the path to the
 * local node) and whether that whole path is 1394b, then emit
 * FW_NODE_CREATED.
 */
static void report_found_node(struct fw_card *card,
			      struct fw_node *node, struct fw_node *parent)
{
	int b_path = (node->phy_speed == SCODE_BETA);

	if (parent != NULL) {
		/* min() macro doesn't work here with gcc 3.4 */
		node->max_speed = parent->max_speed < node->phy_speed ?
					parent->max_speed : node->phy_speed;
		node->b_path = parent->b_path && b_path;
	} else {
		node->max_speed = node->phy_speed;
		node->b_path = b_path;
	}

	fw_node_event(card, node, FW_NODE_CREATED);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}
|
||||
|
||||
/*
 * Tear down the card's whole topology tree, reporting every node as
 * destroyed.  Bumping card->color first guarantees the traversal treats
 * every node as unvisited.
 */
void fw_destroy_nodes(struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	card->color++;
	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
	spin_unlock_irqrestore(&card->lock, flags);
}
|
||||
|
||||
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
|
||||
{
|
||||
struct fw_node *tree;
|
||||
int i;
|
||||
|
||||
tree = node1->ports[port];
|
||||
node0->ports[port] = tree;
|
||||
for (i = 0; i < tree->port_count; i++) {
|
||||
if (tree->ports[i] == node1) {
|
||||
tree->ports[i] = node0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 *
 * Walks both trees in lockstep (list0 = old nodes, list1 = new nodes);
 * the old tree is kept as the canonical one and updated in place, while
 * the new tree's nodes are released as they are consumed.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		/* Derive the event to report from the state transition. */
		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		/* Copy the new state into the retained old node. */
		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_LOST callbacks for
				 * them.
				 */

				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more node were connected to
				 * this port. Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		/* The new tree's node has served its purpose. */
		fw_node_put(node1);
		node1 = next1;
	}
}
|
||||
|
||||
/*
 * Regenerate the TOPOLOGY_MAP CSR block after a bus reset: the
 * length/CRC quadlet, an incremented generation count, the node and
 * self ID counts, and finally the raw self ID quadlets, followed by a
 * CRC recomputation over the whole block.
 */
static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;

	*map++ = cpu_to_be32((self_id_count + 2) << 16);
	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
	*map++ = cpu_to_be32((node_count << 16) | self_id_count);

	while (self_id_count--)
		*map++ = cpu_to_be32p(self_ids++);

	fw_compute_block_crc(card->topology_map);
}
|
||||
|
||||
/*
 * Entry point called by the controller driver after a bus reset with
 * the new node id, generation and self ID data.  Rebuilds the topology
 * tree, updates the TOPOLOGY_MAP CSR, and either reports all nodes as
 * new (first reset) or diffs against the previous tree to emit
 * create/destroy/update events.  The topology work runs under
 * card->lock with interrupts disabled.
 */
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	spin_lock_irqsave(&card->lock, flags);

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id  = 0xffff;
	card->bm_abdicate = bm_abdicate;
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	/* New color so the traversals below see every node as unvisited. */
	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		/* First reset: report the entire tree as newly found. */
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		/* Subsequent reset: diff against the previous tree. */
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
|
1300
drivers/firewire/core-transaction.c
Normal file
1300
drivers/firewire/core-transaction.c
Normal file
File diff suppressed because it is too large
Load diff
258
drivers/firewire/core.h
Normal file
258
drivers/firewire/core.h
Normal file
|
@ -0,0 +1,258 @@
|
|||
#ifndef _FIREWIRE_CORE_H
|
||||
#define _FIREWIRE_CORE_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
|
||||
struct device;
|
||||
struct fw_card;
|
||||
struct fw_device;
|
||||
struct fw_iso_buffer;
|
||||
struct fw_iso_context;
|
||||
struct fw_iso_packet;
|
||||
struct fw_node;
|
||||
struct fw_packet;
|
||||
|
||||
|
||||
/* -card */
|
||||
|
||||
extern __printf(2, 3)
|
||||
void fw_err(const struct fw_card *card, const char *fmt, ...);
|
||||
extern __printf(2, 3)
|
||||
void fw_notice(const struct fw_card *card, const char *fmt, ...);
|
||||
|
||||
/* bitfields within the PHY registers */
|
||||
#define PHY_LINK_ACTIVE 0x80
|
||||
#define PHY_CONTENDER 0x40
|
||||
#define PHY_BUS_RESET 0x40
|
||||
#define PHY_EXTENDED_REGISTERS 0xe0
|
||||
#define PHY_BUS_SHORT_RESET 0x40
|
||||
#define PHY_INT_STATUS_BITS 0x3c
|
||||
#define PHY_ENABLE_ACCEL 0x02
|
||||
#define PHY_ENABLE_MULTI 0x01
|
||||
#define PHY_PAGE_SELECT 0xe0
|
||||
|
||||
#define BANDWIDTH_AVAILABLE_INITIAL 4915
|
||||
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
|
||||
#define BROADCAST_CHANNEL_VALID (1 << 30)
|
||||
|
||||
#define CSR_STATE_BIT_CMSTR (1 << 8)
|
||||
#define CSR_STATE_BIT_ABDICATE (1 << 10)
|
||||
|
||||
/*
 * Operations a controller driver (e.g. firewire-ohci) implements for
 * the core.  All hooks take the driver's own fw_card / fw_iso_context.
 */
struct fw_card_driver {
	/*
	 * Enable the given card with the given initial config rom.
	 * This function is expected to activate the card, and either
	 * enable the PHY or set the link_on bit and initiate a bus
	 * reset.
	 */
	int (*enable)(struct fw_card *card,
		      const __be32 *config_rom, size_t length);

	int (*read_phy_reg)(struct fw_card *card, int address);
	int (*update_phy_reg)(struct fw_card *card, int address,
			      int clear_bits, int set_bits);

	/*
	 * Update the config rom for an enabled card. This function
	 * should change the config rom that is presented on the bus
	 * and initiate a bus reset.
	 */
	int (*set_config_rom)(struct fw_card *card,
			      const __be32 *config_rom, size_t length);

	/* Asynchronous request/response transmission. */
	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
	/* Calling cancel is valid once a packet has been submitted. */
	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

	/*
	 * Allow the specified node ID to do direct DMA out and in of
	 * host memory. The card will disable this for all node when
	 * a bus reset happens, so driver need to reenable this after
	 * bus reset. Returns 0 on success, -ENODEV if the card
	 * doesn't support this, -ESTALE if the generation doesn't
	 * match.
	 */
	int (*enable_phys_dma)(struct fw_card *card,
			       int node_id, int generation);

	/* Access to controller-implemented CSR registers. */
	u32 (*read_csr)(struct fw_card *card, int csr_offset);
	void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);

	/* Isochronous context lifecycle and operation. */
	struct fw_iso_context *
	(*allocate_iso_context)(struct fw_card *card,
				int type, int channel, size_t header_size);
	void (*free_iso_context)(struct fw_iso_context *ctx);

	int (*start_iso)(struct fw_iso_context *ctx,
			 s32 cycle, u32 sync, u32 tags);

	int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);

	int (*queue_iso)(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);

	void (*flush_queue_iso)(struct fw_iso_context *ctx);

	int (*flush_iso_completions)(struct fw_iso_context *ctx);

	int (*stop_iso)(struct fw_iso_context *ctx);
};
|
||||
|
||||
void fw_card_initialize(struct fw_card *card,
|
||||
const struct fw_card_driver *driver, struct device *device);
|
||||
int fw_card_add(struct fw_card *card,
|
||||
u32 max_receive, u32 link_speed, u64 guid);
|
||||
void fw_core_remove_card(struct fw_card *card);
|
||||
int fw_compute_block_crc(__be32 *block);
|
||||
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
|
||||
|
||||
/* -cdev */
|
||||
|
||||
extern const struct file_operations fw_device_ops;
|
||||
|
||||
void fw_device_cdev_update(struct fw_device *device);
|
||||
void fw_device_cdev_remove(struct fw_device *device);
|
||||
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
|
||||
|
||||
|
||||
/* -device */
|
||||
|
||||
extern struct rw_semaphore fw_device_rwsem;
|
||||
extern struct idr fw_device_idr;
|
||||
extern int fw_cdev_major;
|
||||
|
||||
/* Take a counted reference on @device; returns @device for call chaining. */
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}

/* Drop a reference taken with fw_device_get(). */
static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}
|
||||
|
||||
struct fw_device *fw_device_get_by_devt(dev_t devt);
|
||||
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
|
||||
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
|
||||
|
||||
|
||||
/* -iso */
|
||||
|
||||
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
|
||||
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
|
||||
enum dma_data_direction direction);
|
||||
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
|
||||
struct vm_area_struct *vma);
|
||||
|
||||
|
||||
/* -topology */
|
||||
|
||||
/* Topology events passed to fw_node_event(). */
enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};

/* One node in the 1394 bus topology tree; refcounted via fw_node_get/put. */
struct fw_node {
	u16 node_id;
	u8 color;		/* scratch mark used while walking the tree */
	u8 port_count;		/* number of entries in ports[] below */
	u8 link_on:1;
	u8 initiated_reset:1;
	u8 b_path:1;
	u8 phy_speed:2;	/* As in the self ID packet. */
	u8 max_speed:2;	/* Minimum of all phy-speeds on the path from the
			 * local node to this node. */
	u8 max_depth:4;	/* Maximum depth to any leaf node */
	u8 max_hops:4;	/* Max hops in this sub tree */
	atomic_t ref_count;	/* node is kfree'd when this drops to zero */

	/* For serializing node topology into a list. */
	struct list_head link;

	/* Upper layer specific data. */
	void *data;

	/* Pre-C99 flexible-array idiom: port_count pointers are allocated
	 * immediately past the struct. */
	struct fw_node *ports[0];
};
|
||||
|
||||
/* Take a counted reference on @node; returns @node for call chaining. */
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
	atomic_inc(&node->ref_count);

	return node;
}

/* Drop a reference; frees the node when the last reference goes away. */
static inline void fw_node_put(struct fw_node *node)
{
	if (atomic_dec_and_test(&node->ref_count))
		kfree(node);
}
|
||||
|
||||
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
|
||||
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
|
||||
void fw_destroy_nodes(struct fw_card *card);
|
||||
|
||||
/*
 * Check whether new_generation is the immediate successor of old_generation,
 * taking the 8-bit counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	int expected = (old_generation + 1) & 0xff;

	return (new_generation & 0xff) == expected;
}
|
||||
|
||||
|
||||
/* -transaction */
|
||||
|
||||
/* Pseudo transaction code used for link-internal (PHY) packets. */
#define TCODE_LINK_INTERNAL		0xe

/* Classification helpers keyed off IEEE 1394 transaction-code bit patterns. */
#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
|
||||
|
||||
#define LOCAL_BUS 0xffc0
|
||||
|
||||
/* OHCI-1394's default upper bound for physical DMA: 4 GB */
|
||||
#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
|
||||
|
||||
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
|
||||
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
|
||||
int fw_get_response_length(struct fw_request *request);
|
||||
void fw_fill_response(struct fw_packet *response, u32 *request_header,
|
||||
int rcode, void *payload, size_t length);
|
||||
|
||||
#define FW_PHY_CONFIG_NO_NODE_ID -1
|
||||
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
|
||||
void fw_send_phy_config(struct fw_card *card,
|
||||
int node_id, int generation, int gap_count);
|
||||
|
||||
/*
 * A PHY "ping" packet: the first quadlet is zero outside bits 24..29 and
 * the second quadlet is its bitwise inverse.
 */
static inline bool is_ping_packet(u32 *data)
{
	u32 first = data[0];

	return (first & 0xc0ffffff) == 0 && data[1] == ~first;
}
|
||||
|
||||
#endif /* _FIREWIRE_CORE_H */
|
309
drivers/firewire/init_ohci1394_dma.c
Normal file
309
drivers/firewire/init_ohci1394_dma.c
Normal file
|
@ -0,0 +1,309 @@
|
|||
/*
|
||||
* init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers
|
||||
*
|
||||
* Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de>
|
||||
*
|
||||
* Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c
|
||||
* this file has functions to:
|
||||
* - scan the PCI very early on boot for all OHCI 1394-compliant controllers
|
||||
* - reset and initialize them and make them join the IEEE1394 bus and
|
||||
* - enable physical DMA on them to allow remote debugging
|
||||
*
|
||||
* All code and data is marked as __init and __initdata, respective as
|
||||
* during boot, all OHCI1394 controllers may be claimed by the firewire
|
||||
* stack and at this point, this code should not touch them anymore.
|
||||
*
|
||||
* To use physical DMA after the initialization of the firewire stack,
|
||||
* be sure that the stack enables it and (re-)attach after the bus reset
|
||||
* which may be caused by the firewire stack initialization.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h> /* for PCI defines */
|
||||
#include <linux/string.h>
|
||||
|
||||
#include <asm/pci-direct.h> /* for direct PCI config space access */
|
||||
#include <asm/fixmap.h>
|
||||
|
||||
#include <linux/init_ohci1394_dma.h>
|
||||
#include "ohci.h"
|
||||
|
||||
int __initdata init_ohci1394_dma_early;
|
||||
|
||||
/* Minimal early-boot controller handle: just the MMIO register window. */
struct ohci {
	void __iomem *registers;
};

/* MMIO write of @data to controller register @offset. */
static inline void reg_write(const struct ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

/* MMIO read of controller register @offset. */
static inline u32 reg_read(const struct ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}
|
||||
|
||||
#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
|
||||
|
||||
/* Reads a PHY register of an OHCI-1394 controller */
static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr)
{
	int i;
	u32 r;

	/* Bit 15 starts a PHY register read of @addr. */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	/* Poll for completion (bit 31), up to OHCI_LOOP_COUNT milliseconds;
	 * a timeout is not reported — we just read whatever is there. */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;
		mdelay(1);
	}
	r = reg_read(ohci, OHCI1394_PhyControl);

	/* Read data lives in bits 16..23. */
	return (r & 0x00ff0000) >> 16;
}

/* Writes to a PHY register of an OHCI-1394 controller */
static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data)
{
	int i;

	/* Bit 14 starts a PHY register write of @data to @addr. */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	/* Wait for the write to complete (the trigger bit clears). */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000))
			break;
		mdelay(1);
	}
}
|
||||
|
||||
/* Resets an OHCI-1394 controller (for sane state before initialization) */
static inline void __init init_ohci1394_soft_reset(struct ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	/* Poll until the controller clears softReset, giving up silently
	 * after OHCI_LOOP_COUNT milliseconds. */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet)
				& OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
}
|
||||
|
||||
#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
|
||||
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
|
||||
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
|
||||
|
||||
/* Basic OHCI-1394 register and port initialization */
static inline void __init init_ohci1394_initialize(struct ohci *ohci)
{
	u32 bus_options;
	int num_ports, i;

	/* Put some defaults to these undefined bus options */
	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	bus_options |= 0x60000000; /* Enable CMC and ISC */
	bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	bus_options &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, bus_options);

	/* Set the bus number (0x3ff) and node number (0) */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet,
			OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* enable phys */
	reg_write(ohci, OHCI1394_LinkControlSet,
			OHCI1394_LinkControl_rcvPhyPkt);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the isochronous interrupt masks */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Accept asynchronous transfer requests from all nodes for now */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify asynchronous transfer retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear,
			OHCI1394_HCControl_noByteSwapData);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* If anything is connected to a port, make sure it is enabled */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* NOTE(review): PHY reg 7 presumably selects port i so that
		 * reg 8 reflects its status — per the 1394a extended PHY
		 * register map; confirm against the PHY datasheet. */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* Port connected (0x20) but disabled (bit 0) -> enable it. */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}
}
|
||||
|
||||
/**
 * init_ohci1394_wait_for_busresets - wait until bus resets are completed
 *
 * OHCI1394 initialization itself and any device going on- or offline
 * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
 * specifies that physical DMA is disabled on each bus reset and it
 * has to be enabled after each bus reset when needed. We resort
 * to polling here because on early boot, we have no interrupts.
 */
static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
{
	int i, events;

	/* Fixed 9 x 200ms polling window; each observed busReset event is
	 * acknowledged so a quiet bus ends with the event bit clear. */
	for (i = 0; i < 9; i++) {
		mdelay(200);
		events = reg_read(ohci, OHCI1394_IntEventSet);
		if (events & OHCI1394_busReset)
			reg_write(ohci, OHCI1394_IntEventClear,
					OHCI1394_busReset);
	}
}

/**
 * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
 * This enables remote DMA access over IEEE1394 from every host for the low
 * 4GB of address space. DMA accesses above 4GB are not available currently.
 */
static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
{
	/* Let every possible node ID through the physical request filter... */
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
	/* ...and raise the physical upper bound to its maximum. */
	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
}
|
||||
|
||||
/**
 * init_ohci1394_reset_and_init_dma - init controller and enable DMA
 * This initializes the given controller and enables physical DMA engine in it.
 */
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
{
	/* Start off with a soft reset, clears everything to a sane state. */
	init_ohci1394_soft_reset(ohci);

	/* Accessing some registers without LPS enabled may cause lock up */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50); /* Wait 50msec to make sure we have full link enabled */

	init_ohci1394_initialize(ohci);
	/*
	 * The initialization causes at least one IEEE1394 bus reset. Enabling
	 * physical DMA only works *after* *all* bus resets have calmed down:
	 */
	init_ohci1394_wait_for_busresets(ohci);

	/* We had to wait and do this now if we want to debug early problems */
	init_ohci1394_enable_physical_dma(ohci);
}
|
||||
|
||||
/**
 * init_ohci1394_controller - Map the registers of the controller and init DMA
 * This maps the registers of the specified controller and initializes it
 */
static inline void __init init_ohci1394_controller(int num, int slot, int func)
{
	unsigned long ohci_base;
	struct ohci ohci;

	printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
			 " at %02x:%02x.%x\n", num, slot, func);

	/* BAR 0 holds the OHCI register window; read it via direct config
	 * space access since the PCI core is not up yet. */
	ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
				   & PCI_BASE_ADDRESS_MEM_MASK;

	/* Map the window through the dedicated early fixmap slot. */
	set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);

	ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE);

	init_ohci1394_reset_and_init_dma(&ohci);
}
|
||||
|
||||
/**
 * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them
 * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
 */
void __init init_ohci1394_dma_on_all_controllers(void)
{
	int num, slot, func;
	u32 class;

	if (!early_pci_allowed())
		return;

	/* Poor man's PCI discovery, the only thing we can do at early boot */
	for (num = 0; num < 32; num++) {
		for (slot = 0; slot < 32; slot++) {
			for (func = 0; func < 8; func++) {
				class = read_pci_config(num, slot, func,
							PCI_CLASS_REVISION);
				if (class == 0xffffffff)
					continue; /* No device at this func */

				/* Class code is in the top 24 bits. */
				if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
					continue; /* Not an OHCI-1394 device */

				init_ohci1394_controller(num, slot, func);
				break; /* Assume one controller per device */
			}
		}
	}
	printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
}
|
||||
|
||||
/**
|
||||
* setup_init_ohci1394_early - enables early OHCI1394 DMA initialization
|
||||
*/
|
||||
static int __init setup_ohci1394_dma(char *opt)
|
||||
{
|
||||
if (!strcmp(opt, "early"))
|
||||
init_ohci1394_dma_early = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
|
||||
early_param("ohci1394_dma", setup_ohci1394_dma);
|
1720
drivers/firewire/net.c
Normal file
1720
drivers/firewire/net.c
Normal file
File diff suppressed because it is too large
Load diff
25
drivers/firewire/nosy-user.h
Normal file
25
drivers/firewire/nosy-user.h
Normal file
|
@ -0,0 +1,25 @@
|
|||
#ifndef __nosy_user_h
#define __nosy_user_h

#include <linux/ioctl.h>
#include <linux/types.h>

/*
 * ioctl ABI of the nosy snoop driver.  NOSY_IOC_STOP and NOSY_IOC_FILTER
 * share nr 2, but the encoded command values still differ because _IO and
 * _IOW encode different direction/size bits.
 */
#define NOSY_IOC_GET_STATS _IOR('&', 0, struct nosy_stats)
#define NOSY_IOC_START     _IO('&', 1)
#define NOSY_IOC_STOP      _IO('&', 2)
#define NOSY_IOC_FILTER    _IOW('&', 2, __u32)

/* Counters returned by NOSY_IOC_GET_STATS. */
struct nosy_stats {
	__u32 total_packet_count;
	__u32 lost_packet_count;	/* dropped because the ring was full */
};

/*
 * Format of packets returned from the kernel driver:
 *
 *	quadlet with timestamp	(microseconds, CPU endian)
 *	quadlet-padded packet data...	(little endian)
 *	quadlet with ack	(little endian)
 */

#endif /* __nosy_user_h */
|
709
drivers/firewire/nosy.c
Normal file
709
drivers/firewire/nosy.c
Normal file
|
@ -0,0 +1,709 @@
|
|||
/*
|
||||
* nosy - Snoop mode driver for TI PCILynx 1394 controllers
|
||||
* Copyright (C) 2002-2007 Kristian Høgsberg
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software Foundation,
|
||||
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/sched.h> /* required for linux/wait.h */
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#include "nosy.h"
|
||||
#include "nosy-user.h"
|
||||
|
||||
/* Pseudo tcode used to classify snooped PHY packets (real tcodes are 0..15). */
#define TCODE_PHY_PACKET		0x10
#define PCI_DEVICE_ID_TI_PCILYNX	0x8000

static char driver_name[] = KBUILD_MODNAME;

/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
	__le32 next;
	__le32 async_error_next;
	u32 user_data;
	__le32 pcl_status;
	__le32 remaining_transfer_count;
	__le32 next_data_buffer;
	struct {
		__le32 control;
		__le32 pointer;
	} buffer[13];
};

/* One snooped packet as stored in the capture ring: length-prefixed bytes. */
struct packet {
	unsigned int length;
	char data[0];	/* pre-C99 flexible array member */
};

/* Ring buffer of struct packet; see packet_buffer_get()/packet_buffer_put(). */
struct packet_buffer {
	char *data;
	size_t capacity;
	long total_packet_count, lost_packet_count;
	atomic_t size;		/* bytes currently held in the ring */
	struct packet *head, *tail;
	wait_queue_head_t wait;
};

/* Per-card state for one TI PCILynx controller. */
struct pcilynx {
	struct pci_dev *pci_device;
	__iomem char *registers;

	struct pcl *rcv_start_pcl, *rcv_pcl;
	__le32 *rcv_buffer;

	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

	spinlock_t client_list_lock;
	struct list_head client_list;	/* attached struct client instances */

	struct miscdevice misc;
	struct list_head link;	/* on card_list; emptied on card removal */
	struct kref kref;
};
|
||||
|
||||
/* Take a counted reference on @lynx; returns @lynx for call chaining. */
static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
	kref_get(&lynx->kref);

	return lynx;
}

/* kref release callback: frees the card structure. */
static void
lynx_release(struct kref *kref)
{
	kfree(container_of(kref, struct pcilynx, kref));
}

/* Drop a reference taken with lynx_get(). */
static inline void
lynx_put(struct pcilynx *lynx)
{
	kref_put(&lynx->kref, lynx_release);
}

/* Per-open-file state: the card, the tcode filter and a capture ring. */
struct client {
	struct pcilynx *lynx;
	u32 tcode_mask;		/* bit n set => capture packets with tcode n */
	struct packet_buffer buffer;
	struct list_head link;	/* on lynx->client_list while started */
};

/* card_mutex protects card_list and misc-device (de)registration. */
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);
|
||||
|
||||
static int
|
||||
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
|
||||
{
|
||||
buffer->data = kmalloc(capacity, GFP_KERNEL);
|
||||
if (buffer->data == NULL)
|
||||
return -ENOMEM;
|
||||
buffer->head = (struct packet *) buffer->data;
|
||||
buffer->tail = (struct packet *) buffer->data;
|
||||
buffer->capacity = capacity;
|
||||
buffer->lost_packet_count = 0;
|
||||
atomic_set(&buffer->size, 0);
|
||||
init_waitqueue_head(&buffer->wait);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Free the ring storage allocated by packet_buffer_init(). */
static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
	kfree(buffer->data);
}

/*
 * Copy the oldest packet out of the client's ring to user space.  Blocks
 * until data arrives.  Returns the packet length, -ERESTARTSYS when
 * interrupted or when the card was removed while waiting, -ENODEV when
 * woken with no data, -EFAULT on a bad user buffer.
 */
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
	struct packet_buffer *buffer = &client->buffer;
	size_t length;
	char *end;

	/* An empty lynx->link means the card was hot-removed. */
	if (wait_event_interruptible(buffer->wait,
				     atomic_read(&buffer->size) > 0) ||
	    list_empty(&client->lynx->link))
		return -ERESTARTSYS;

	if (atomic_read(&buffer->size) == 0)
		return -ENODEV;

	/* FIXME: Check length <= user_length. */

	end = buffer->data + buffer->capacity;
	length = buffer->head->length;

	if (&buffer->head->data[length] < end) {
		/* Packet is contiguous in the ring. */
		if (copy_to_user(data, buffer->head->data, length))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->head->data[length];
	} else {
		/* Packet wraps past the end of the ring: copy two pieces. */
		size_t split = end - buffer->head->data;

		if (copy_to_user(data, buffer->head->data, split))
			return -EFAULT;
		if (copy_to_user(data + split, buffer->data, length - split))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->data[length - split];
	}

	/*
	 * Decrease buffer->size as the last thing, since this is what
	 * keeps the interrupt from overwriting the packet we are
	 * retrieving from the buffer.
	 */
	atomic_sub(sizeof(struct packet) + length, &buffer->size);

	return length;
}
|
||||
|
||||
/*
 * Append one snooped packet to @buffer; runs in interrupt context.
 * If the packet does not fit, it is dropped and only lost_packet_count
 * is bumped.
 */
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
	char *end;

	buffer->total_packet_count++;

	/* Room check against the bytes currently held in the ring. */
	if (buffer->capacity <
	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
		buffer->lost_packet_count++;
		return;
	}

	end = buffer->data + buffer->capacity;
	buffer->tail->length = length;

	if (&buffer->tail->data[length] < end) {
		memcpy(buffer->tail->data, data, length);
		buffer->tail = (struct packet *) &buffer->tail->data[length];
	} else {
		/* Wrap: split the copy at the end of the ring. */
		size_t split = end - buffer->tail->data;

		memcpy(buffer->tail->data, data, split);
		memcpy(buffer->data, data + split, length - split);
		buffer->tail = (struct packet *) &buffer->data[length - split];
	}

	/* Finally, adjust buffer size and wake up userspace reader. */

	atomic_add(sizeof(struct packet) + length, &buffer->size);
	wake_up_interruptible(&buffer->wait);
}
|
||||
|
||||
/* MMIO write to a PCILynx register. */
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
	writel(data, lynx->registers + offset);
}

/* MMIO read of a PCILynx register. */
static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
	return readl(lynx->registers + offset);
}

/* Read-modify-write OR of @mask into a PCILynx register. */
static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
			   int dmachan)
{
	/* Point DMA channel @dmachan at the PCL and start it; each
	 * channel's register bank is 0x20 bytes apart. */
	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
|
||||
|
||||
/*
 * Program PHY register @addr with @val via the LINK_PHY interface.
 * NOTE(review): returns bare -1 (not an -E* errno) on out-of-range input.
 */
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
	if (addr > 15) {
		dev_err(&lynx->pci_device->dev,
			"PHY register address %d out of range\n", addr);
		return -1;
	}
	if (val > 0xff) {
		dev_err(&lynx->pci_device->dev,
			"PHY register value %d out of range\n", val);
		return -1;
	}
	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

	return 0;
}
|
||||
|
||||
/*
 * Open a /dev/nosy* node: look the card up by misc minor, take a
 * reference on it and allocate per-client state with a 128 KB capture
 * ring.  The client starts detached; NOSY_IOC_START attaches it.
 */
static int
nosy_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct client *client;
	struct pcilynx *tmp, *lynx = NULL;

	mutex_lock(&card_mutex);
	list_for_each_entry(tmp, &card_list, link)
		if (tmp->misc.minor == minor) {
			lynx = lynx_get(tmp);
			break;
		}
	mutex_unlock(&card_mutex);
	if (lynx == NULL)
		return -ENODEV;

	client = kmalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		goto fail;

	client->tcode_mask = ~0;	/* capture everything by default */
	client->lynx = lynx;
	INIT_LIST_HEAD(&client->link);

	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
		goto fail;

	file->private_data = client;

	return nonseekable_open(inode, file);
fail:
	kfree(client);	/* kfree(NULL) is a no-op on the alloc-failure path */
	lynx_put(lynx);

	return -ENOMEM;
}
|
||||
|
||||
/* Release: detach from the card, free the ring and drop the card ref. */
static int
nosy_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct pcilynx *lynx = client->lynx;

	spin_lock_irq(&lynx->client_list_lock);
	list_del_init(&client->link);
	spin_unlock_irq(&lynx->client_list_lock);

	packet_buffer_destroy(&client->buffer);
	kfree(client);
	lynx_put(lynx);

	return 0;
}

/* Poll: readable when the ring has data; POLLHUP once the card is gone. */
static unsigned int
nosy_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &client->buffer.wait, pt);

	if (atomic_read(&client->buffer.size) > 0)
		ret = POLLIN | POLLRDNORM;

	if (list_empty(&client->lynx->link))
		ret |= POLLHUP;

	return ret;
}

/* Read: hand out exactly one captured packet per call. */
static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return packet_buffer_get(client, buffer, count);
}
|
||||
|
||||
/*
 * ioctl dispatcher: GET_STATS copies the ring counters out, START/STOP
 * attach/detach the client to/from the card's capture list, FILTER
 * replaces the tcode mask.  All arms share the card's client_list_lock.
 */
static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;
	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
	struct nosy_stats stats;

	switch (cmd) {
	case NOSY_IOC_GET_STATS:
		spin_lock_irq(client_list_lock);
		stats.total_packet_count = client->buffer.total_packet_count;
		stats.lost_packet_count  = client->buffer.lost_packet_count;
		spin_unlock_irq(client_list_lock);

		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
			return -EFAULT;
		else
			return 0;

	case NOSY_IOC_START:
		spin_lock_irq(client_list_lock);
		list_add_tail(&client->link, &client->lynx->client_list);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_STOP:
		spin_lock_irq(client_list_lock);
		list_del_init(&client->link);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_FILTER:
		spin_lock_irq(client_list_lock);
		client->tcode_mask = arg;
		spin_unlock_irq(client_list_lock);

		return 0;

	default:
		return -EINVAL;
		/* Flush buffer, configure filter. */
	}
}
|
||||
|
||||
/* Character-device entry points; the device is opened non-seekable. */
static const struct file_operations nosy_ops = {
	.owner =		THIS_MODULE,
	.read =			nosy_read,
	.unlocked_ioctl =	nosy_ioctl,
	.poll =			nosy_poll,
	.open =			nosy_open,
	.release =		nosy_release,
};

#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
|
||||
|
||||
/*
 * DMA-halt handler: one packet has landed in rcv_buffer.  Stamp it with
 * the current time and copy it into every attached client whose tcode
 * filter matches.
 */
static void
packet_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	u32 tcode_mask, tcode;
	size_t length;
	struct timeval tv;

	/* FIXME: Also report rcv_speed. */

	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
	tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

	/* The first quadlet is repurposed as the microsecond timestamp. */
	do_gettimeofday(&tv);
	lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;

	/* PHY packets carry no real tcode; classify by their fixed size. */
	if (length == PHY_PACKET_SIZE)
		tcode_mask = 1 << TCODE_PHY_PACKET;
	else
		tcode_mask = 1 << tcode;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		if (client->tcode_mask & tcode_mask)
			packet_buffer_put(&client->buffer,
					  lynx->rcv_buffer, length + 4);

	spin_unlock(&lynx->client_list_lock);
}

/* Bus reset: deliver a bare 4-byte timestamp to every attached client. */
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	struct timeval tv;

	do_gettimeofday(&tv);

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &tv.tv_usec, 4);

	spin_unlock(&lynx->client_list_lock);
}
|
||||
|
||||
/*
 * Top-level PCI interrupt handler.  Dispatches bus-reset and
 * packet-received (DMA0 halt) events and re-arms the receive PCL.
 */
static irqreturn_t
irq_handler(int irq, void *device)
{
	struct pcilynx *lynx = device;
	u32 pci_int_status;

	pci_int_status = reg_read(lynx, PCI_INT_STATUS);

	if (pci_int_status == ~0)
		/* Card was ejected. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
		/* Not our interrupt, bail out quickly. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
		u32 link_int_status;

		/* Acknowledge the link interrupt by writing it back. */
		link_int_status = reg_read(lynx, LINK_INT_STATUS);
		reg_write(lynx, LINK_INT_STATUS, link_int_status);

		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
			bus_reset_irq_handler(lynx);
	}

	/* Clear the PCI_INT_STATUS register only after clearing the
	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
	 * be set again immediately. */

	reg_write(lynx, PCI_INT_STATUS, pci_int_status);

	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
		packet_irq_handler(lynx);
		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	}

	return IRQ_HANDLED;
}
|
||||
|
||||
static void
|
||||
remove_card(struct pci_dev *dev)
|
||||
{
|
||||
struct pcilynx *lynx = pci_get_drvdata(dev);
|
||||
struct client *client;
|
||||
|
||||
mutex_lock(&card_mutex);
|
||||
list_del_init(&lynx->link);
|
||||
misc_deregister(&lynx->misc);
|
||||
mutex_unlock(&card_mutex);
|
||||
|
||||
reg_write(lynx, PCI_INT_ENABLE, 0);
|
||||
free_irq(lynx->pci_device->irq, lynx);
|
||||
|
||||
spin_lock_irq(&lynx->client_list_lock);
|
||||
list_for_each_entry(client, &lynx->client_list, link)
|
||||
wake_up_interruptible(&client->buffer.wait);
|
||||
spin_unlock_irq(&lynx->client_list_lock);
|
||||
|
||||
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
|
||||
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
|
||||
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
|
||||
lynx->rcv_pcl, lynx->rcv_pcl_bus);
|
||||
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
|
||||
lynx->rcv_buffer, lynx->rcv_buffer_bus);
|
||||
|
||||
iounmap(lynx->registers);
|
||||
pci_disable_device(dev);
|
||||
lynx_put(lynx);
|
||||
}
|
||||
|
||||
#define RCV_BUFFER_SIZE (16 * 1024)
|
||||
|
||||
static int
|
||||
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
|
||||
{
|
||||
struct pcilynx *lynx;
|
||||
u32 p, end;
|
||||
int ret, i;
|
||||
|
||||
if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
|
||||
dev_err(&dev->dev,
|
||||
"DMA address limits not supported for PCILynx hardware\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
if (pci_enable_device(dev)) {
|
||||
dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
pci_set_master(dev);
|
||||
|
||||
lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
|
||||
if (lynx == NULL) {
|
||||
dev_err(&dev->dev, "Failed to allocate control structure\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail_disable;
|
||||
}
|
||||
lynx->pci_device = dev;
|
||||
pci_set_drvdata(dev, lynx);
|
||||
|
||||
spin_lock_init(&lynx->client_list_lock);
|
||||
INIT_LIST_HEAD(&lynx->client_list);
|
||||
kref_init(&lynx->kref);
|
||||
|
||||
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
|
||||
PCILYNX_MAX_REGISTER);
|
||||
|
||||
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
|
||||
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
|
||||
lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
|
||||
sizeof(struct pcl), &lynx->rcv_pcl_bus);
|
||||
lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
|
||||
RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
|
||||
if (lynx->rcv_start_pcl == NULL ||
|
||||
lynx->rcv_pcl == NULL ||
|
||||
lynx->rcv_buffer == NULL) {
|
||||
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail_deallocate;
|
||||
}
|
||||
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
|
||||
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
|
||||
lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);
|
||||
|
||||
lynx->rcv_pcl->buffer[0].control =
|
||||
cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
|
||||
lynx->rcv_pcl->buffer[0].pointer =
|
||||
cpu_to_le32(lynx->rcv_buffer_bus + 4);
|
||||
p = lynx->rcv_buffer_bus + 2048;
|
||||
end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
|
||||
for (i = 1; p < end; i++, p += 2048) {
|
||||
lynx->rcv_pcl->buffer[i].control =
|
||||
cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
|
||||
lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
|
||||
}
|
||||
lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
|
||||
|
||||
reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
|
||||
/* Fix buggy cards with autoboot pin not tied low: */
|
||||
reg_write(lynx, DMA0_CHAN_CTRL, 0);
|
||||
reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
|
||||
|
||||
#if 0
|
||||
/* now, looking for PHY register set */
|
||||
if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
|
||||
lynx->phyic.reg_1394a = 1;
|
||||
PRINT(KERN_INFO, lynx->id,
|
||||
"found 1394a conform PHY (using extended register set)");
|
||||
lynx->phyic.vendor = get_phy_vendorid(lynx);
|
||||
lynx->phyic.product = get_phy_productid(lynx);
|
||||
} else {
|
||||
lynx->phyic.reg_1394a = 0;
|
||||
PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Setup the general receive FIFO max size. */
|
||||
reg_write(lynx, FIFO_SIZES, 255);
|
||||
|
||||
reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
|
||||
|
||||
reg_write(lynx, LINK_INT_ENABLE,
|
||||
LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
|
||||
LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
|
||||
LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
|
||||
LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
|
||||
LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
|
||||
|
||||
/* Disable the L flag in self ID packets. */
|
||||
set_phy_reg(lynx, 4, 0);
|
||||
|
||||
/* Put this baby into snoop mode */
|
||||
reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
|
||||
|
||||
run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
|
||||
|
||||
if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
|
||||
driver_name, lynx)) {
|
||||
dev_err(&dev->dev,
|
||||
"Failed to allocate shared interrupt %d\n", dev->irq);
|
||||
ret = -EIO;
|
||||
goto fail_deallocate;
|
||||
}
|
||||
|
||||
lynx->misc.parent = &dev->dev;
|
||||
lynx->misc.minor = MISC_DYNAMIC_MINOR;
|
||||
lynx->misc.name = "nosy";
|
||||
lynx->misc.fops = &nosy_ops;
|
||||
|
||||
mutex_lock(&card_mutex);
|
||||
ret = misc_register(&lynx->misc);
|
||||
if (ret) {
|
||||
dev_err(&dev->dev, "Failed to register misc char device\n");
|
||||
mutex_unlock(&card_mutex);
|
||||
goto fail_free_irq;
|
||||
}
|
||||
list_add_tail(&lynx->link, &card_list);
|
||||
mutex_unlock(&card_mutex);
|
||||
|
||||
dev_info(&dev->dev,
|
||||
"Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_free_irq:
|
||||
reg_write(lynx, PCI_INT_ENABLE, 0);
|
||||
free_irq(lynx->pci_device->irq, lynx);
|
||||
|
||||
fail_deallocate:
|
||||
if (lynx->rcv_start_pcl)
|
||||
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
|
||||
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
|
||||
if (lynx->rcv_pcl)
|
||||
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
|
||||
lynx->rcv_pcl, lynx->rcv_pcl_bus);
|
||||
if (lynx->rcv_buffer)
|
||||
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
|
||||
lynx->rcv_buffer, lynx->rcv_buffer_bus);
|
||||
iounmap(lynx->registers);
|
||||
kfree(lynx);
|
||||
|
||||
fail_disable:
|
||||
pci_disable_device(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * PCI IDs this driver binds to: any TI PCILynx controller,
 * regardless of subsystem vendor/device.
 */
static struct pci_device_id pci_table[] = {
	{
		.vendor =    PCI_VENDOR_ID_TI,
		.device =    PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }	/* Terminating entry */
};
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, pci_table);
|
||||
|
||||
/* Binds add_card()/remove_card() to matching PCILynx devices. */
static struct pci_driver lynx_pci_driver = {
	.name =		driver_name,
	.id_table =	pci_table,
	.probe =	add_card,
	.remove =	remove_card,
};
|
||||
|
||||
/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");
|
237
drivers/firewire/nosy.h
Normal file
237
drivers/firewire/nosy.h
Normal file
|
@ -0,0 +1,237 @@
|
|||
/*
|
||||
* Chip register definitions for PCILynx chipset. Based on pcilynx.h
|
||||
* from the Linux 1394 drivers, but modified a bit so the names here
|
||||
* match the specification exactly (even though they have weird names,
|
||||
* like xxx_OVER_FLOW, or arbitrary abbreviations like SNTRJ for "sent
|
||||
* reject" etc.)
|
||||
*/
|
||||
|
||||
#define PCILYNX_MAX_REGISTER 0xfff
|
||||
#define PCILYNX_MAX_MEMORY 0xffff
|
||||
|
||||
#define PCI_LATENCY_CACHELINE 0x0c
|
||||
|
||||
#define MISC_CONTROL 0x40
|
||||
#define MISC_CONTROL_SWRESET (1<<0)
|
||||
|
||||
#define SERIAL_EEPROM_CONTROL 0x44
|
||||
|
||||
#define PCI_INT_STATUS 0x48
|
||||
#define PCI_INT_ENABLE 0x4c
|
||||
/* status and enable have identical bit numbers */
|
||||
#define PCI_INT_INT_PEND (1<<31)
|
||||
#define PCI_INT_FRC_INT (1<<30)
|
||||
#define PCI_INT_SLV_ADR_PERR (1<<28)
|
||||
#define PCI_INT_SLV_DAT_PERR (1<<27)
|
||||
#define PCI_INT_MST_DAT_PERR (1<<26)
|
||||
#define PCI_INT_MST_DEV_TO (1<<25)
|
||||
#define PCI_INT_INT_SLV_TO (1<<23)
|
||||
#define PCI_INT_AUX_TO (1<<18)
|
||||
#define PCI_INT_AUX_INT (1<<17)
|
||||
#define PCI_INT_P1394_INT (1<<16)
|
||||
#define PCI_INT_DMA4_PCL (1<<9)
|
||||
#define PCI_INT_DMA4_HLT (1<<8)
|
||||
#define PCI_INT_DMA3_PCL (1<<7)
|
||||
#define PCI_INT_DMA3_HLT (1<<6)
|
||||
#define PCI_INT_DMA2_PCL (1<<5)
|
||||
#define PCI_INT_DMA2_HLT (1<<4)
|
||||
#define PCI_INT_DMA1_PCL (1<<3)
|
||||
#define PCI_INT_DMA1_HLT (1<<2)
|
||||
#define PCI_INT_DMA0_PCL (1<<1)
|
||||
#define PCI_INT_DMA0_HLT (1<<0)
|
||||
/* all DMA interrupts combined: */
|
||||
#define PCI_INT_DMA_ALL 0x3ff
|
||||
|
||||
#define PCI_INT_DMA_HLT(chan) (1 << (chan * 2))
|
||||
#define PCI_INT_DMA_PCL(chan) (1 << (chan * 2 + 1))
|
||||
|
||||
#define LBUS_ADDR 0xb4
|
||||
#define LBUS_ADDR_SEL_RAM (0x0<<16)
|
||||
#define LBUS_ADDR_SEL_ROM (0x1<<16)
|
||||
#define LBUS_ADDR_SEL_AUX (0x2<<16)
|
||||
#define LBUS_ADDR_SEL_ZV (0x3<<16)
|
||||
|
||||
#define GPIO_CTRL_A 0xb8
|
||||
#define GPIO_CTRL_B 0xbc
|
||||
#define GPIO_DATA_BASE 0xc0
|
||||
|
||||
#define DMA_BREG(base, chan) (base + chan * 0x20)
|
||||
#define DMA_SREG(base, chan) (base + chan * 0x10)
|
||||
|
||||
#define PCL_NEXT_INVALID (1<<0)
|
||||
|
||||
/* transfer commands */
|
||||
#define PCL_CMD_RCV (0x1<<24)
|
||||
#define PCL_CMD_RCV_AND_UPDATE (0xa<<24)
|
||||
#define PCL_CMD_XMT (0x2<<24)
|
||||
#define PCL_CMD_UNFXMT (0xc<<24)
|
||||
#define PCL_CMD_PCI_TO_LBUS (0x8<<24)
|
||||
#define PCL_CMD_LBUS_TO_PCI (0x9<<24)
|
||||
|
||||
/* aux commands */
|
||||
#define PCL_CMD_NOP (0x0<<24)
|
||||
#define PCL_CMD_LOAD (0x3<<24)
|
||||
#define PCL_CMD_STOREQ (0x4<<24)
|
||||
#define PCL_CMD_STORED (0xb<<24)
|
||||
#define PCL_CMD_STORE0 (0x5<<24)
|
||||
#define PCL_CMD_STORE1 (0x6<<24)
|
||||
#define PCL_CMD_COMPARE (0xe<<24)
|
||||
#define PCL_CMD_SWAP_COMPARE (0xf<<24)
|
||||
#define PCL_CMD_ADD (0xd<<24)
|
||||
#define PCL_CMD_BRANCH (0x7<<24)
|
||||
|
||||
/* BRANCH condition codes */
|
||||
#define PCL_COND_DMARDY_SET (0x1<<20)
|
||||
#define PCL_COND_DMARDY_CLEAR (0x2<<20)
|
||||
|
||||
#define PCL_GEN_INTR (1<<19)
|
||||
#define PCL_LAST_BUFF (1<<18)
|
||||
#define PCL_LAST_CMD (PCL_LAST_BUFF)
|
||||
#define PCL_WAITSTAT (1<<17)
|
||||
#define PCL_BIGENDIAN (1<<16)
|
||||
#define PCL_ISOMODE (1<<12)
|
||||
|
||||
#define DMA0_PREV_PCL 0x100
|
||||
#define DMA1_PREV_PCL 0x120
|
||||
#define DMA2_PREV_PCL 0x140
|
||||
#define DMA3_PREV_PCL 0x160
|
||||
#define DMA4_PREV_PCL 0x180
|
||||
#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
|
||||
|
||||
#define DMA0_CURRENT_PCL 0x104
|
||||
#define DMA1_CURRENT_PCL 0x124
|
||||
#define DMA2_CURRENT_PCL 0x144
|
||||
#define DMA3_CURRENT_PCL 0x164
|
||||
#define DMA4_CURRENT_PCL 0x184
|
||||
#define DMA_CURRENT_PCL(chan) (DMA_BREG(DMA0_CURRENT_PCL, chan))
|
||||
|
||||
#define DMA0_CHAN_STAT 0x10c
|
||||
#define DMA1_CHAN_STAT 0x12c
|
||||
#define DMA2_CHAN_STAT 0x14c
|
||||
#define DMA3_CHAN_STAT 0x16c
|
||||
#define DMA4_CHAN_STAT 0x18c
|
||||
#define DMA_CHAN_STAT(chan) (DMA_BREG(DMA0_CHAN_STAT, chan))
|
||||
/* CHAN_STATUS registers share bits */
|
||||
#define DMA_CHAN_STAT_SELFID (1<<31)
|
||||
#define DMA_CHAN_STAT_ISOPKT (1<<30)
|
||||
#define DMA_CHAN_STAT_PCIERR (1<<29)
|
||||
#define DMA_CHAN_STAT_PKTERR (1<<28)
|
||||
#define DMA_CHAN_STAT_PKTCMPL (1<<27)
|
||||
#define DMA_CHAN_STAT_SPECIALACK (1<<14)
|
||||
|
||||
#define DMA0_CHAN_CTRL 0x110
|
||||
#define DMA1_CHAN_CTRL 0x130
|
||||
#define DMA2_CHAN_CTRL 0x150
|
||||
#define DMA3_CHAN_CTRL 0x170
|
||||
#define DMA4_CHAN_CTRL 0x190
|
||||
#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
|
||||
/* CHAN_CTRL registers share bits */
|
||||
#define DMA_CHAN_CTRL_ENABLE (1<<31)
|
||||
#define DMA_CHAN_CTRL_BUSY (1<<30)
|
||||
#define DMA_CHAN_CTRL_LINK (1<<29)
|
||||
|
||||
#define DMA0_READY 0x114
|
||||
#define DMA1_READY 0x134
|
||||
#define DMA2_READY 0x154
|
||||
#define DMA3_READY 0x174
|
||||
#define DMA4_READY 0x194
|
||||
#define DMA_READY(chan) (DMA_BREG(DMA0_READY, chan))
|
||||
|
||||
#define DMA_GLOBAL_REGISTER 0x908
|
||||
|
||||
#define FIFO_SIZES 0xa00
|
||||
|
||||
#define FIFO_CONTROL 0xa10
|
||||
#define FIFO_CONTROL_GRF_FLUSH (1<<4)
|
||||
#define FIFO_CONTROL_ITF_FLUSH (1<<3)
|
||||
#define FIFO_CONTROL_ATF_FLUSH (1<<2)
|
||||
|
||||
#define FIFO_XMIT_THRESHOLD 0xa14
|
||||
|
||||
#define DMA0_WORD0_CMP_VALUE 0xb00
|
||||
#define DMA1_WORD0_CMP_VALUE 0xb10
|
||||
#define DMA2_WORD0_CMP_VALUE 0xb20
|
||||
#define DMA3_WORD0_CMP_VALUE 0xb30
|
||||
#define DMA4_WORD0_CMP_VALUE 0xb40
|
||||
#define DMA_WORD0_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD0_CMP_VALUE, chan))
|
||||
|
||||
#define DMA0_WORD0_CMP_ENABLE 0xb04
|
||||
#define DMA1_WORD0_CMP_ENABLE 0xb14
|
||||
#define DMA2_WORD0_CMP_ENABLE 0xb24
|
||||
#define DMA3_WORD0_CMP_ENABLE 0xb34
|
||||
#define DMA4_WORD0_CMP_ENABLE 0xb44
|
||||
#define DMA_WORD0_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD0_CMP_ENABLE, chan))
|
||||
|
||||
#define DMA0_WORD1_CMP_VALUE 0xb08
|
||||
#define DMA1_WORD1_CMP_VALUE 0xb18
|
||||
#define DMA2_WORD1_CMP_VALUE 0xb28
|
||||
#define DMA3_WORD1_CMP_VALUE 0xb38
|
||||
#define DMA4_WORD1_CMP_VALUE 0xb48
|
||||
#define DMA_WORD1_CMP_VALUE(chan) (DMA_SREG(DMA0_WORD1_CMP_VALUE, chan))
|
||||
|
||||
#define DMA0_WORD1_CMP_ENABLE 0xb0c
|
||||
#define DMA1_WORD1_CMP_ENABLE 0xb1c
|
||||
#define DMA2_WORD1_CMP_ENABLE 0xb2c
|
||||
#define DMA3_WORD1_CMP_ENABLE 0xb3c
|
||||
#define DMA4_WORD1_CMP_ENABLE 0xb4c
|
||||
#define DMA_WORD1_CMP_ENABLE(chan) (DMA_SREG(DMA0_WORD1_CMP_ENABLE, chan))
|
||||
/* word 1 compare enable flags */
|
||||
#define DMA_WORD1_CMP_MATCH_OTHERBUS (1<<15)
|
||||
#define DMA_WORD1_CMP_MATCH_BROADCAST (1<<14)
|
||||
#define DMA_WORD1_CMP_MATCH_BUS_BCAST (1<<13)
|
||||
#define DMA_WORD1_CMP_MATCH_LOCAL_NODE (1<<12)
|
||||
#define DMA_WORD1_CMP_MATCH_EXACT (1<<11)
|
||||
#define DMA_WORD1_CMP_ENABLE_SELF_ID (1<<10)
|
||||
#define DMA_WORD1_CMP_ENABLE_MASTER (1<<8)
|
||||
|
||||
#define LINK_ID 0xf00
|
||||
#define LINK_ID_BUS(id) (id<<22)
|
||||
#define LINK_ID_NODE(id) (id<<16)
|
||||
|
||||
#define LINK_CONTROL 0xf04
|
||||
#define LINK_CONTROL_BUSY (1<<29)
|
||||
#define LINK_CONTROL_TX_ISO_EN (1<<26)
|
||||
#define LINK_CONTROL_RX_ISO_EN (1<<25)
|
||||
#define LINK_CONTROL_TX_ASYNC_EN (1<<24)
|
||||
#define LINK_CONTROL_RX_ASYNC_EN (1<<23)
|
||||
#define LINK_CONTROL_RESET_TX (1<<21)
|
||||
#define LINK_CONTROL_RESET_RX (1<<20)
|
||||
#define LINK_CONTROL_CYCMASTER (1<<11)
|
||||
#define LINK_CONTROL_CYCSOURCE (1<<10)
|
||||
#define LINK_CONTROL_CYCTIMEREN (1<<9)
|
||||
#define LINK_CONTROL_RCV_CMP_VALID (1<<7)
|
||||
#define LINK_CONTROL_SNOOP_ENABLE (1<<6)
|
||||
|
||||
#define CYCLE_TIMER 0xf08
|
||||
|
||||
#define LINK_PHY 0xf0c
|
||||
#define LINK_PHY_READ (1<<31)
|
||||
#define LINK_PHY_WRITE (1<<30)
|
||||
#define LINK_PHY_ADDR(addr) (addr<<24)
|
||||
#define LINK_PHY_WDATA(data) (data<<16)
|
||||
#define LINK_PHY_RADDR(addr) (addr<<8)
|
||||
|
||||
#define LINK_INT_STATUS 0xf14
|
||||
#define LINK_INT_ENABLE 0xf18
|
||||
/* status and enable have identical bit numbers */
|
||||
#define LINK_INT_LINK_INT (1<<31)
|
||||
#define LINK_INT_PHY_TIME_OUT (1<<30)
|
||||
#define LINK_INT_PHY_REG_RCVD (1<<29)
|
||||
#define LINK_INT_PHY_BUSRESET (1<<28)
|
||||
#define LINK_INT_TX_RDY (1<<26)
|
||||
#define LINK_INT_RX_DATA_RDY (1<<25)
|
||||
#define LINK_INT_IT_STUCK (1<<20)
|
||||
#define LINK_INT_AT_STUCK (1<<19)
|
||||
#define LINK_INT_SNTRJ (1<<17)
|
||||
#define LINK_INT_HDR_ERR (1<<16)
|
||||
#define LINK_INT_TC_ERR (1<<15)
|
||||
#define LINK_INT_CYC_SEC (1<<11)
|
||||
#define LINK_INT_CYC_STRT (1<<10)
|
||||
#define LINK_INT_CYC_DONE (1<<9)
|
||||
#define LINK_INT_CYC_PEND (1<<8)
|
||||
#define LINK_INT_CYC_LOST (1<<7)
|
||||
#define LINK_INT_CYC_ARB_FAILED (1<<6)
|
||||
#define LINK_INT_GRF_OVER_FLOW (1<<5)
|
||||
#define LINK_INT_ITF_UNDER_FLOW (1<<4)
|
||||
#define LINK_INT_ATF_UNDER_FLOW (1<<3)
|
||||
#define LINK_INT_IARB_FAILED (1<<0)
|
3899
drivers/firewire/ohci.c
Normal file
3899
drivers/firewire/ohci.c
Normal file
File diff suppressed because it is too large
Load diff
158
drivers/firewire/ohci.h
Normal file
158
drivers/firewire/ohci.h
Normal file
|
@ -0,0 +1,158 @@
|
|||
#ifndef _FIREWIRE_OHCI_H
|
||||
#define _FIREWIRE_OHCI_H
|
||||
|
||||
/* OHCI register map */
|
||||
|
||||
#define OHCI1394_Version 0x000
|
||||
#define OHCI1394_GUID_ROM 0x004
|
||||
#define OHCI1394_ATRetries 0x008
|
||||
#define OHCI1394_CSRData 0x00C
|
||||
#define OHCI1394_CSRCompareData 0x010
|
||||
#define OHCI1394_CSRControl 0x014
|
||||
#define OHCI1394_ConfigROMhdr 0x018
|
||||
#define OHCI1394_BusID 0x01C
|
||||
#define OHCI1394_BusOptions 0x020
|
||||
#define OHCI1394_GUIDHi 0x024
|
||||
#define OHCI1394_GUIDLo 0x028
|
||||
#define OHCI1394_ConfigROMmap 0x034
|
||||
#define OHCI1394_PostedWriteAddressLo 0x038
|
||||
#define OHCI1394_PostedWriteAddressHi 0x03C
|
||||
#define OHCI1394_VendorID 0x040
|
||||
#define OHCI1394_HCControlSet 0x050
|
||||
#define OHCI1394_HCControlClear 0x054
|
||||
#define OHCI1394_HCControl_BIBimageValid 0x80000000
|
||||
#define OHCI1394_HCControl_noByteSwapData 0x40000000
|
||||
#define OHCI1394_HCControl_programPhyEnable 0x00800000
|
||||
#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
|
||||
#define OHCI1394_HCControl_LPS 0x00080000
|
||||
#define OHCI1394_HCControl_postedWriteEnable 0x00040000
|
||||
#define OHCI1394_HCControl_linkEnable 0x00020000
|
||||
#define OHCI1394_HCControl_softReset 0x00010000
|
||||
#define OHCI1394_SelfIDBuffer 0x064
|
||||
#define OHCI1394_SelfIDCount 0x068
|
||||
#define OHCI1394_SelfIDCount_selfIDError 0x80000000
|
||||
#define OHCI1394_IRMultiChanMaskHiSet 0x070
|
||||
#define OHCI1394_IRMultiChanMaskHiClear 0x074
|
||||
#define OHCI1394_IRMultiChanMaskLoSet 0x078
|
||||
#define OHCI1394_IRMultiChanMaskLoClear 0x07C
|
||||
#define OHCI1394_IntEventSet 0x080
|
||||
#define OHCI1394_IntEventClear 0x084
|
||||
#define OHCI1394_IntMaskSet 0x088
|
||||
#define OHCI1394_IntMaskClear 0x08C
|
||||
#define OHCI1394_IsoXmitIntEventSet 0x090
|
||||
#define OHCI1394_IsoXmitIntEventClear 0x094
|
||||
#define OHCI1394_IsoXmitIntMaskSet 0x098
|
||||
#define OHCI1394_IsoXmitIntMaskClear 0x09C
|
||||
#define OHCI1394_IsoRecvIntEventSet 0x0A0
|
||||
#define OHCI1394_IsoRecvIntEventClear 0x0A4
|
||||
#define OHCI1394_IsoRecvIntMaskSet 0x0A8
|
||||
#define OHCI1394_IsoRecvIntMaskClear 0x0AC
|
||||
#define OHCI1394_InitialBandwidthAvailable 0x0B0
|
||||
#define OHCI1394_InitialChannelsAvailableHi 0x0B4
|
||||
#define OHCI1394_InitialChannelsAvailableLo 0x0B8
|
||||
#define OHCI1394_FairnessControl 0x0DC
|
||||
#define OHCI1394_LinkControlSet 0x0E0
|
||||
#define OHCI1394_LinkControlClear 0x0E4
|
||||
#define OHCI1394_LinkControl_rcvSelfID (1 << 9)
|
||||
#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10)
|
||||
#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20)
|
||||
#define OHCI1394_LinkControl_cycleMaster (1 << 21)
|
||||
#define OHCI1394_LinkControl_cycleSource (1 << 22)
|
||||
#define OHCI1394_NodeID 0x0E8
|
||||
#define OHCI1394_NodeID_idValid 0x80000000
|
||||
#define OHCI1394_NodeID_root 0x40000000
|
||||
#define OHCI1394_NodeID_nodeNumber 0x0000003f
|
||||
#define OHCI1394_NodeID_busNumber 0x0000ffc0
|
||||
#define OHCI1394_PhyControl 0x0EC
|
||||
#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
|
||||
#define OHCI1394_PhyControl_ReadDone 0x80000000
|
||||
#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
|
||||
#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
|
||||
#define OHCI1394_PhyControl_WritePending 0x00004000
|
||||
#define OHCI1394_IsochronousCycleTimer 0x0F0
|
||||
#define OHCI1394_AsReqFilterHiSet 0x100
|
||||
#define OHCI1394_AsReqFilterHiClear 0x104
|
||||
#define OHCI1394_AsReqFilterLoSet 0x108
|
||||
#define OHCI1394_AsReqFilterLoClear 0x10C
|
||||
#define OHCI1394_PhyReqFilterHiSet 0x110
|
||||
#define OHCI1394_PhyReqFilterHiClear 0x114
|
||||
#define OHCI1394_PhyReqFilterLoSet 0x118
|
||||
#define OHCI1394_PhyReqFilterLoClear 0x11C
|
||||
#define OHCI1394_PhyUpperBound 0x120
|
||||
|
||||
#define OHCI1394_AsReqTrContextBase 0x180
|
||||
#define OHCI1394_AsReqTrContextControlSet 0x180
|
||||
#define OHCI1394_AsReqTrContextControlClear 0x184
|
||||
#define OHCI1394_AsReqTrCommandPtr 0x18C
|
||||
|
||||
#define OHCI1394_AsRspTrContextBase 0x1A0
|
||||
#define OHCI1394_AsRspTrContextControlSet 0x1A0
|
||||
#define OHCI1394_AsRspTrContextControlClear 0x1A4
|
||||
#define OHCI1394_AsRspTrCommandPtr 0x1AC
|
||||
|
||||
#define OHCI1394_AsReqRcvContextBase 0x1C0
|
||||
#define OHCI1394_AsReqRcvContextControlSet 0x1C0
|
||||
#define OHCI1394_AsReqRcvContextControlClear 0x1C4
|
||||
#define OHCI1394_AsReqRcvCommandPtr 0x1CC
|
||||
|
||||
#define OHCI1394_AsRspRcvContextBase 0x1E0
|
||||
#define OHCI1394_AsRspRcvContextControlSet 0x1E0
|
||||
#define OHCI1394_AsRspRcvContextControlClear 0x1E4
|
||||
#define OHCI1394_AsRspRcvCommandPtr 0x1EC
|
||||
|
||||
/* Isochronous transmit registers */
|
||||
#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n))
|
||||
#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n))
|
||||
#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n))
|
||||
#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n))
|
||||
|
||||
/* Isochronous receive registers */
|
||||
#define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n))
|
||||
#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n))
|
||||
#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
|
||||
#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n))
|
||||
#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n))
|
||||
|
||||
/* Interrupts Mask/Events */
|
||||
#define OHCI1394_reqTxComplete 0x00000001
|
||||
#define OHCI1394_respTxComplete 0x00000002
|
||||
#define OHCI1394_ARRQ 0x00000004
|
||||
#define OHCI1394_ARRS 0x00000008
|
||||
#define OHCI1394_RQPkt 0x00000010
|
||||
#define OHCI1394_RSPkt 0x00000020
|
||||
#define OHCI1394_isochTx 0x00000040
|
||||
#define OHCI1394_isochRx 0x00000080
|
||||
#define OHCI1394_postedWriteErr 0x00000100
|
||||
#define OHCI1394_lockRespErr 0x00000200
|
||||
#define OHCI1394_selfIDComplete 0x00010000
|
||||
#define OHCI1394_busReset 0x00020000
|
||||
#define OHCI1394_regAccessFail 0x00040000
|
||||
#define OHCI1394_phy 0x00080000
|
||||
#define OHCI1394_cycleSynch 0x00100000
|
||||
#define OHCI1394_cycle64Seconds 0x00200000
|
||||
#define OHCI1394_cycleLost 0x00400000
|
||||
#define OHCI1394_cycleInconsistent 0x00800000
|
||||
#define OHCI1394_unrecoverableError 0x01000000
|
||||
#define OHCI1394_cycleTooLong 0x02000000
|
||||
#define OHCI1394_phyRegRcvd 0x04000000
|
||||
#define OHCI1394_masterIntEnable 0x80000000
|
||||
|
||||
#define OHCI1394_evt_no_status 0x0
|
||||
#define OHCI1394_evt_long_packet 0x2
|
||||
#define OHCI1394_evt_missing_ack 0x3
|
||||
#define OHCI1394_evt_underrun 0x4
|
||||
#define OHCI1394_evt_overrun 0x5
|
||||
#define OHCI1394_evt_descriptor_read 0x6
|
||||
#define OHCI1394_evt_data_read 0x7
|
||||
#define OHCI1394_evt_data_write 0x8
|
||||
#define OHCI1394_evt_bus_reset 0x9
|
||||
#define OHCI1394_evt_timeout 0xa
|
||||
#define OHCI1394_evt_tcode_err 0xb
|
||||
#define OHCI1394_evt_reserved_b 0xc
|
||||
#define OHCI1394_evt_reserved_c 0xd
|
||||
#define OHCI1394_evt_unknown 0xe
|
||||
#define OHCI1394_evt_flushed 0xf
|
||||
|
||||
#define OHCI1394_phy_tcode 0xe
|
||||
|
||||
#endif /* _FIREWIRE_OHCI_H */
|
1656
drivers/firewire/sbp2.c
Normal file
1656
drivers/firewire/sbp2.c
Normal file
File diff suppressed because it is too large
Load diff
Loading…
Add table
Add a link
Reference in a new issue