Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,4 @@
source "drivers/misc/samsung/gpio_debug/Kconfig"
source "drivers/misc/samsung/scsc/Kconfig"
source "drivers/misc/samsung/scsc_bt/Kconfig"
source "drivers/misc/samsung/kic/Kconfig"


@@ -0,0 +1,8 @@
# Needed since this subdir is symlinked in the main kernel tree;
# without this, our samsung subdir is NOT cleaned.
clean-files := built-in.o
obj-$(CONFIG_GPIO_DEBUG) += gpio_debug/
obj-$(CONFIG_SAMSUNG_KIC) += kic/
obj-$(CONFIG_SCSC_CORE) += scsc/
obj-$(CONFIG_SCSC_BT) += scsc_bt/


@@ -0,0 +1,3 @@
config GPIO_DEBUG
tristate "GPIO debug functionality"


@@ -0,0 +1 @@
obj-$(CONFIG_GPIO_DEBUG) += gpio_debug.o


@@ -0,0 +1,427 @@
/* Copyright (c) 2014 Samsung Electronics Co., Ltd */
#include <linux/gpio_debug.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/mutex.h>
struct gpio_debug_data;
struct gpio_debug_event {
int gpio;
int gpio_idx;
struct gpio_debug_event_def def;
struct gpio_debug_data *data;
struct dentry *file;
};
struct gpio_debug_data {
int gpio_count;
int *gpios;
struct dentry *gpio_debug_dir;
struct dentry *gpio_debug_events_dir;
struct platform_device *pdev;
struct gpio_debug_event *events;
int event_count;
int event_base;
};
static struct gpio_debug_data debug_data;
static DEFINE_MUTEX(debug_lock);
enum {
GPIO_DEBUG_TOGGLE_100,
GPIO_DEBUG_TOGGLE_200,
};
static struct gpio_debug_event_def debug_events_table[] = {
[GPIO_DEBUG_TOGGLE_100] = {
.name = "toggle100",
.description = "Toggle the GPIO 100 times at initialisation",
},
[GPIO_DEBUG_TOGGLE_200] = {
.name = "toggle200",
.description = "Toggle the GPIO 200 times at initialisation",
},
};
static void gpio_debug_event(int gpio, int state)
{
if (gpio >= 0)
gpio_set_value(gpio, state);
}
static void gpio_debug_event_exec(int event_id, int state)
{
if ((event_id >= 0) && (event_id < debug_data.event_count) && debug_data.events)
gpio_debug_event(debug_data.events[event_id].gpio, state);
}
void gpio_debug_event_enter(int base, int id)
{
gpio_debug_event_exec(base + id, 0);
}
void gpio_debug_event_exit(int base, int id)
{
gpio_debug_event_exec(base + id, 1);
}
int gpio_debug_event_enabled(int base, int id)
{
int event_id = base + id;
if ((event_id >= 0) &&
(event_id < debug_data.event_count) &&
debug_data.events &&
debug_data.events[event_id].gpio >= 0)
return 1;
else
return 0;
}
static int gpio_debug_event_link(struct gpio_debug_event *event, int gpio_index)
{
struct gpio_debug_data *data = event->data;
if (gpio_index >= data->gpio_count)
return -ERANGE;
if (gpio_index >= 0)
event->gpio = data->gpios[gpio_index];
else
event->gpio = -1;
event->gpio_idx = gpio_index;
return 0;
}
static ssize_t event_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
ssize_t ret = 0;
struct gpio_debug_event *event = file->f_inode->i_private;
char buf[256];
int pos;
mutex_lock(&debug_lock);
pos = snprintf(buf, sizeof(buf), "Description:\n%s\n\nEvent is mapped to GPIO index %d with number %d\n", event->def.description, event->gpio_idx, event->gpio);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
mutex_unlock(&debug_lock);
return ret;
}
static ssize_t event_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
	char *user_string;
	ssize_t ret;
	struct gpio_debug_event *event = file->f_inode->i_private;
	int new_index = -1;
	mutex_lock(&debug_lock);
	user_string = kmalloc(count + 1, GFP_KERNEL);
	if (!user_string) {
		mutex_unlock(&debug_lock);
		return -ENOMEM;
	}
	/* user_buf is user-space memory, so it must be copied, not read directly */
	if (copy_from_user(user_string, user_buf, count)) {
		kfree(user_string);
		mutex_unlock(&debug_lock);
		return -EFAULT;
	}
	user_string[count] = '\0';
	ret = (ssize_t)strnlen(user_string, count + 1);
	if (kstrtoint(user_string, 10, &new_index)) {
		/* Free the buffer and drop the lock on the error path too */
		kfree(user_string);
		mutex_unlock(&debug_lock);
		return -EINVAL;
	}
	gpio_debug_event_link(event, new_index);
	kfree(user_string);
	mutex_unlock(&debug_lock);
	return ret;
}
static const struct file_operations event_ops = {
.read = event_read,
.write = event_write,
};
static void create_event_file(struct gpio_debug_event *event)
{
struct gpio_debug_data *data = event->data;
if (data && data->gpio_debug_events_dir) {
event->file = debugfs_create_file(event->def.name, 0660, data->gpio_debug_events_dir, event, &event_ops);
if (IS_ERR_OR_NULL(event->file)) {
event->file = NULL;
pr_warn("%s: Could not create debugfs file for %s\n", __func__, event->def.name);
}
}
}
static void remove_event_file(struct gpio_debug_event *event)
{
if (event && event->file) {
debugfs_remove(event->file);
event->file = NULL;
}
}
static void gpio_debug_init_event(struct gpio_debug_data *data, struct gpio_debug_event_def *event, struct gpio_debug_event *event_save)
{
event_save->def.description = event->description;
event_save->def.name = event->name;
event_save->gpio = -1;
event_save->gpio_idx = -1;
event_save->data = data;
create_event_file(event_save);
}
static void gpio_debug_destroy_event(struct gpio_debug_event *event)
{
remove_event_file(event);
event->def.description = NULL;
event->def.name = NULL;
event->gpio = -1;
event->gpio_idx = -1;
event->data = NULL;
}
int gpio_debug_event_list_register(struct gpio_debug_event_def *events, int event_count)
{
struct gpio_debug_data *data = &debug_data;
int start_index = data->event_count;
struct gpio_debug_event *new_events;
int new_event_count = data->event_count + event_count;
int i, j;
mutex_lock(&debug_lock);
if (data->events)
for (i = 0; i < data->event_count; i++)
remove_event_file(&data->events[i]);
new_events = krealloc(data->events, new_event_count * sizeof(struct gpio_debug_event), GFP_KERNEL);
	if (!new_events) {
		pr_warn("%s: Could not expand for extra events\n", __func__);
		/* If krealloc fails, data->events is unchanged; recreate the
		 * files removed above, drop the lock and exit. */
		for (i = 0; i < data->event_count; i++)
			create_event_file(&data->events[i]);
		mutex_unlock(&debug_lock);
		return -ENOMEM;
	}
data->events = new_events;
for (i = 0; i < data->event_count; i++)
create_event_file(&data->events[i]);
data->event_count = new_event_count;
for (i = 0, j = start_index; (i < event_count) && (j < data->event_count); i++, j++)
gpio_debug_init_event(data, &events[i], &data->events[j]);
mutex_unlock(&debug_lock);
return start_index;
}
void gpio_debug_event_list_unregister(int base, int event_count)
{
int i;
struct gpio_debug_data *data = &debug_data;
mutex_lock(&debug_lock);
for (i = base; (i < (event_count + base)) && (i < data->event_count); i++)
gpio_debug_destroy_event(&data->events[i]);
mutex_unlock(&debug_lock);
}
static ssize_t event_list_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
	int i;
	ssize_t ret = 0;
	int length = 0;
	int pos = 0;
	char *buf;
	struct gpio_debug_data *data = file->f_inode->i_private;
	struct device *dev = &data->pdev->dev;
	char headline[] = " gpio event\n";
	mutex_lock(&debug_lock);
	length += strlen(headline);
	for (i = 0; i < data->event_count; i++)
		if (data->events[i].def.name)
			length += strlen(data->events[i].def.name) + 7; /* "%5d " prefix plus newline */
	length++; /* Reserve space for NUL termination */
	buf = devm_kzalloc(dev, length, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&debug_lock);
		return -ENOMEM;
	}
	pos = scnprintf(buf, length, "%s", headline);
	/* Passing buf as a source argument to snprintf(buf, ...) is undefined
	 * behaviour; append through an explicit offset instead. */
	for (i = 0; i < data->event_count; i++)
		if (data->events[i].data) {
			if (data->events[i].gpio_idx >= 0)
				pos += scnprintf(buf + pos, length - pos, "%5d %s\n", data->events[i].gpio_idx, data->events[i].def.name);
			else
				pos += scnprintf(buf + pos, length - pos, "      %s\n", data->events[i].def.name);
		}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	devm_kfree(dev, buf);
	mutex_unlock(&debug_lock);
	return ret;
}
static const struct file_operations event_list_ops = {
.read = event_list_read,
};
static ssize_t num_gpios_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
ssize_t ret = 0;
struct gpio_debug_data *data = file->f_inode->i_private;
char buf[256];
int pos;
pos = snprintf(buf, sizeof(buf), "%d\n", data->gpio_count);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
return ret;
}
static const struct file_operations num_gpios_ops = {
.read = num_gpios_read,
};
static int gpio_debug_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int count;
struct gpio_debug_data *data = &debug_data;
int i, j;
mutex_lock(&debug_lock);
count = of_gpio_count(np);
if (count < 0)
count = 0; /* Errors register as no GPIOs available */
data->gpio_count = count;
data->gpios = NULL;
data->pdev = pdev;
	if (count) {
		data->gpios = devm_kzalloc(dev, count * sizeof(int), GFP_KERNEL);
		if (!data->gpios) {
			mutex_unlock(&debug_lock);
			return -ENOMEM;
		}
		for (i = 0; i < count; i++) {
			data->gpios[i] = of_get_gpio(np, i);
			dev_info(dev, "GPIO at index %d has number %d\n", i, data->gpios[i]);
			if (gpio_is_valid(data->gpios[i])) {
				/* The requested label must outlive this scope,
				 * so allocate it rather than using stack space. */
				char *label = devm_kasprintf(dev, GFP_KERNEL, "debug-gpio-%d", i);
				dev_info(dev, "Requesting GPIO %d index %d with label %s\n", data->gpios[i], i, label);
				if (devm_gpio_request(dev, data->gpios[i], label))
					dev_err(dev, "GPIO [%d] request failed\n", data->gpios[i]);
				gpio_set_value(data->gpios[i], 1);
			} else
				dev_warn(dev, "GPIO at index %d is invalid\n", i);
		}
	}
data->gpio_debug_dir = debugfs_create_dir("gpio_debug", NULL);
if (!IS_ERR_OR_NULL(data->gpio_debug_dir)) {
data->gpio_debug_events_dir = debugfs_create_dir("events", data->gpio_debug_dir);
if (IS_ERR_OR_NULL(data->gpio_debug_events_dir)) {
data->gpio_debug_events_dir = NULL;
dev_err(dev, "Debugfs cannot create subdir\n");
}
debugfs_create_file("event_list", 0440, data->gpio_debug_dir, data, &event_list_ops);
debugfs_create_file("num_gpios", 0440, data->gpio_debug_dir, data, &num_gpios_ops);
} else {
data->gpio_debug_dir = NULL;
dev_warn(dev, "Debugfs is not available, configuration of GPIO debug is not possible\n");
}
for (i = 0; i < data->event_count; i++)
create_event_file(&data->events[i]);
mutex_unlock(&debug_lock);
	data->event_base = gpio_debug_event_list_register(debug_events_table, ARRAY_SIZE(debug_events_table));
	if (data->event_base < 0)
		return 0; /* Built-in toggle events unavailable; skip the boot-time toggle test */
for (i = 0; i < count; i++) {
gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_100], i);
for (j = 0; j < 100; j++) {
gpio_debug_event_enter(data->event_base, GPIO_DEBUG_TOGGLE_100);
gpio_debug_event_exit(data->event_base, GPIO_DEBUG_TOGGLE_100);
}
}
gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_100], -1);
for (i = 0; i < count; i++) {
gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_200], i);
for (j = 0; j < 200; j++) {
gpio_debug_event_enter(data->event_base, GPIO_DEBUG_TOGGLE_200);
gpio_debug_event_exit(data->event_base, GPIO_DEBUG_TOGGLE_200);
}
}
gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_200], -1);
return 0;
}
static int gpio_debug_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_debug_data *data = &debug_data;
mutex_lock(&debug_lock);
debugfs_remove_recursive(data->gpio_debug_dir);
data->gpio_debug_dir = NULL;
data->gpio_debug_events_dir = NULL;
if (data->gpios) {
int i;
for (i = 0; i < data->gpio_count; i++)
if (gpio_is_valid(data->gpios[i]))
devm_gpio_free(dev, data->gpios[i]);
devm_kfree(dev, data->gpios);
data->gpios = NULL;
data->gpio_count = 0;
}
data->pdev = NULL;
kfree(data->events);
data->events = NULL;
data->event_count = 0;
mutex_unlock(&debug_lock);
return 0;
}
static const struct of_device_id gpio_debug_match[] = {
{ .compatible = "samsung,gpio-debug", },
{},
};
MODULE_DEVICE_TABLE(of, gpio_debug_match);
static struct platform_driver gpio_debug_driver = {
.probe = gpio_debug_probe,
.remove = gpio_debug_remove,
.driver = {
.name = "gpio_debug",
.of_match_table = gpio_debug_match,
}
};
module_platform_driver(gpio_debug_driver);
MODULE_DESCRIPTION("GPIO Debug framework");
MODULE_AUTHOR("Samsung Electronics Co., Ltd");
MODULE_LICENSE("GPL and additional rights");
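
As a usage reference, here is a minimal, illustrative sketch of a client of this event API. The event name, index, and instrumented section are hypothetical; the registration and enter/exit calls follow the driver above.

/* Illustrative sketch only: instrument a code section so its duration can be
 * observed on a scope once the event is mapped to a GPIO index via debugfs,
 * e.g. echo 0 > /sys/kernel/debug/gpio_debug/events/my_section */
#include <linux/gpio_debug.h>
#include <linux/kernel.h>

enum { MY_EVT_SECTION };	/* hypothetical event id */

static struct gpio_debug_event_def my_events[] = {
	[MY_EVT_SECTION] = {
		.name = "my_section",
		.description = "Drive the mapped GPIO low while my_section runs",
	},
};

static int my_event_base;

static int my_client_init(void)
{
	my_event_base = gpio_debug_event_list_register(my_events, ARRAY_SIZE(my_events));
	return my_event_base < 0 ? my_event_base : 0;
}

static void my_section(void)
{
	gpio_debug_event_enter(my_event_base, MY_EVT_SECTION);	/* GPIO -> 0 */
	/* ...timed work... */
	gpio_debug_event_exit(my_event_base, MY_EVT_SECTION);	/* GPIO -> 1 */
}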


@@ -0,0 +1,9 @@
config SAMSUNG_KIC
tristate "Kernel Information and Control (KIC) interface for Samsung Wi-Fi and Bluetooth chips"
if SAMSUNG_KIC != n
config SLSI_KIC_API_ENABLED
bool "Enable the KIC kernel API"
depends on SAMSUNG_KIC
endif


@@ -0,0 +1,15 @@
obj-$(CONFIG_SAMSUNG_KIC) += samsung_kic.o
samsung_kic-y += \
slsi_kic_core.o \
slsi_kic_filtering.o \
slsi_kic_sap_wifi.o \
slsi_kic_sap_cm.o \
slsi_kic_sap_bt.o
ccflags-y += $(CONFIG_SAMSUNG_KIC_EXTRA)
# ----------------------------------------------------------------------------
# KIC configuration
# ----------------------------------------------------------------------------
ccflags-$(CONFIG_SLSI_KIC_API_ENABLED) += -DCONFIG_SLSI_KIC_API_ENABLED


@@ -0,0 +1,766 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/module.h>
#include "slsi_kic_internal.h"
static DEFINE_MUTEX(kic_lock);
static struct slsi_kic_pdata *pdata;
static int slsi_kic_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
SCSC_TAG_ERR(KIC_COMMON, "%s Handle CMD %d, seq %d",
__func__, ops->cmd, info->snd_seq);
OS_UNUSED_PARAMETER(skb);
/* Called BEFORE the command cb - do filtering here */
	/* Consider adding a check for "test_mode" primitives here:
	 * it would be a way to prevent test primitives (which can be
	 * powerful) from running unless test_mode has been configured. */
return 0;
}
static void slsi_kic_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
OS_UNUSED_PARAMETER(ops);
OS_UNUSED_PARAMETER(skb);
OS_UNUSED_PARAMETER(info);
/* Called AFTER the command cb - could do something here */
}
/* The netlink family */
static struct genl_family slsi_kic_fam = {
.id = GENL_ID_GENERATE, /* Don't bother with a hardcoded ID */
.name = "slsi_kic", /* Have users key off the name instead */
.hdrsize = 0, /* No private header */
.version = 2,
.netnsok = true,
.maxattr = SLSI_KIC_ATTR_MAX,
.pre_doit = slsi_kic_pre_doit,
.post_doit = slsi_kic_post_doit,
};
static const struct genl_multicast_group slsi_kic_general_system_mcgrp[] = {
{ .name = "general_system", },
};
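/*
 * User-space note (illustrative): because the family ID is generated at
 * registration time, clients resolve it by name with libnl-genl rather
 * than hardcoding it, e.g.:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int family = genl_ctrl_resolve(sk, "slsi_kic");
 *
 * and then join the "general_system" multicast group declared above to
 * receive the indication messages built below.
 */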
/**
* Message building helpers
*/
static inline void *kic_hdr_put(struct sk_buff *skb, uint32_t portid, uint32_t seq,
int flags, u8 cmd)
{
/* Since there is no private header just add the generic one */
return genlmsg_put(skb, portid, seq, &slsi_kic_fam, flags, cmd);
}
static int kic_build_u32_msg(struct sk_buff *msg, uint32_t portid, uint32_t seq, int flags,
enum slsi_kic_commands cmd, int attrtype, uint32_t payload)
{
void *hdr;
hdr = kic_hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -EFAULT;
if (nla_put_u32(msg, attrtype, payload))
goto nla_put_failure;
return genlmsg_end(msg, hdr);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int kic_add_timestamp_attrs(struct sk_buff *msg)
{
struct timespec ts;
	/**
	 * Use getrawmonotonic instead of getnstimeofday so that NTP
	 * adjustments to the wall clock cannot make timestamps jump or
	 * run backwards.
	 */
getrawmonotonic(&ts);
if (nla_put_u64(msg, SLSI_KIC_ATTR_TIMESTAMP_TV_SEC, ts.tv_sec))
goto nla_put_failure;
if (nla_put_u64(msg, SLSI_KIC_ATTR_TIMESTAMP_TV_NSEC, ts.tv_nsec))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int kic_build_system_event_msg(struct sk_buff *msg, uint32_t portid,
uint32_t seq, int flags,
uint32_t event_cat, uint32_t event)
{
void *hdr;
struct nlattr *nla;
hdr = kic_hdr_put(msg, portid, seq, flags, SLSI_KIC_CMD_SYSTEM_EVENT_IND);
if (!hdr)
return -EFAULT;
nla = nla_nest_start(msg, SLSI_KIC_ATTR_TIMESTAMP);
if (kic_add_timestamp_attrs(msg) < 0)
nla_nest_cancel(msg, nla);
else
nla_nest_end(msg, nla);
if (nla_put_u32(msg, SLSI_KIC_ATTR_SYSTEM_EVENT_CATEGORY, event_cat))
goto nla_put_failure;
if (nla_put_u32(msg, SLSI_KIC_ATTR_SYSTEM_EVENT, event))
goto nla_put_failure;
return genlmsg_end(msg, hdr);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int kic_build_firmware_event_msg(struct sk_buff *msg, uint32_t portid,
uint32_t seq, int flags,
uint16_t firmware_event_type,
enum slsi_kic_technology_type tech_type,
uint32_t contain_type,
struct slsi_kic_firmware_event_ccp_host *event)
{
void *hdr;
struct nlattr *nla;
hdr = kic_hdr_put(msg, portid, seq, flags, SLSI_KIC_CMD_FIRMWARE_EVENT_IND);
	if (!hdr)
		return -EFAULT; /* caller owns msg and frees it, matching the other builders */
if (nla_put_u16(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_TYPE, firmware_event_type))
goto nla_put_failure;
if (nla_put_u32(msg, SLSI_KIC_ATTR_TECHNOLOGY_TYPE, tech_type))
goto nla_put_failure;
if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_CONTAINER_TYPE, contain_type))
goto nla_put_failure;
nla = nla_nest_start(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CONTAINER_CCP_HOST);
if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ID, event->id))
goto nla_put_failure_cancel;
if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL, event->level))
goto nla_put_failure_cancel;
if (nla_put_string(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL_STRING, event->level_string))
goto nla_put_failure_cancel;
if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_TIMESTAMP, event->timestamp))
goto nla_put_failure_cancel;
if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_CPU, event->cpu))
goto nla_put_failure_cancel;
if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_OCCURENCES, event->occurences))
goto nla_put_failure_cancel;
if (nla_put(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ARG, event->arg_length, event->arg))
goto nla_put_failure_cancel;
nla_nest_end(msg, nla);
return genlmsg_end(msg, hdr);
nla_put_failure_cancel:
nla_nest_cancel(msg, nla);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int kic_build_service_info_msg_add_service(struct sk_buff *msg,
enum slsi_kic_technology_type tech,
struct slsi_kic_service_info *info)
{
struct nlattr *nla = NULL;
if (!msg || !info)
goto nla_put_failure;
if (nla_put_u32(msg, SLSI_KIC_ATTR_TECHNOLOGY_TYPE, tech))
goto nla_put_failure;
nla = nla_nest_start(msg, SLSI_KIC_ATTR_SERVICE_INFO);
if (nla_put_string(msg, SLSI_KIC_ATTR_SERVICE_INFO_VER_STR, info->ver_str))
goto nla_put_failure;
if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MAJOR, info->fw_api_major))
goto nla_put_failure;
if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MINOR, info->fw_api_minor))
goto nla_put_failure;
if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_RELEASE_PRODUCT, info->release_product))
goto nla_put_failure;
if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_ITERATION, info->host_release_iteration))
goto nla_put_failure;
if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_CANDIDATE, info->host_release_candidate))
goto nla_put_failure;
nla_nest_end(msg, nla);
return 0;
nla_put_failure:
if (nla)
nla_nest_cancel(msg, nla);
return -EMSGSIZE;
}
static int kic_build_service_info_msg(struct sk_buff *msg, uint32_t portid,
uint32_t seq, int flags,
enum slsi_kic_technology_type tech,
struct slsi_kic_service_info *info)
{
void *hdr;
hdr = kic_hdr_put(msg, portid, seq, flags, SLSI_KIC_CMD_SERVICE_INFORMATION_IND);
if (!hdr)
return -EFAULT;
if (kic_build_service_info_msg_add_service(msg, tech, info) < 0)
goto nla_put_failure;
return genlmsg_end(msg, hdr);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static uint32_t get_snd_pid(struct genl_info *info)
{
	uint32_t snd_pid = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	snd_pid = info->snd_pid;
#else
	/* snd_pid was renamed snd_portid in 3.7; the two ranges must not
	 * overlap, so use a plain #else rather than a second version check. */
	snd_pid = info->snd_portid;
#endif
	return snd_pid;
}
struct slsi_kic_pdata *slsi_kic_core_get_context(void)
{
return pdata;
}
/**
* Set the record to NULL to free and delete all stored records.
*/
static int service_info_delete_record(struct slsi_kic_service_details *record)
{
struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
if (!pdata)
return -EINVAL;
if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
SCSC_TAG_ERR(KIC_COMMON, "Failed to lock service info mutex - continue anyway");
if (record == NULL) {
struct slsi_kic_service_details *service, *tmp_node;
list_for_each_entry_safe(service, tmp_node, &pdata->chip_details.proxy_service_list, proxy_q) {
list_del(&service->proxy_q);
kfree(service);
}
} else {
list_del(&record->proxy_q);
kfree(record);
}
up(&pdata->chip_details.proxy_service_list_mutex);
return 0;
}
static struct slsi_kic_service_details *
service_info_find_entry(enum slsi_kic_technology_type tech)
{
struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
struct slsi_kic_service_details *service, *tmp_node;
if (!pdata)
return NULL;
list_for_each_entry_safe(service, tmp_node, &pdata->chip_details.proxy_service_list, proxy_q) {
if (service->tech == tech)
return service;
}
return NULL;
}
static int service_info_update_record(enum slsi_kic_technology_type tech,
struct slsi_kic_service_info *info)
{
struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
	struct slsi_kic_service_details *record; /* must not be static; this can run concurrently */
if (!pdata)
return -EINVAL;
if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
goto err_out;
record = service_info_find_entry(tech);
if (record == NULL) {
up(&pdata->chip_details.proxy_service_list_mutex);
goto err_out;
}
record->tech = tech;
memcpy(&record->info, info, sizeof(struct slsi_kic_service_info));
up(&pdata->chip_details.proxy_service_list_mutex);
return 0;
err_out:
SCSC_TAG_ERR(KIC_COMMON, "Failed to update service info record");
return -EFAULT;
}
static int service_info_add(enum slsi_kic_technology_type tech,
struct slsi_kic_service_info *info)
{
struct slsi_kic_service_details *new_entry;
struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
if (!pdata)
return -EINVAL;
new_entry = kmalloc(sizeof(struct slsi_kic_service_details), GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
new_entry->tech = tech;
memcpy(&new_entry->info, info, sizeof(struct slsi_kic_service_info));
if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
goto err_out;
list_add_tail(&new_entry->proxy_q, &pdata->chip_details.proxy_service_list);
up(&pdata->chip_details.proxy_service_list_mutex);
return 0;
err_out:
SCSC_TAG_ERR(KIC_COMMON, "Failed to add service info record to list");
kfree(new_entry);
return -EFAULT;
}
/**
* Command callbacks
*/
/* This function shall not do anything since the direction is
 * kernel->user space for this primitive. We should look into whether
 * this can be handled better than with an empty stub function. */
static int slsi_kic_wrong_direction(struct sk_buff *skb, struct genl_info *info)
{
OS_UNUSED_PARAMETER(skb);
	SCSC_TAG_ERR(KIC_COMMON, "%s Received CMD from pid %u seq %u: Wrong direction only supports kernel->user space",
		     __func__, get_snd_pid(info), info->snd_seq);
return -EINVAL;
}
static int slsi_kic_interface_version_number_req(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *hdr;
OS_UNUSED_PARAMETER(skb);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
hdr = kic_hdr_put(msg, 0, info->snd_seq, 0, SLSI_KIC_CMD_KIC_INTERFACE_VERSION_NUMBER_REQ);
if (!hdr)
goto nl_hdr_failure;
if (nla_put_u32(msg, SLSI_KIC_ATTR_KIC_VERSION_MAJOR, SLSI_KIC_INTERFACE_VERSION_MAJOR))
goto nla_put_failure;
if (nla_put_u32(msg, SLSI_KIC_ATTR_KIC_VERSION_MINOR, SLSI_KIC_INTERFACE_VERSION_MINOR))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
nla_put_failure:
genlmsg_cancel(msg, hdr);
nl_hdr_failure:
nlmsg_free(msg);
return -ENOBUFS;
}
static int slsi_kic_echo_req(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
uint32_t payload = 0;
OS_UNUSED_PARAMETER(skb);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (info->attrs[SLSI_KIC_ATTR_ECHO])
payload = nla_get_u32(info->attrs[SLSI_KIC_ATTR_ECHO]);
if (kic_build_u32_msg(msg, get_snd_pid(info), info->snd_seq, 0,
SLSI_KIC_CMD_ECHO_REQ, SLSI_KIC_ATTR_ECHO, payload) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
return genlmsg_reply(msg, info);
}
static int slsi_kic_service_information_req(struct sk_buff *skb, struct genl_info *info)
{
struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
int counter = 0, i;
struct sk_buff *msg;
struct slsi_kic_service_details *sr;
void *hdr;
OS_UNUSED_PARAMETER(skb);
if (!pdata)
return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
hdr = kic_hdr_put(msg, 0, info->snd_seq, 0, SLSI_KIC_CMD_SERVICE_INFORMATION_REQ);
if (!hdr)
goto nla_put_failure;
if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
goto nla_put_failure;
/* The request doesn't carry attributes, so no validation required.
* Query the list for information for each technology and encode. */
for (i = 0; i < slsi_kic_technology_type__after_last; i++) {
sr = service_info_find_entry(i);
if (sr) {
counter++;
if (kic_build_service_info_msg_add_service(msg, i, &sr->info) < 0) {
up(&pdata->chip_details.proxy_service_list_mutex);
goto nla_put_failure;
}
}
}
up(&pdata->chip_details.proxy_service_list_mutex);
if (nla_put_u32(msg, SLSI_KIC_ATTR_NUMBER_OF_ENCODED_SERVICES, counter))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
nla_put_failure:
nlmsg_free(msg);
return -EMSGSIZE;
}
static int slsi_kic_test_trigger_recovery_req(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
uint32_t technology = 0, recovery_type = 0;
struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
enum slsi_kic_test_recovery_status status = slsi_kic_test_recovery_status_ok;
OS_UNUSED_PARAMETER(skb);
if (info->attrs[SLSI_KIC_ATTR_TECHNOLOGY_TYPE])
technology = nla_get_u32(info->attrs[SLSI_KIC_ATTR_TECHNOLOGY_TYPE]);
if (info->attrs[SLSI_KIC_ATTR_TEST_RECOVERY_TYPE])
recovery_type = nla_get_u32(info->attrs[SLSI_KIC_ATTR_TEST_RECOVERY_TYPE]);
if (pdata) {
int err = -EFAULT;
if (technology == slsi_kic_technology_type_wifi) {
struct slsi_kic_wifi_ops_tuple *wifi_ops = NULL;
wifi_ops = &pdata->wifi_ops_tuple;
mutex_lock(&wifi_ops->ops_mutex);
if (wifi_ops->wifi_ops.trigger_recovery)
err = wifi_ops->wifi_ops.trigger_recovery(wifi_ops->priv,
(enum slsi_kic_test_recovery_type)recovery_type);
mutex_unlock(&wifi_ops->ops_mutex);
} else if (technology == slsi_kic_technology_type_curator) {
struct slsi_kic_cm_ops_tuple *cm_ops = NULL;
cm_ops = &pdata->cm_ops_tuple;
mutex_lock(&cm_ops->ops_mutex);
if (cm_ops->cm_ops.trigger_recovery)
err = cm_ops->cm_ops.trigger_recovery(cm_ops->priv,
(enum slsi_kic_test_recovery_type)recovery_type);
mutex_unlock(&cm_ops->ops_mutex);
} else if (technology == slsi_kic_technology_type_bt) {
struct slsi_kic_bt_ops_tuple *bt_ops = NULL;
bt_ops = &pdata->bt_ops_tuple;
mutex_lock(&bt_ops->ops_mutex);
if (bt_ops->bt_ops.trigger_recovery)
err = bt_ops->bt_ops.trigger_recovery(bt_ops->priv,
(enum slsi_kic_test_recovery_type)recovery_type);
mutex_unlock(&bt_ops->ops_mutex);
}
if (err < 0)
status = slsi_kic_test_recovery_status_error_send_msg;
} else
status = slsi_kic_test_recovery_status_error_invald_param;
/* Prepare reply */
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (kic_build_u32_msg(msg, get_snd_pid(info), info->snd_seq, 0,
SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ, SLSI_KIC_ATTR_TRIGGER_RECOVERY_STATUS, status) < 0)
goto nl_hdr_failure;
return genlmsg_reply(msg, info);
nl_hdr_failure:
nlmsg_free(msg);
return -ENOBUFS;
}
int slsi_kic_service_information_ind(enum slsi_kic_technology_type tech,
struct slsi_kic_service_info *info)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (service_info_find_entry(tech) == NULL) {
if (service_info_add(tech, info) < 0)
SCSC_TAG_ERR(KIC_COMMON, "%s Failed to add record", __func__);
} else if (service_info_update_record(tech, info) < 0)
SCSC_TAG_ERR(KIC_COMMON, "%s Failed to update record", __func__);
if (kic_build_service_info_msg(msg, 0, 0, 0, tech, info) < 0)
goto err;
return genlmsg_multicast(&slsi_kic_fam, msg, 0, 0, GFP_KERNEL);
err:
nlmsg_free(msg);
return -ENOBUFS;
}
EXPORT_SYMBOL(slsi_kic_service_information_ind);
int slsi_kic_system_event_ind(enum slsi_kic_system_event_category event_cat,
enum slsi_kic_system_events event, gfp_t flags)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, flags);
if (!msg)
return -ENOMEM;
if (kic_build_system_event_msg(msg, 0, 0, 0, event_cat, event) < 0)
goto err;
return genlmsg_multicast(&slsi_kic_fam, msg, 0, 0, flags);
err:
nlmsg_free(msg);
return -ENOBUFS;
}
EXPORT_SYMBOL(slsi_kic_system_event_ind);
int slsi_kic_firmware_event_ind(uint16_t firmware_event_type, enum slsi_kic_technology_type tech_type,
uint32_t contain_type, struct slsi_kic_firmware_event_ccp_host *event)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
	if (kic_build_firmware_event_msg(msg, 0, 0, 0, firmware_event_type, tech_type, contain_type, event) < 0) {
		nlmsg_free(msg); /* don't leak the buffer on encoding failure */
		return -ENOBUFS;
	}
return genlmsg_multicast(&slsi_kic_fam, msg, 0, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(slsi_kic_firmware_event_ind);
static const struct genl_ops slsi_kic_ops[] = {
{
.cmd = SLSI_KIC_CMD_KIC_INTERFACE_VERSION_NUMBER_REQ,
.doit = slsi_kic_interface_version_number_req,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = SLSI_KIC_CMD_SYSTEM_EVENT_IND,
.doit = slsi_kic_wrong_direction,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = SLSI_KIC_CMD_SERVICE_INFORMATION_REQ,
.doit = slsi_kic_service_information_req,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = SLSI_KIC_CMD_SERVICE_INFORMATION_IND,
.doit = slsi_kic_wrong_direction,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = SLSI_KIC_CMD_FIRMWARE_EVENT_IND,
.doit = slsi_kic_wrong_direction,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = SLSI_KIC_CMD_ECHO_REQ,
.doit = slsi_kic_echo_req,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ,
.doit = slsi_kic_test_trigger_recovery_req,
.policy = slsi_kic_attr_policy,
.flags = GENL_ADMIN_PERM,
},
};
static int __init slsi_kic_init(void)
{
int err;
SCSC_TAG_DEBUG(KIC_COMMON, "%s Enter", __func__);
mutex_lock(&kic_lock);
pdata = kzalloc(sizeof(struct slsi_kic_pdata), GFP_KERNEL);
if (!pdata) {
SCSC_TAG_ERR(KIC_COMMON, "%s Exit - no mem", __func__);
mutex_unlock(&kic_lock);
return -ENOMEM;
}
mutex_init(&pdata->wifi_ops_tuple.ops_mutex);
mutex_init(&pdata->cm_ops_tuple.ops_mutex);
mutex_init(&pdata->bt_ops_tuple.ops_mutex);
/* Init chip information proxy list */
INIT_LIST_HEAD(&pdata->chip_details.proxy_service_list);
sema_init(&pdata->chip_details.proxy_service_list_mutex, 1);
pdata->state = idle;
err = genl_register_family_with_ops_groups(&slsi_kic_fam, slsi_kic_ops,
slsi_kic_general_system_mcgrp);
if (err)
goto err_out;
mutex_unlock(&kic_lock);
SCSC_TAG_DEBUG(KIC_COMMON, "%s Exit", __func__);
return 0;
err_out:
	kfree(pdata); /* don't leak the context if family registration fails */
	pdata = NULL;
	mutex_unlock(&kic_lock);
	SCSC_TAG_ERR(KIC_COMMON, "%s Exit - err %d", __func__, err);
	return err;
}
static void __exit slsi_kic_exit(void)
{
int err;
SCSC_TAG_DEBUG(KIC_COMMON, "%s Enter", __func__);
	/* BUG_ON() followed by the same NULL check was redundant; warn and bail out */
	if (WARN_ON(!pdata)) {
		SCSC_TAG_ERR(KIC_COMMON, "%s Exit - invalid pdata", __func__);
		return;
	}
mutex_lock(&kic_lock);
err = genl_unregister_family(&slsi_kic_fam);
if (err < 0)
SCSC_TAG_ERR(KIC_COMMON, "%s Failed to unregister family", __func__);
if (service_info_delete_record(NULL) < 0)
		SCSC_TAG_ERR(KIC_COMMON, "%s Deleting service info list failed", __func__);
mutex_destroy(&pdata->wifi_ops_tuple.ops_mutex);
mutex_destroy(&pdata->cm_ops_tuple.ops_mutex);
mutex_destroy(&pdata->bt_ops_tuple.ops_mutex);
kfree(pdata);
pdata = NULL;
mutex_unlock(&kic_lock);
SCSC_TAG_DEBUG(KIC_COMMON, "%s Exit", __func__);
}
module_init(slsi_kic_init);
module_exit(slsi_kic_exit);
MODULE_DESCRIPTION("SCSC Kernel Information and Control (KIC) interface");
MODULE_AUTHOR("Samsung Electronics Co., Ltd");
MODULE_LICENSE("GPL and additional rights");
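
For orientation, a hedged sketch of how a service driver could publish its version record through the indication API above. The field names mirror the attribute encoders in this file; that ver_str is a char array, and the sample values, are assumptions.

/* Illustrative sketch only: listeners on the "general_system" group receive
 * this as SLSI_KIC_CMD_SERVICE_INFORMATION_IND. */
static void example_publish_service_info(void)
{
	struct slsi_kic_service_info info = {
		.fw_api_major = 1,		/* sample values, not real releases */
		.fw_api_minor = 4,
		.release_product = 140,
		.host_release_iteration = 2,
		.host_release_candidate = 1,
	};
	/* assumes ver_str is a char array in slsi_kic_prim.h */
	strlcpy(info.ver_str, "example-fw-1.4", sizeof(info.ver_str));
	slsi_kic_service_information_ind(slsi_kic_technology_type_wifi, &info);
}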


@@ -0,0 +1,8 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/* Implement subscriber handling and filtering here. This won't be essential
 * for the first draft of the kernel KIC implementation. */


@@ -0,0 +1,81 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __SLSI_KIC_INTERNAL_H
#define __SLSI_KIC_INTERNAL_H
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/version.h>
#include <linux/semaphore.h>
#include <net/genetlink.h>
#include <linux/time.h>
#include <linux/module.h>
#include <scsc/scsc_logring.h>
#include <scsc/kic/slsi_kic_prim.h>
#include <scsc/kic/slsi_kic_wifi.h>
#include <scsc/kic/slsi_kic_cm.h>
#include <scsc/kic/slsi_kic_bt.h>
#define OS_UNUSED_PARAMETER(x) ((void)(x))
/**
* Core instance
*/
enum slsi_kic_state {
idle,
initialised,
ready
};
struct slsi_kic_service_details {
struct list_head proxy_q;
enum slsi_kic_technology_type tech;
struct slsi_kic_service_info info;
};
struct slsi_kic_chip_details {
struct semaphore proxy_service_list_mutex;
struct list_head proxy_service_list;
};
struct slsi_kic_wifi_ops_tuple {
void *priv;
struct slsi_kic_wifi_ops wifi_ops;
struct mutex ops_mutex;
};
struct slsi_kic_bt_ops_tuple {
void *priv;
struct slsi_kic_bt_ops bt_ops;
struct mutex ops_mutex;
};
struct slsi_kic_cm_ops_tuple {
void *priv;
struct slsi_kic_cm_ops cm_ops;
struct mutex ops_mutex;
};
struct slsi_kic_pdata {
enum slsi_kic_state state;
struct slsi_kic_chip_details chip_details;
struct slsi_kic_wifi_ops_tuple wifi_ops_tuple;
struct slsi_kic_cm_ops_tuple cm_ops_tuple;
struct slsi_kic_bt_ops_tuple bt_ops_tuple;
uint32_t seq; /* This should *perhaps* be moved to a record struct for
* each subscription - will look into that during the
* filtering work. */
};
struct slsi_kic_pdata *slsi_kic_core_get_context(void);
#endif /* #ifndef __SLSI_KIC_INTERNAL_H */


@@ -0,0 +1,38 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include "slsi_kic_internal.h"
int slsi_kic_bt_ops_register(void *priv, struct slsi_kic_bt_ops *bt_ops)
{
struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
if (!kic_inst)
return -EFAULT;
mutex_lock(&kic_inst->bt_ops_tuple.ops_mutex);
memcpy(&kic_inst->bt_ops_tuple.bt_ops, bt_ops, sizeof(struct slsi_kic_bt_ops));
kic_inst->bt_ops_tuple.priv = priv;
mutex_unlock(&kic_inst->bt_ops_tuple.ops_mutex);
return 0;
}
EXPORT_SYMBOL(slsi_kic_bt_ops_register);
void slsi_kic_bt_ops_unregister(struct slsi_kic_bt_ops *bt_ops)
{
struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
OS_UNUSED_PARAMETER(bt_ops);
if (!kic_inst)
return;
mutex_lock(&kic_inst->bt_ops_tuple.ops_mutex);
memset(&kic_inst->bt_ops_tuple.bt_ops, 0, sizeof(struct slsi_kic_bt_ops));
kic_inst->bt_ops_tuple.priv = NULL;
mutex_unlock(&kic_inst->bt_ops_tuple.ops_mutex);
}
EXPORT_SYMBOL(slsi_kic_bt_ops_unregister);


@@ -0,0 +1,38 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include "slsi_kic_internal.h"
int slsi_kic_cm_ops_register(void *priv, struct slsi_kic_cm_ops *cm_ops)
{
struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
if (!kic_inst)
return -EFAULT;
mutex_lock(&kic_inst->cm_ops_tuple.ops_mutex);
memcpy(&kic_inst->cm_ops_tuple.cm_ops, cm_ops, sizeof(struct slsi_kic_cm_ops));
kic_inst->cm_ops_tuple.priv = priv;
mutex_unlock(&kic_inst->cm_ops_tuple.ops_mutex);
return 0;
}
EXPORT_SYMBOL(slsi_kic_cm_ops_register);
void slsi_kic_cm_ops_unregister(struct slsi_kic_cm_ops *cm_ops)
{
struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
OS_UNUSED_PARAMETER(cm_ops);
if (!kic_inst)
return;
mutex_lock(&kic_inst->cm_ops_tuple.ops_mutex);
memset(&kic_inst->cm_ops_tuple.cm_ops, 0, sizeof(struct slsi_kic_cm_ops));
kic_inst->cm_ops_tuple.priv = NULL;
mutex_unlock(&kic_inst->cm_ops_tuple.ops_mutex);
}
EXPORT_SYMBOL(slsi_kic_cm_ops_unregister);


@@ -0,0 +1,38 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include "slsi_kic_internal.h"
int slsi_kic_wifi_ops_register(void *priv, struct slsi_kic_wifi_ops *wifi_ops)
{
struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
if (!kic_inst)
return -EFAULT;
mutex_lock(&kic_inst->wifi_ops_tuple.ops_mutex);
memcpy(&kic_inst->wifi_ops_tuple.wifi_ops, wifi_ops, sizeof(struct slsi_kic_wifi_ops));
kic_inst->wifi_ops_tuple.priv = priv;
mutex_unlock(&kic_inst->wifi_ops_tuple.ops_mutex);
return 0;
}
EXPORT_SYMBOL(slsi_kic_wifi_ops_register);
void slsi_kic_wifi_ops_unregister(struct slsi_kic_wifi_ops *wifi_ops)
{
struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
OS_UNUSED_PARAMETER(wifi_ops);
if (!kic_inst)
return;
mutex_lock(&kic_inst->wifi_ops_tuple.ops_mutex);
memset(&kic_inst->wifi_ops_tuple.wifi_ops, 0, sizeof(struct slsi_kic_wifi_ops));
kic_inst->wifi_ops_tuple.priv = NULL;
mutex_unlock(&kic_inst->wifi_ops_tuple.ops_mutex);
}
EXPORT_SYMBOL(slsi_kic_wifi_ops_unregister);
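
A hedged sketch of the corresponding registration flow from a Wi-Fi service driver's side. Only the trigger_recovery hook is known from slsi_kic_core.c; the surrounding driver context and function names are hypothetical.

/* Illustrative sketch only: hook a driver recovery path into KIC so that
 * SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ can reach it. */
static int example_trigger_recovery(void *priv, enum slsi_kic_test_recovery_type type)
{
	/* priv is the context pointer passed at registration; kick the
	 * firmware recovery of the requested type here. */
	(void)priv;
	(void)type;
	return 0;
}

static struct slsi_kic_wifi_ops example_wifi_ops = {
	.trigger_recovery = example_trigger_recovery,
};

static int example_attach(void *drv_priv)
{
	return slsi_kic_wifi_ops_register(drv_priv, &example_wifi_ops);
}

static void example_detach(void)
{
	slsi_kic_wifi_ops_unregister(&example_wifi_ops);
}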


@@ -0,0 +1,72 @@
config SCSC_CORE_CM
tristate "Samsung SCSC Maxwell driver Core Module"
depends on SCSC_CORE
config SCSC_CORE
tristate "Samsung SCSC Maxwell driver"
select SCSC_CORE_CM
config SCSC_PCIE
tristate "Samsung SCSC MIF PCIE implementation"
depends on SCSC_CORE
depends on PCI
config SCSC_PLATFORM
tristate "Samsung SCSC MIF Platform driver implementation"
depends on SCSC_CORE
config SCSC_CM_MX_CLIENT_TEST
tristate "Samsung SCSC Test Client"
depends on SCSC_CORE
config SCSC_MX_ALWAYS_ON
tristate "Samsung SCSC MX140 always booted"
depends on SCSC_CM_MX_CLIENT_TEST
config SCSC_CLK20MHZ
tristate "Samsung SCSC USB 20MHz Clock"
depends on SCSC_CORE
config SCSC_CLK20MHZ_TEST
tristate "Samsung SCSC USB 20MHz Clock Test"
depends on SCSC_CLK20MHZ
config SCSC_MMAP
tristate "Samsung SCSC MMAP/GDB User space interface"
depends on SCSC_CORE
config SCSC_DBG_SAMPLER
tristate "Samsung SCSC Debug Sampler Service"
depends on SCSC_CORE
config SCSC_DEBUG
tristate "Samsung SCSC Logging"
depends on SCSC_CORE
default n
config SCSC_DEBUG_COMPATIBILITY
bool "Samsung SCSC Logging mode"
depends on SCSC_DEBUG
default y
config SCSC_STATIC_RING
tristate "Samsung SCSC Logging use static ring"
depends on SCSC_CORE
default n
config SCSC_STATIC_RING_SIZE
int "Size of the static ring"
depends on SCSC_CORE && SCSC_STATIC_RING
default "8196"
range 1024 4194304
config SCSC_CHV_SUPPORT
bool "Samsung CHV f/w support"
depends on SCSC_CORE
default n
config SCSC_GPR4_CON_DEBUG
bool "GPR4 PIO muxes switching to the Maxwell Subsystem"
depends on SCSC_PLATFORM
default n
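
For orientation, one plausible defconfig fragment enabling the core stack with logging (illustrative only; module/builtin choices are up to the platform, and the ring size follows the default above):

CONFIG_SCSC_CORE=m
CONFIG_SCSC_CORE_CM=m
CONFIG_SCSC_PLATFORM=m
CONFIG_SCSC_DEBUG=m
CONFIG_SCSC_STATIC_RING=m
CONFIG_SCSC_STATIC_RING_SIZE=8196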


@@ -0,0 +1,93 @@
#############################################################################
#
# Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
#
#############################################################################
# Needed since this subdir is symlinked in the main kernel tree;
# without this, our samsung subdir is NOT cleaned.
clean-files := *.o *.ko
ifneq ($(CONFIG_SCSC_CORE),n)
ccflags-y += -Iinclude/scsc
ccflags-y += $(CONFIG_SAMSUNG_MAXWELL_EXTRA)
obj-$(CONFIG_SCSC_DEBUG) += scsc_logring.o
scsc_logring-y += scsc_logring_main.o scsc_logring_ring.o scsc_logring_debugfs.o
##
## The following cumbersome ifeq/else is the only way to make
## this CONFIG_SCSC_PRINTK propagate correctly in any scenario (y,m,n)
## Otherwise using ONLY the else branch when CONFIG_SCSC_DEBUG evaluates
## to m ==>> ccflags-m does NOT work correctly in every build scenario:
## - kernel debug modules : builtin with scsc_logring support
## - kernel nodebug modules : builtins without scsc_logring support
## - kernel debug|nodebug nomodules : no builtin / no scsclogring
##
ifeq ($(CONFIG_SCSC_DEBUG), m)
ccflags-y += -DCONFIG_SCSC_PRINTK
else
ccflags-$(CONFIG_SCSC_DEBUG) += -DCONFIG_SCSC_PRINTK
endif
ifneq ($(CONFIG_SCSC_DEBUG),n)
ccflags-$(CONFIG_SCSC_STATIC_RING) += -DCONFIG_SCSC_STATIC_RING_SIZE=$(CONFIG_SCSC_STATIC_RING_SIZE)
endif
#pcie_module for static functions and registration of client modules (only core)
#pcie_mif for instance
obj-$(CONFIG_SCSC_PCIE) += scsc_pcie_mif.o
scsc_pcie_mif-y += pcie_mif_module.o pcie_mif.o pcie_proc.o
obj-$(CONFIG_SCSC_PLATFORM) += scsc_platform_mif.o
scsc_platform_mif-y += platform_mif_module.o platform_mif.o
#core_module for static functions and registration client module (all the service driver modules -wlan, bt,...-)
#core for instance
obj-$(CONFIG_SCSC_CORE_CM) += scsc_mx.o
scsc_mx-y += \
mxlog_transport.o \
fw_panic_record.o \
panicmon.o \
suspendmon.o \
mxlog.o \
mxproc.o \
scsc_service.o \
scsc_mx_module.o \
scsc_mx_impl.o \
mxman.o \
miframman.o \
mifmboxman.o \
mifproc.o \
mifintrbit.o \
fwhdr.o \
fwimage.o \
cpacket_buffer.o \
mifstream.o \
mxmgmt_transport.o \
gdb_transport.o \
mx140_file.o
ifneq ($(CONFIG_SCSC_CM_MX_CLIENT_TEST),n)
obj-$(CONFIG_SCSC_CM_MX_CLIENT_TEST) += mx_client_test.o
mx_client_test-y += client_test.o
endif
ifneq ($(CONFIG_SCSC_MMAP),n)
obj-$(CONFIG_SCSC_MMAP) += scsc_mmap.o
scsc_mmap-y += mx_mmap.o
endif
ifneq ($(CONFIG_SCSC_CLK20MHZ),n)
obj-$(CONFIG_SCSC_CLK20MHZ) += scsc_mx140_clk.o
scsc_mx140_clk-y += mx140_clk.o
ifeq ($(CONFIG_SCSC_CLK20MHZ_TEST),y)
scsc_mx140_clk-y += mx140_clk_test.o
endif
endif
obj-$(CONFIG_SCSC_DBG_SAMPLER) += scsc_dbg_sampler.o
scsc_dbg_sampler-y += mx_dbg_sampler.o
endif


@@ -0,0 +1,397 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <scsc/scsc_logring.h>
#include <scsc/scsc_mx.h>
struct scsc_mx_test {
/* scsc_service_client has to be the first */
struct scsc_service_client test_service_client;
struct scsc_service *primary_service;
struct scsc_service *secondary_service;
struct scsc_mx *mx;
bool started;
};
static struct scsc_mx_test *test;
/* First service to start */
static int service_id = SCSC_SERVICE_ID_NULL;
module_param(service_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(service_id, "ID of service to start, Default 0:NULL, 1:BT, 2:WLAN, 5:ECHO");
/* Second service to start if != -1 */
static int service_id_2 = -1;
module_param(service_id_2, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(service_id_2, "ID of optional second service to start: Default -1:None, 0:NULL, 1:BT, 2:WLAN, 5:ECHO");
#ifdef CONFIG_SCSC_MX_ALWAYS_ON
static int auto_start = 2;
#else
static int auto_start;
#endif
module_param(auto_start, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_start, "Start service automatically: 0: disabled, 1: Enabled, 2: Deferred");
/* Delay after probe before starting mx140 when auto_start=2 */
#define SCSC_MX_BOOT_DELAY_MS 30000
static DEFINE_MUTEX(ss_lock);
/* char device entry declarations */
static dev_t client_test_dev_t;
static struct class *client_test_class;
static struct cdev *client_test_cdev;
static void test_stop_on_failure(struct scsc_service_client *client)
{
SCSC_TAG_DEBUG(MXMAN_TEST, "OK\n");
}
static void test_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
{
(void)scsc_panic_code;
SCSC_TAG_ERR(MXMAN_TEST, "OK\n");
}
static void stop_close_services(void)
{
int r;
mutex_lock(&ss_lock);
if (!test->started) {
pr_info("mx140: already stopped\n");
goto done;
}
if (test->primary_service) {
r = scsc_mx_service_stop(test->primary_service);
if (r)
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(primary_service) failed err: %d\n", r);
else
SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(primary_service) OK\n");
scsc_mx_service_close(test->primary_service);
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id);
test->primary_service = NULL;
}
if (test->secondary_service) {
r = scsc_mx_service_stop(test->secondary_service);
if (r)
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(secondary_service) failed err: %d\n", r);
else
SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(secondary_service) OK\n");
scsc_mx_service_close(test->secondary_service);
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id_2);
test->secondary_service = NULL;
}
test->started = false;
done:
mutex_unlock(&ss_lock);
}
static bool open_start_services(struct scsc_mx *mx)
{
struct scsc_service *primary_service;
struct scsc_service *secondary_service;
int r;
bool ok;
mutex_lock(&ss_lock);
if (test->started) {
pr_info("mx140: already started\n");
ok = true;
goto done;
}
primary_service = scsc_mx_service_open(mx, service_id, &test->test_service_client, &r);
if (!primary_service) {
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_open for primary_service failed %d\n", r);
ok = false;
goto done;
}
r = scsc_mx_service_start(primary_service, 0);
if (r) {
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_start for primary_service failed\n");
scsc_mx_service_close(primary_service);
ok = false;
goto done;
}
test->primary_service = primary_service;
if (service_id_2 != -1) {
secondary_service = scsc_mx_service_open(mx, service_id_2, &test->test_service_client, &r);
if (!secondary_service) {
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_open for secondary_service failed %d\n", r);
r = scsc_mx_service_stop(test->primary_service);
if (r)
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(%d) failed err: %d\n", service_id, r);
else
SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(%d) OK\n", service_id);
scsc_mx_service_close(test->primary_service);
ok = false;
goto done;
}
r = scsc_mx_service_start(secondary_service, 0);
if (r) {
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_start for secondary_service failed\n");
scsc_mx_service_close(secondary_service);
r = scsc_mx_service_stop(test->primary_service);
if (r)
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(%d) failed err: %d\n", service_id, r);
else
SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(%d) OK\n", service_id);
scsc_mx_service_close(test->primary_service);
ok = false;
goto done;
}
test->secondary_service = secondary_service;
}
test->started = true;
ok = true;
done:
mutex_unlock(&ss_lock);
return ok;
}
static void delay_start_func(struct work_struct *work)
{
(void)work;
pr_info("mx140: Start wlbt null service\n");
if (!test->mx)
return;
if (!open_start_services(test->mx))
pr_err("mx140: Error starting delayed service\n");
}
static DECLARE_DELAYED_WORK(delay_start, delay_start_func);
/* Start the null service after a delay */
static void delay_open_start_services(void)
{
schedule_delayed_work(&delay_start, msecs_to_jiffies(SCSC_MX_BOOT_DELAY_MS));
}
/* Start service(s) and leave running until module unload */
void client_module_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
/* Avoid unused error */
(void)module_client;
SCSC_TAG_ERR(MXMAN_TEST, "mx140:\n");
test = kzalloc(sizeof(*test), GFP_ATOMIC);
if (!test)
return;
test->test_service_client.stop_on_failure = test_stop_on_failure;
test->test_service_client.failure_reset = test_failure_reset;
test->mx = mx;
switch (auto_start) {
case 1:
if (!open_start_services(test->mx)) {
SCSC_TAG_ERR(MXMAN_TEST, "Error starting service/s\n");
kfree(test);
return;
}
break;
case 2:
pr_info("mx140: delayed auto-start\n");
delay_open_start_services();
break;
default:
break;
}
SCSC_TAG_ERR(MXMAN_TEST, "OK\n");
}
void client_module_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
/* Avoid unused error */
(void)module_client;
pr_info("mx140: %s\n", __func__);
if (!test)
return;
if (test->mx != mx) {
SCSC_TAG_ERR(MXMAN_TEST, "test->mx != mx\n");
return;
}
/* Cancel any delayed start attempt */
cancel_delayed_work_sync(&delay_start);
stop_close_services();
/* de-allocate test structure */
kfree(test);
SCSC_TAG_DEBUG(MXMAN_TEST, "OK\n");
}
/* Test client driver registration */
struct scsc_mx_module_client client_test_driver = {
.name = "MX client test driver",
.probe = client_module_probe,
.remove = client_module_remove,
};
static int client_test_dev_open(struct inode *inode, struct file *file)
{
SCSC_TAG_ERR(MXMAN_TEST, "open client test\n");
return 0;
}
static ssize_t client_test_dev_write(struct file *file, const char *data, size_t len, loff_t *offset)
{
	char str[2]; /* One digit plus a trailing newline */
	long int val = 0;
	if (len < 1 || len > 2) {
		SCSC_TAG_ERR(MXMAN_TEST, "Incorrect value len %zd\n", len);
		goto error;
	}
	/* check the copy result instead of discarding it */
	if (copy_from_user(str, data, len)) {
		SCSC_TAG_ERR(MXMAN_TEST, "copy_from_user failed\n");
		goto error;
	}
	str[1] = 0;
if (kstrtol(str, 10, &val)) {
SCSC_TAG_ERR(MXMAN_TEST, "Invalid value\n");
goto error;
}
if (test) {
if (val) {
SCSC_TAG_INFO(MXMAN_TEST, "Start services\n");
open_start_services(test->mx);
} else {
SCSC_TAG_INFO(MXMAN_TEST, "Stop services\n");
stop_close_services();
}
} else {
SCSC_TAG_ERR(MXMAN_TEST, "Test not created\n");
goto error;
}
SCSC_TAG_ERR(MXMAN_TEST, "OK\n");
error:
return len;
}
static ssize_t client_test_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
{
return length;
}
static int client_test_dev_release(struct inode *inode, struct file *file)
{
SCSC_TAG_DEBUG(MXMAN_TEST, "close client test\n");
return 0;
}
static const struct file_operations client_test_dev_fops = {
.owner = THIS_MODULE,
.open = client_test_dev_open,
.read = client_test_dev_read,
.write = client_test_dev_write,
.release = client_test_dev_release,
};
static int __init scsc_client_test_module_init(void)
{
int r;
SCSC_TAG_DEBUG(MXMAN_TEST, "mx140:\n");
r = scsc_mx_module_register_client_module(&client_test_driver);
if (r) {
SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_module_register_client_module failed: r=%d\n", r);
return r;
}
r = alloc_chrdev_region(&client_test_dev_t, 0, 1, "sample-cdev");
if (r < 0) {
SCSC_TAG_ERR(MXMAN_TEST, "failed to alloc chrdev region\n");
goto fail_alloc_chrdev_region;
}
client_test_cdev = cdev_alloc();
if (!client_test_cdev) {
r = -ENOMEM;
SCSC_TAG_ERR(MXMAN_TEST, "failed to alloc cdev\n");
goto fail_alloc_cdev;
}
cdev_init(client_test_cdev, &client_test_dev_fops);
r = cdev_add(client_test_cdev, client_test_dev_t, 1);
if (r < 0) {
SCSC_TAG_ERR(MXMAN_TEST, "failed to add cdev\n");
goto fail_add_cdev;
}
	client_test_class = class_create(THIS_MODULE, "sample");
	if (IS_ERR(client_test_class)) {
		/* class_create() returns an ERR_PTR, never NULL, on failure */
		r = PTR_ERR(client_test_class);
		SCSC_TAG_ERR(MXMAN_TEST, "failed to create class\n");
		goto fail_create_class;
	}
	if (IS_ERR(device_create(client_test_class, NULL, client_test_dev_t, NULL, "mx_client_test_%d", MINOR(client_test_dev_t)))) {
		/* device_create() also reports failure via ERR_PTR */
		r = -EINVAL;
		SCSC_TAG_ERR(MXMAN_TEST, "failed to create device\n");
		goto fail_create_device;
	}
return 0;
fail_create_device:
class_destroy(client_test_class);
fail_create_class:
cdev_del(client_test_cdev);
fail_add_cdev:
fail_alloc_cdev:
unregister_chrdev_region(client_test_dev_t, 1);
fail_alloc_chrdev_region:
return r;
}
static void __exit scsc_client_test_module_exit(void)
{
SCSC_TAG_DEBUG(MXMAN_TEST, "mx140:\n");
scsc_mx_module_unregister_client_module(&client_test_driver);
SCSC_TAG_DEBUG(MXMAN_TEST, "exit\n");
device_destroy(client_test_class, client_test_dev_t);
class_destroy(client_test_class);
cdev_del(client_test_cdev);
unregister_chrdev_region(client_test_dev_t, 1);
}
late_initcall(scsc_client_test_module_init);
module_exit(scsc_client_test_module_exit);
MODULE_DESCRIPTION("mx140 Client Test Driver");
MODULE_AUTHOR("SCSC");
MODULE_LICENSE("GPL");
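
A hedged user-space sketch exercising the char device registered above. The node path assumes minor 0 from alloc_chrdev_region and a udev rule creating /dev/mx_client_test_0.

/* Illustrative test program: start the configured services, wait, stop them. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mx_client_test_0", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* "1" -> open_start_services() */
		perror("write start");
	sleep(5);			/* let the services run briefly */
	if (write(fd, "0", 1) != 1)	/* "0" -> stop_close_services() */
		perror("write stop");
	close(fd);
	return 0;
}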


@@ -0,0 +1,335 @@
/**
 * Circular buffer backed packet stream (Implementation)
 *
 * To allow easy distinction between full and empty buffers, one packet
 * slot is always kept reserved (unused). The buffer's status can then be
 * tested without tracking the used size explicitly:
 * - read_index == write_index                     : buffer is empty
 * - (write_index + 1) % num_packets == read_index : buffer is full
 * Without the reserved slot, read_index == write_index could mean either
 * empty or full.
 */
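/*
 * Worked example (illustrative): with num_packets == 8, read_index == 6 and
 * write_index == 1, the used count is (1 - 6 + 8) % 8 == 3 packets, so the
 * free space is 8 - 3 - 1 == 4 packets, one slot staying reserved. A
 * free-space helper consistent with these rules could be written as:
 *
 *	static inline uint32_t example_free_space(uint32_t r, uint32_t w,
 *						  uint32_t num_packets)
 *	{
 *		// both indices are < num_packets, so this cannot underflow
 *		return (r + num_packets - 1 - w) % num_packets;
 *	}
 *
 * (cpacketbuffer_free_space() itself is declared elsewhere in this driver.)
 */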
/* Implements */
#include "cpacket_buffer.h"
/* Uses */
#include <scsc/scsc_logring.h>
#include <linux/module.h>
#include "miframman.h"
/**
* Advances the read/write index by the given amount, wrapping around if this
* exceeds the buffer length.
*/
static inline void cpacketbuffer_advance_index(uint32_t *idx, uint32_t amount, uint32_t buffer_size)
{
*idx = (*idx + amount) % buffer_size;
}
/**
* Converts a buffer address to a read/write index.
*
* The address must be at the start of a packet.
*/
static inline uint32_t cpacketbuffer_address_to_index(struct cpacketbuffer *buffer, const uint8_t *address)
{
ptrdiff_t offset = address - (uint8_t *)buffer->buffer;
return (offset / buffer->packet_size) % buffer->num_packets;
}
/**
* Converts a buffer read/write index to an address.
*/
static inline uint8_t *cpacketbuffer_index_to_address(struct cpacketbuffer *buffer, uint32_t *idx)
{
return (uint8_t *)buffer->buffer + (*idx % buffer->num_packets) * buffer->packet_size;
}
/** Returns the current read index of the buffer */
static inline uint32_t cpacketbuffer_read_index(const struct cpacketbuffer *buffer)
{
return *buffer->read_index;
}
/** Returns the current write index of the buffer */
static inline uint32_t cpacketbuffer_write_index(const struct cpacketbuffer *buffer)
{
return *buffer->write_index;
}
/** Writes a set of whole packets to the buffer */
static bool cpacketbuffer_write_block(struct cpacketbuffer *buffer, const void *buf, uint32_t num_bytes)
{
uint32_t num_packets = (num_bytes + buffer->packet_size - 1) / buffer->packet_size;
const uint8_t *source_data;
uint32_t start_write_index;
uint32_t end_write_index;
if (num_packets > cpacketbuffer_free_space(buffer))
/* Not enough free packets to write this block */
return false;
source_data = (const uint8_t *)buf;
start_write_index = cpacketbuffer_write_index(buffer);
end_write_index = start_write_index;
cpacketbuffer_advance_index(&end_write_index, num_packets - 1, buffer->num_packets);
if (end_write_index < start_write_index) {
/* Writes wrap around the buffer, split the write in two */
uint32_t initial_write_size = (buffer->num_packets - start_write_index) * buffer->packet_size;
memcpy(cpacketbuffer_index_to_address(buffer, buffer->write_index), source_data, initial_write_size);
memcpy(buffer->buffer, source_data + initial_write_size, num_bytes - initial_write_size);
} else
memcpy(cpacketbuffer_index_to_address(buffer, buffer->write_index), source_data, num_bytes);
cpacketbuffer_advance_index(buffer->write_index, num_packets, buffer->num_packets);
return true;
}
/** Externally visible functions */
int cpacketbuffer_init(struct cpacketbuffer *buffer, uint32_t num_packets, uint32_t packet_size, struct scsc_mx *mx)
{
struct miframman *miframman;
uint32_t *ridx;
uint32_t *widx;
void *mem;
buffer->mx = mx;
miframman = scsc_mx_get_ramman(mx);
mem = miframman_alloc(miframman, num_packets * packet_size, 4);
if (!mem)
return -ENOMEM;
ridx = miframman_alloc(miframman, sizeof(uint32_t), 4);
if (!ridx) {
miframman_free(miframman, mem);
return -ENOMEM;
}
widx = miframman_alloc(miframman, sizeof(uint32_t), 4);
if (!widx) {
miframman_free(miframman, ridx);
miframman_free(miframman, mem);
return -ENOMEM;
}
buffer->buffer = mem;
buffer->num_packets = num_packets;
buffer->packet_size = packet_size;
buffer->read_index = ridx;
buffer->write_index = widx;
*buffer->read_index = 0;
*buffer->write_index = 0;
return 0;
}
void cpacketbuffer_release(struct cpacketbuffer *buffer)
{
struct miframman *miframman;
miframman = scsc_mx_get_ramman(buffer->mx);
miframman_free(miframman, buffer->read_index);
miframman_free(miframman, buffer->write_index);
miframman_free(miframman, buffer->buffer);
}
bool cpacketbuffer_write(struct cpacketbuffer *buffer, const void *buf, uint32_t num_bytes)
{
	if (buf == NULL || num_bytes == 0)
		return false;
	if (!cpacketbuffer_write_block(buffer, buf, num_bytes))
		return false;
/* CPU memory barrier */
wmb();
return true;
}
bool cpacketbuffer_write_gather(struct cpacketbuffer *buffer, const void **bufs, uint32_t *num_bytes, uint32_t num_bufs)
{
	uint32_t i;
	if (bufs == NULL || num_bytes == NULL || num_bufs == 0)
		return false;
for (i = 0; i < num_bufs; ++i) {
/* Write all the whole packets from this buffer */
uint32_t partial_packet_len = num_bytes[i] % buffer->packet_size;
uint32_t whole_packet_len = num_bytes[i] - partial_packet_len;
if (whole_packet_len > 0 &&
!cpacketbuffer_write_block(buffer, bufs[i], whole_packet_len))
return false;
if (partial_packet_len != 0) {
/* Partial packet present - write this and enough from the next data block(s) to fill this packet
* before continuing */
uint32_t needed_bytes;
uint8_t *write_ptr = cpacketbuffer_index_to_address(buffer, buffer->write_index);
memcpy(write_ptr, (const uint8_t *)bufs[i] + whole_packet_len, partial_packet_len);
write_ptr += partial_packet_len;
needed_bytes = buffer->packet_size - partial_packet_len;
while (i + 1 < num_bufs && needed_bytes > 0) {
uint32_t num_bytes_to_take = num_bytes[i + 1] >= needed_bytes ? needed_bytes : num_bytes[i + 1];
memcpy(write_ptr, bufs[i + 1], num_bytes_to_take);
bufs[i + 1] = (const uint8_t *)bufs[i + 1] + num_bytes_to_take;
num_bytes[i + 1] -= num_bytes_to_take;
write_ptr += num_bytes_to_take;
needed_bytes -= num_bytes_to_take;
if (num_bytes[i + 1] == 0)
/* This buffer has been consumed entirely, move to the next */
++i;
}
cpacketbuffer_advance_index(buffer->write_index, 1, buffer->num_packets);
}
}
/* CPU memory barrier */
wmb();
SCSC_TAG_DEBUG(CPKTBUFF, "*buffer->read_index=0x%x *buffer->write_index=0x%x\n",
*buffer->read_index, *buffer->write_index);
return true;
}
uint32_t cpacketbuffer_read(struct cpacketbuffer *buffer, void *buf, uint32_t num_bytes)
{
uint8_t *read_start;
uint32_t num_packets;
uint32_t num_available_packets;
if (buf == NULL || cpacketbuffer_is_empty(buffer))
return 0;
/* Work out where we're reading from */
read_start = cpacketbuffer_index_to_address(buffer, buffer->read_index);
num_packets = num_bytes / buffer->packet_size;
if (num_bytes % buffer->packet_size != 0)
/* A partial packet read was requested; the whole packet is still consumed */
++num_packets;
/* Ensure we have enough actual data to satisfy the read request, otherwise
* truncate the read request to the amount of data available. */
num_available_packets = buffer->num_packets - (cpacketbuffer_free_space(buffer) + 1);
if (num_packets > num_available_packets) {
num_packets = num_available_packets;
num_bytes = num_packets * buffer->packet_size;
}
if (cpacketbuffer_read_index(buffer) + num_packets > buffer->num_packets) {
/* The read wraps around the end of the buffer, do it in two parts */
uint32_t initial_read_size = (buffer->num_packets - cpacketbuffer_read_index(buffer)) * buffer->packet_size;
memcpy(buf, read_start, initial_read_size);
memcpy((uint8_t *)buf + initial_read_size, buffer->buffer, num_bytes - initial_read_size);
} else
memcpy(buf, read_start, num_bytes);
/* Update the read index with how many packets we pulled out of the stream */
cpacketbuffer_advance_index(buffer->read_index, num_packets, buffer->num_packets);
/* CPU memory barrier */
wmb();
return num_bytes;
}
const void *cpacketbuffer_peek(struct cpacketbuffer *buffer, const void *current_packet)
{
uint32_t next_packet_index;
SCSC_TAG_DEBUG(CPKTBUFF, "*buffer->read_index=0x%x *buffer->write_index=0x%x\n",
*buffer->read_index, *buffer->write_index);
if (current_packet == NULL)
/* Reading the first available packet */
next_packet_index = cpacketbuffer_read_index(buffer);
else
/* Reading the next available packet past the current value of current_packet */
next_packet_index = cpacketbuffer_address_to_index(buffer,
(const uint8_t *)current_packet + buffer->packet_size);
if (next_packet_index == cpacketbuffer_write_index(buffer))
/* No more packets available */
return NULL;
return cpacketbuffer_index_to_address(buffer, &next_packet_index);
}
void cpacketbuffer_peek_complete(struct cpacketbuffer *buffer, const void *current_packet)
{
if (current_packet == NULL)
return;
/* The address we're given is the last packet read, so the new read index is for the next one */
*buffer->read_index = cpacketbuffer_address_to_index(buffer,
(const uint8_t *)current_packet + buffer->packet_size);
/* CPU memory barrier */
wmb();
}
bool cpacketbuffer_is_empty(const struct cpacketbuffer *buffer)
{
return cpacketbuffer_read_index(buffer) == cpacketbuffer_write_index(buffer);
}
bool cpacketbuffer_is_full(const struct cpacketbuffer *buffer)
{
return (cpacketbuffer_write_index(buffer) + 1) % buffer->num_packets == cpacketbuffer_read_index(buffer);
}
uint32_t cpacketbuffer_free_space(const struct cpacketbuffer *buffer)
{
uint32_t base_free_space = cpacketbuffer_write_index(buffer) >= cpacketbuffer_read_index(buffer) ?
cpacketbuffer_read_index(buffer) + buffer->num_packets - cpacketbuffer_write_index(buffer) :
cpacketbuffer_read_index(buffer) - cpacketbuffer_write_index(buffer);
/* Subtract the full/empty identification reserved slot from the free space */
return base_free_space - 1;
}
uint32_t cpacketbuffer_packet_size(const struct cpacketbuffer *buffer)
{
return buffer->packet_size;
}
void cpacketbuffer_config_serialise(const struct cpacketbuffer *buffer, struct mxcbufconf *buf_conf)
{
scsc_mifram_ref mifram_ref;
struct scsc_mif_abs *mif;
mif = scsc_mx_get_mif_abs(buffer->mx);
mif->get_mifram_ref(mif, buffer->buffer, &mifram_ref);
buf_conf->buffer_loc = mifram_ref;
buf_conf->num_packets = buffer->num_packets;
buf_conf->packet_size = buffer->packet_size;
mif->get_mifram_ref(mif, buffer->read_index, &mifram_ref);
buf_conf->read_index_loc = mifram_ref;
mif->get_mifram_ref(mif, buffer->write_index, &mifram_ref);
buf_conf->write_index_loc = mifram_ref;
}
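/*
 * A hedged usage sketch of the API above, assuming a valid struct scsc_mx *mx
 * is available from the core driver; example_cpacketbuffer_use and the sample
 * sizes are illustrative. It shows how a gather write coalesces two
 * non-contiguous blocks into whole 16-byte packets.
 */
static int __maybe_unused example_cpacketbuffer_use(struct scsc_mx *mx)
{
struct cpacketbuffer pb;
const uint8_t hdr[4] = { 0xde, 0xad, 0xbe, 0xef };
uint8_t body[28] = { 0 };
const void *bufs[2] = { hdr, body };
uint32_t lens[2] = { sizeof(hdr), sizeof(body) };
uint8_t out[32];
int r;
r = cpacketbuffer_init(&pb, 32, 16, mx); /* 32 packets of 16 bytes */
if (r)
return r;
/* 4 + 28 = 32 bytes: exactly two packets; the tail of hdr and the
 * head of body are spliced into the first packet. */
if (cpacketbuffer_write_gather(&pb, bufs, lens, 2))
cpacketbuffer_read(&pb, out, sizeof(out)); /* consumes both packets */
cpacketbuffer_release(&pb);
return 0;
}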

View file

@ -0,0 +1,140 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* Circular buffer backed packet stream (Interface)
*/
#ifndef CPACKET_BUFFER_H__
#define CPACKET_BUFFER_H__
/* Uses */
#include <linux/types.h>
#include "scsc_mx_impl.h"
#include "mxconf.h"
struct cpacketbuffer;
/**
* Initialises the circular buffer.
* The memory buffer length must be a multiple of the packet size.
*/
int cpacketbuffer_init(struct cpacketbuffer *buffer, uint32_t num_packets, uint32_t packet_size, struct scsc_mx *mx);
void cpacketbuffer_release(struct cpacketbuffer *buffer);
/**
* Reads the given amount of data from the buffer, copying it to the provided address.
* This automatically removes the read data from the buffer.
*
* If the amount of data requested is not a multiple of the packet size
* only the requested number of bytes will be read, but the partially read packet
* will still be removed from the buffer.
*
* Returns the number of bytes actually read.
*/
uint32_t cpacketbuffer_read(struct cpacketbuffer *buffer, void *buf, uint32_t num_bytes);
/**
* Returns a pointer to the next packet of data within the buffer, without
* removing it. This can be used to process data in place without needing to
* copy it first.
*
* If multiple packets are present these can be read in turn by setting the value
* of current_packet to the returned value from the previous call to cpacketbuffer_peek.
*
* cpacketbuffer_peek_complete must be called to remove the packet(s) from the buffer.
*
* Returns a pointer to the beginning of the packet to read, or NULL if there is no
* packet to process.
*
* Example use:
* // Get the first data packet
* void *current_packet = cpacketbuffer_peek( buffer, NULL );
* void *last_packet = NULL;
* while( current_packet != NULL )
* {
* // Process data packet
* ...
*
* // Get the next data packet
* last_packet = current_packet;
* current_packet = cpacketbuffer_peek( buffer, current_packet );
* }
*
* // Remove all processed packets from the buffer
* if( last_packet != NULL )
* {
* cpacketbuffer_peek_complete( buffer, last_packet );
* }
*/
const void *cpacketbuffer_peek(struct cpacketbuffer *buffer, const void *current_packet);
/**
* Removes all packets from the buffer up to and including the given
* packet.
*
* This must be called after using cpacketbuffer_peek to indicate that packet(s)
* can be removed from the buffer.
*/
void cpacketbuffer_peek_complete(struct cpacketbuffer *buffer, const void *packet);
/**
* Writes a number of bytes to the buffer. This will always use up whole packets in the buffer
* even if the amount of data written is not an exact multiple of the packet size.
*
* Returns true if the data was written, false if there is not enough free space in the buffer.
*/
bool cpacketbuffer_write(struct cpacketbuffer *buffer, const void *buf, uint32_t num_bytes);
/**
* Writes a set of non-contiguous data blocks to the buffer as a contiguous set.
* This will always use up whole packets even if the
* amount of data written is not an exact multiple of the packet size.
*
* Returns true if the blocks were written, false if there is not enough
* free space in the buffer for all the blocks.
*/
bool cpacketbuffer_write_gather(struct cpacketbuffer *buffer, const void **bufs, uint32_t *num_bytes, uint32_t num_bufs);
/**
* Returns the number of free packets in the buffer.
*/
uint32_t cpacketbuffer_free_space(const struct cpacketbuffer *buffer);
/**
* Returns true if the buffer is empty.
*/
bool cpacketbuffer_is_empty(const struct cpacketbuffer *buffer);
/**
* Returns true if the buffer is full.
*/
bool cpacketbuffer_is_full(const struct cpacketbuffer *buffer);
/**
* Returns the packet size the buffer was initialised with. This is the same value
* as the packet_size argument passed to cpacketbuffer_init().
*/
uint32_t cpacketbuffer_packet_size(const struct cpacketbuffer *buffer);
void cpacketbuffer_config_serialise(const struct cpacketbuffer *buffer, struct mxcbufconf *buf_conf);
/**
* Buffer context object.
*/
struct cpacketbuffer {
struct scsc_mx *mx;
void *buffer; /* Buffer location */
uint32_t num_packets; /* Total number of packets that can be stored in the buffer */
uint32_t packet_size; /* Size of each individual packet within the buffer */
/** Pointers to 32bit R/W indexes - these should point to uint32_ts */
uint32_t *read_index; /* Pointer to the location of the read index, which
* contains the index of the next packet to read. */
uint32_t *write_index; /* Pointer to the location of the write index, which
* contains the index after the last packet written. */
};
#endif /* CPACKET_BUFFER_H__ */

View file

@ -0,0 +1,148 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <scsc/scsc_logring.h>
#include "panic_record_r4_defs.h"
#define PANIC_RECORD_CKSUM_SEED 0xa5a5a5a5
/*
* version 2 r4 panic record defs
*/
#define R4_PANIC_RECORD_VERSION_2 2
#define R4_PANIC_RECORD_LENGTH_INDEX_V2 1
#define R4_PANIC_RECORD_MAX_LENGTH_V2 256
/*
* version 1 m4 panic record defs
*/
#define M4_PANIC_RECORD_VERSION_1 1
#define M4_PANIC_RECORD_VERSION_INDEX 0
#define M4_PANIC_RECORD_LENGTH_INDEX 1
#define M4_PANIC_RECORD_MAX_LENGTH 256
/**
* Compute 32bit xor of specified seed value and data.
*
* @param seed Initial seed value.
* @param data Array of uint32s to be xored
* @param len Number of uint32s to be xored
*
* @return Computed 32bit xor of specified seed value and data.
*/
static u32 xor32(uint32_t seed, const u32 data[], size_t len)
{
const u32 *i;
u32 xor = seed;
for (i = data; i != data + len; ++i)
xor ^= *i;
return xor;
}
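/*
 * Worked example (illustrative): a three-word record whose last word is the
 * checksum. The parsers below xor the first length-1 words with the seed and
 * compare the result against the final word.
 */
static void __maybe_unused xor32_example(void)
{
u32 rec[3] = { 0x2, 0xc, 0x0 }; /* version, length in bytes, checksum */
rec[2] = xor32(PANIC_RECORD_CKSUM_SEED, rec, 2); /* 0xa5a5a5a5 ^ 0x2 ^ 0xc */
/* now xor32(PANIC_RECORD_CKSUM_SEED, rec, 2) == rec[2], so the record verifies */
}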
static void panic_record_dump(u32 *panic_record, u32 panic_record_length, bool r4)
{
int i;
SCSC_TAG_INFO(FW_PANIC, "%s panic record dump(length=%d):\n",
r4 ? "R4" : "M4", panic_record_length);
for (i = 0; i < panic_record_length; i++)
SCSC_TAG_INFO(FW_PANIC, "%s_panic_record[%d] = %08x\n",
r4 ? "r4" : "m4", i, panic_record[i]);
}
static bool fw_parse_r4_panic_record_v2(u32 *r4_panic_record)
{
u32 panic_record_cksum;
u32 calculated_cksum;
u32 panic_record_length = *(r4_panic_record + R4_PANIC_RECORD_LENGTH_INDEX_V2) / 4;
SCSC_TAG_INFO(FW_PANIC, "panic_record_length: %d\n",
panic_record_length);
if (panic_record_length < R4_PANIC_RECORD_MAX_LENGTH_V2) {
panic_record_cksum = *(r4_panic_record + panic_record_length - 1);
calculated_cksum = xor32(PANIC_RECORD_CKSUM_SEED, r4_panic_record, panic_record_length - 1);
if (calculated_cksum == panic_record_cksum) {
SCSC_TAG_INFO(FW_PANIC, "panic_record_cksum OK: %08x\n",
calculated_cksum);
panic_record_dump(r4_panic_record, panic_record_length, true);
return true;
} else {
SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_cksum: 0x%x calculated_cksum: 0x%x\n",
panic_record_cksum, calculated_cksum);
}
} else {
SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_length: %d\n",
panic_record_length);
}
return false;
}
static bool fw_parse_m4_panic_record_v1(u32 *m4_panic_record)
{
u32 panic_record_cksum;
u32 calculated_cksum;
u32 panic_record_length = *(m4_panic_record + M4_PANIC_RECORD_LENGTH_INDEX) / 4;
SCSC_TAG_INFO(FW_PANIC, "panic_record_length: %d\n",
panic_record_length);
if (panic_record_length < M4_PANIC_RECORD_MAX_LENGTH) {
panic_record_cksum = *(m4_panic_record + panic_record_length - 1);
calculated_cksum = xor32(PANIC_RECORD_CKSUM_SEED, m4_panic_record, panic_record_length - 1);
if (calculated_cksum == panic_record_cksum) {
SCSC_TAG_INFO(FW_PANIC, "panic_record_cksum OK: %08x\n",
calculated_cksum);
panic_record_dump(m4_panic_record, panic_record_length, false);
return true;
} else {
SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_cksum: 0x%x calculated_cksum: 0x%x\n",
panic_record_cksum, calculated_cksum);
}
} else {
SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_length: %d\n",
panic_record_length);
}
return false;
}
bool fw_parse_r4_panic_record(u32 *r4_panic_record)
{
u32 panic_record_version = *(r4_panic_record + PANIC_RECORD_R4_VERSION_INDEX);
SCSC_TAG_INFO(FW_PANIC, "panic_record_version: %d\n",
panic_record_version);
switch (panic_record_version) {
default:
SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_version: %d\n",
panic_record_version);
break;
case R4_PANIC_RECORD_VERSION_2:
return fw_parse_r4_panic_record_v2(r4_panic_record);
}
return false;
}
bool fw_parse_m4_panic_record(u32 *m4_panic_record)
{
u32 panic_record_version = *(m4_panic_record + M4_PANIC_RECORD_VERSION_INDEX);
SCSC_TAG_INFO(FW_PANIC, "panic_record_version: %d\n", panic_record_version);
switch (panic_record_version) {
default:
SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_version: %d\n",
panic_record_version);
break;
case M4_PANIC_RECORD_VERSION_1:
return fw_parse_m4_panic_record_v1(m4_panic_record);
}
return false;
}
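/*
 * A hedged self-test sketch, assuming the v2 layout places the version in
 * word 0 (PANIC_RECORD_R4_VERSION_INDEX) and the length in bytes in word 1,
 * as the parser above implies. fw_panic_record_selftest is illustrative and
 * not part of the driver.
 */
static bool __maybe_unused fw_panic_record_selftest(void)
{
u32 rec[4];
rec[0] = R4_PANIC_RECORD_VERSION_2; /* version */
rec[1] = sizeof(rec); /* length in bytes: 16 -> 4 words */
rec[2] = 0xdeadbeef; /* payload */
rec[3] = xor32(PANIC_RECORD_CKSUM_SEED, rec, 3); /* checksum over words 0..2 */
return fw_parse_r4_panic_record(rec); /* expected to return true */
}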

View file

@ -0,0 +1,13 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef FW_PANIC_RECORD_H__
#define FW_PANIC_RECORD_H__
bool fw_parse_r4_panic_record(u32 *r4_panic_record);
bool fw_parse_m4_panic_record(u32 *m4_panic_record);
#endif /* FW_PANIC_RECORD_H__ */

View file

@ -0,0 +1,92 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <scsc/scsc_logring.h>
#include "fwhdr.h"
/*
* The Maxwell Firmware Header Format is defined in SC-505846-SW
*/
#define FWHDR_02_TRAMPOLINE_OFFSET 0
#define FWHDR_02_MAGIC_OFFSET 8
#define FWHDR_02_VERSION_MINOR_OFFSET 12
#define FWHDR_02_VERSION_MAJOR_OFFSET 14
#define FWHDR_02_LENGTH_OFFSET 16
#define FWHDR_02_FIRMWARE_API_VERSION_MINOR_OFFSET 20
#define FWHDR_02_FIRMWARE_API_VERSION_MAJOR_OFFSET 22
#define FWHDR_02_FIRMWARE_CRC_OFFSET 24
#define FWHDR_02_CONST_FW_LENGTH_OFFSET 28
#define FWHDR_02_CONST_CRC_OFFSET 32
#define FWHDR_02_FIRMWARE_RUNTIME_LENGTH_OFFSET 36
#define FWHDR_02_FIRMWARE_ENTRY_POINT_OFFSET 40
#define FWHDR_02_BUILD_ID_OFFSET 48
#define FWHDR_02_R4_PANIC_RECORD_OFFSET_OFFSET 176
#define FWHDR_02_M4_PANIC_RECORD_OFFSET_OFFSET 180
/*
* Firmware header format for version 1.0 is the same as for version 0.2
*/
#define FWHDR_02_TRAMPOLINE(__fw) (*((u32 *)(__fw + FWHDR_02_TRAMPOLINE_OFFSET)))
#define FWHDR_02_HEADER_FIRMWARE_ENTRY_POINT(__fw) (*((u32 *)(__fw + FWHDR_02_FIRMWARE_ENTRY_POINT_OFFSET)))
#define FWHDR_02_HEADER_FIRMWARE_RUNTIME_LENGTH(__fw) (*((u32 *)(__fw + FWHDR_02_FIRMWARE_RUNTIME_LENGTH_OFFSET)))
#define FWHDR_02_HEADER_VERSION_MAJOR(__fw) (*((u16 *)(__fw + FWHDR_02_VERSION_MAJOR_OFFSET)))
#define FWHDR_02_HEADER_VERSION_MINOR(__fw) (*((u16 *)(__fw + FWHDR_02_VERSION_MINOR_OFFSET)))
#define FWHDR_02_HEADER_FIRMWARE_API_VERSION_MINOR(__fw) (*((u16 *)(__fw + FWHDR_02_FIRMWARE_API_VERSION_MINOR_OFFSET)))
#define FWHDR_02_HEADER_FIRMWARE_API_VERSION_MAJOR(__fw) (*((u16 *)(__fw + FWHDR_02_FIRMWARE_API_VERSION_MAJOR_OFFSET)))
#define FWHDR_02_FW_CRC32(__fw) (*((u32 *)(__fw + FWHDR_02_FIRMWARE_CRC_OFFSET)))
#define FWHDR_02_HDR_LENGTH(__fw) (*((u32 *)(__fw + FWHDR_02_LENGTH_OFFSET)))
#define FWHDR_02_HEADER_CRC32(__fw) (*((u32 *)(__fw + (FWHDR_02_HDR_LENGTH(__fw)) - sizeof(u32))))
#define FWHDR_02_CONST_CRC32(__fw) (*((u32 *)(__fw + FWHDR_02_CONST_CRC_OFFSET)))
#define FWHDR_02_CONST_FW_LENGTH(__fw) (*((u32 *)(__fw + FWHDR_02_CONST_FW_LENGTH_OFFSET)))
#define FWHDR_02_R4_PANIC_RECORD_OFFSET(__fw) (*((u32 *)(__fw + FWHDR_02_R4_PANIC_RECORD_OFFSET_OFFSET)))
#define FWHDR_02_M4_PANIC_RECORD_OFFSET(__fw) (*((u32 *)(__fw + FWHDR_02_M4_PANIC_RECORD_OFFSET_OFFSET)))
/* The firmware header carries panic record offsets if the header length is at
 * least MIN_HEADER_LENGTH_WITH_PANIC_RECORD (188) bytes */
#define MIN_HEADER_LENGTH_WITH_PANIC_RECORD 188
#define FWHDR_MAGIC_STRING "smxf"
static bool fwhdr_parse_v02(char *fw, struct fwhdr *fwhdr)
{
if (!memcmp(fw + FWHDR_02_MAGIC_OFFSET, FWHDR_MAGIC_STRING, sizeof(FWHDR_MAGIC_STRING) - 1)) {
fwhdr->firmware_entry_point = FWHDR_02_HEADER_FIRMWARE_ENTRY_POINT(fw);
fwhdr->hdr_major = FWHDR_02_HEADER_VERSION_MAJOR(fw);
fwhdr->hdr_minor = FWHDR_02_HEADER_VERSION_MINOR(fw);
fwhdr->fwapi_major = FWHDR_02_HEADER_FIRMWARE_API_VERSION_MAJOR(fw);
fwhdr->fwapi_minor = FWHDR_02_HEADER_FIRMWARE_API_VERSION_MINOR(fw);
fwhdr->fw_crc32 = FWHDR_02_FW_CRC32(fw);
fwhdr->const_crc32 = FWHDR_02_CONST_CRC32(fw);
fwhdr->header_crc32 = FWHDR_02_HEADER_CRC32(fw);
fwhdr->const_fw_length = FWHDR_02_CONST_FW_LENGTH(fw);
fwhdr->hdr_length = FWHDR_02_HDR_LENGTH(fw);
fwhdr->fw_runtime_length = FWHDR_02_HEADER_FIRMWARE_RUNTIME_LENGTH(fw);
SCSC_TAG_INFO(FW_LOAD, "hdr_length=%d", fwhdr->hdr_length);
fwhdr->r4_panic_record_offset = FWHDR_02_R4_PANIC_RECORD_OFFSET(fw);
fwhdr->m4_panic_record_offset = FWHDR_02_M4_PANIC_RECORD_OFFSET(fw);
return true;
}
return false;
}
static char *fwhdr_get_build_id_v02(char *fw, struct fwhdr *fwhdr)
{
if (!memcmp(fw + FWHDR_02_MAGIC_OFFSET, FWHDR_MAGIC_STRING, sizeof(FWHDR_MAGIC_STRING) - 1))
return fw + FWHDR_02_BUILD_ID_OFFSET;
return NULL;
}
bool fwhdr_parse(char *fw, struct fwhdr *fwhdr)
{
return fwhdr_parse_v02(fw, fwhdr);
}
char *fwhdr_get_build_id(char *fw, struct fwhdr *fwhdr)
{
return fwhdr_get_build_id_v02(fw, fwhdr);
}

View file

@ -0,0 +1,33 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef FWHDR_H
#define FWHDR_H
struct fwhdr {
u16 hdr_major;
u16 hdr_minor;
u16 fwapi_major;
u16 fwapi_minor;
u32 firmware_entry_point;
u32 fw_runtime_length;
u32 fw_crc32;
u32 const_crc32;
u32 header_crc32;
u32 const_fw_length;
u32 hdr_length;
u32 r4_panic_record_offset;
u32 m4_panic_record_offset;
};
bool fwhdr_parse(char *fw, struct fwhdr *fwhdr);
char *fwhdr_get_build_id(char *fw, struct fwhdr *fwhdr);
#endif /* FWHDR_H */

View file

@ -0,0 +1,59 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <scsc/scsc_logring.h>
#include "fwimage.h"
int fwimage_check_fw_header_crc(char *fw, u32 hdr_length, u32 header_crc32)
{
u32 header_crc32_calculated;
/*
* The last 4-bytes are header CRC
*/
header_crc32_calculated = ether_crc(hdr_length - sizeof(u32), fw);
if (header_crc32_calculated != header_crc32) {
SCSC_TAG_ERR(FW_LOAD, "CRC32 doesn't match: header_crc32_calculated=0x%08x header_crc32=0x%08x\n",
header_crc32_calculated, header_crc32);
return -EINVAL;
}
SCSC_TAG_DEBUG(FW_LOAD, "CRC32 OK: header_crc32_calculated=0x%08x header_crc32=0x%08x\n",
header_crc32_calculated, header_crc32);
return 0;
}
int fwimage_check_fw_const_section_crc(char *fw, u32 const_crc32, u32 const_fw_length, u32 hdr_length)
{
u32 const_crc32_calculated;
const_crc32_calculated = ether_crc(const_fw_length - hdr_length, fw + hdr_length);
if (const_crc32_calculated != const_crc32) {
SCSC_TAG_ERR(FW_LOAD, "CRC32 doesn't match: const_crc32_calculated=0x%08x const_crc32=0x%08x\n",
const_crc32_calculated, const_crc32);
return -EINVAL;
}
SCSC_TAG_DEBUG(FW_LOAD, "CRC32 OK: const_crc32_calculated=0x%08x const_crc32=0x%08x\n",
const_crc32_calculated, const_crc32);
return 0;
}
int fwimage_check_fw_crc(char *fw, u32 fw_image_length, u32 hdr_length, u32 fw_crc32)
{
u32 fw_crc32_calculated;
fw_crc32_calculated = ether_crc(fw_image_length - hdr_length, fw + hdr_length);
if (fw_crc32_calculated != fw_crc32) {
SCSC_TAG_ERR(FW_LOAD, "CRC32 doesn't match: fw_crc32_calculated=0x%08x fw_crc32=0x%08x\n",
fw_crc32_calculated, fw_crc32);
return -EINVAL;
}
SCSC_TAG_DEBUG(FW_LOAD, "CRC32 OK: fw_crc32_calculated=0x%08x fw_crc32=0x%08x\n",
fw_crc32_calculated, fw_crc32);
return 0;
}
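/*
 * A hedged sketch of how the header parse and the three CRC checks are
 * expected to chain together when validating a firmware blob. fw_validate is
 * an illustrative name and assumes fwhdr.h is also included; error handling
 * is abbreviated.
 */
static int __maybe_unused fw_validate(char *fw, u32 fw_image_length)
{
struct fwhdr hdr;
int r;
if (!fwhdr_parse(fw, &hdr))
return -EINVAL; /* bad magic */
r = fwimage_check_fw_header_crc(fw, hdr.hdr_length, hdr.header_crc32);
if (r)
return r;
r = fwimage_check_fw_const_section_crc(fw, hdr.const_crc32, hdr.const_fw_length, hdr.hdr_length);
if (r)
return r;
return fwimage_check_fw_crc(fw, fw_image_length, hdr.hdr_length, hdr.fw_crc32);
}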

View file

@ -0,0 +1,14 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef FWIMAGE_H
#define FWIMAGE_H
int fwimage_check_fw_header_crc(char *fw, u32 hdr_length, u32 header_crc32);
int fwimage_check_fw_const_section_crc(char *fw, u32 const_crc32, u32 const_fw_length, u32 hdr_length);
int fwimage_check_fw_crc(char *fw, u32 fw_runtime_length, u32 hdr_length, u32 fw_crc32);
#endif /* FWIMAGE_H */

View file

@ -0,0 +1,248 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/** Implements */
#include "gdb_transport.h"
/** Uses */
#include <linux/module.h>
#include <linux/slab.h>
#include <scsc/scsc_logring.h>
#include "mifintrbit.h"
struct clients_node {
struct list_head list;
struct gdb_transport_client *gdb_client;
};
struct gdb_transport_node {
struct list_head list;
struct gdb_transport *gdb_transport;
};
static struct gdb_transport_module {
struct list_head clients_list;
struct list_head gdb_transport_list;
} gdb_transport_module = {
.clients_list = LIST_HEAD_INIT(gdb_transport_module.clients_list),
.gdb_transport_list = LIST_HEAD_INIT(gdb_transport_module.gdb_transport_list)
};
static void input_irq_handler(int irq, void *data)
{
struct gdb_transport *gdb_transport = (struct gdb_transport *)data;
struct scsc_mif_abs *mif_abs;
u32 num_bytes;
u32 alloc_bytes;
char *buf;
/* 1st length */
/* Clear the interrupt first to ensure we can't possibly miss one */
mif_abs = scsc_mx_get_mif_abs(gdb_transport->mx);
mif_abs->irq_bit_clear(mif_abs, irq);
while (mif_stream_read(&gdb_transport->mif_istream, &num_bytes, sizeof(uint32_t))) {
if (num_bytes > 0 && num_bytes
< (GDB_TRANSPORT_BUF_LENGTH - sizeof(uint32_t))) {
alloc_bytes = sizeof(char) * num_bytes;
buf = kmalloc(alloc_bytes, GFP_ATOMIC);
if (!buf) {
/* Drop the message rather than dereference NULL */
SCSC_TAG_ERR(GDB_TRANS, "kmalloc of %u bytes failed\n", num_bytes);
break;
}
/* 2nd payload (msg) */
mif_stream_read(&gdb_transport->mif_istream, buf, num_bytes);
gdb_transport->channel_handler_fn(buf, num_bytes, gdb_transport->channel_handler_data);
kfree(buf);
} else
SCSC_TAG_ERR(GDB_TRANS, "Incorrect num_bytes\n");
}
}
/** MIF Interrupt handler for acknowledging reads made by the AP */
static void output_irq_handler(int irq, void *data)
{
struct scsc_mif_abs *mif_abs;
struct gdb_transport *gdb_transport = (struct gdb_transport *)data;
/* Clear the interrupt first to ensure we can't possibly miss one */
/* The FW read some data from the output stream.
* Currently we do not care, so just clear the interrupt. */
mif_abs = scsc_mx_get_mif_abs(gdb_transport->mx);
mif_abs->irq_bit_clear(mif_abs, irq);
}
static void gdb_transport_probe_registered_clients(struct gdb_transport *gdb_transport)
{
bool client_registered = false;
struct clients_node *gdb_client_node, *gdb_client_next;
struct scsc_mif_abs *mif_abs;
char *dev_uid;
/* Traverse the linked list of registered clients */
list_for_each_entry_safe(gdb_client_node, gdb_client_next, &gdb_transport_module.clients_list, list) {
/* Get UID */
mif_abs = scsc_mx_get_mif_abs(gdb_transport->mx);
dev_uid = mif_abs->get_uid(mif_abs);
gdb_client_node->gdb_client->probe(gdb_client_node->gdb_client, gdb_transport, dev_uid);
client_registered = true;
}
if (client_registered == false)
SCSC_TAG_INFO(GDB_TRANS, "No clients registered\n");
}
void gdb_transport_release(struct gdb_transport *gdb_transport)
{
struct clients_node *gdb_client_node, *gdb_client_next;
struct gdb_transport_node *gdb_transport_node, *gdb_transport_node_next;
bool match = false;
list_for_each_entry_safe(gdb_transport_node, gdb_transport_node_next, &gdb_transport_module.gdb_transport_list, list) {
if (gdb_transport_node->gdb_transport == gdb_transport) {
match = true;
/* Notify registered clients that the transport is being released */
list_for_each_entry_safe(gdb_client_node, gdb_client_next, &gdb_transport_module.clients_list, list) {
gdb_client_node->gdb_client->remove(gdb_client_node->gdb_client, gdb_transport);
}
list_del(&gdb_transport_node->list);
kfree(gdb_transport_node);
}
}
if (match == false)
SCSC_TAG_INFO(GDB_TRANS, "No match for given scsc_mif_abs\n");
mif_stream_release(&gdb_transport->mif_istream);
mif_stream_release(&gdb_transport->mif_ostream);
}
void gdb_transport_config_serialise(struct gdb_transport *gdb_transport,
struct mxtransconf *trans_conf)
{
mif_stream_config_serialise(&gdb_transport->mif_istream, &trans_conf->to_ap_stream_conf);
mif_stream_config_serialise(&gdb_transport->mif_ostream, &trans_conf->from_ap_stream_conf);
}
/** Public functions */
int gdb_transport_init(struct gdb_transport *gdb_transport, struct scsc_mx *mx, enum gdb_transport_enum type)
{
int r;
uint32_t mem_length = GDB_TRANSPORT_BUF_LENGTH;
uint32_t packet_size = 4;
uint32_t num_packets;
struct gdb_transport_node *gdb_transport_node;
gdb_transport_node = kzalloc(sizeof(*gdb_transport_node), GFP_ATOMIC);
if (!gdb_transport_node)
return -ENOMEM;
/*
* Initialising a buffer of 1 byte is never legitimate, do not allow it.
* The memory buffer length must be a multiple of the packet size.
*/
if (mem_length <= 1) {
kfree(gdb_transport_node);
return -EIO;
}
memset(gdb_transport, 0, sizeof(struct gdb_transport));
num_packets = mem_length / packet_size;
mutex_init(&gdb_transport->channel_handler_mutex);
gdb_transport->mx = mx;
if (type == GDB_TRANSPORT_M4)
r = mif_stream_init(&gdb_transport->mif_istream, SCSC_MIF_ABS_TARGET_M4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, input_irq_handler, gdb_transport);
else
r = mif_stream_init(&gdb_transport->mif_istream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, input_irq_handler, gdb_transport);
if (r) {
kfree(gdb_transport_node);
return r;
}
if (type == GDB_TRANSPORT_M4)
r = mif_stream_init(&gdb_transport->mif_ostream, SCSC_MIF_ABS_TARGET_M4, MIF_STREAM_DIRECTION_OUT, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_RESERVED, output_irq_handler, gdb_transport);
else
r = mif_stream_init(&gdb_transport->mif_ostream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_OUT, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_RESERVED, output_irq_handler, gdb_transport);
if (r) {
mif_stream_release(&gdb_transport->mif_istream);
kfree(gdb_transport_node);
return r;
}
gdb_transport->channel_handler_fn = NULL;
gdb_transport->channel_handler_data = NULL;
gdb_transport_node->gdb_transport = gdb_transport;
/* Add gdb_transport node */
list_add_tail(&gdb_transport_node->list, &gdb_transport_module.gdb_transport_list);
gdb_transport->type = type;
gdb_transport_probe_registered_clients(gdb_transport);
return 0;
}
void gdb_transport_send(struct gdb_transport *gdb_transport, void *message, uint32_t message_length)
{
mutex_lock(&gdb_transport->channel_handler_mutex);
/* 1st length */
mif_stream_write(&gdb_transport->mif_ostream, &message_length, sizeof(uint32_t));
/* 2nd payload (msg) */
mif_stream_write(&gdb_transport->mif_ostream, message, message_length);
mutex_unlock(&gdb_transport->channel_handler_mutex);
}
EXPORT_SYMBOL(gdb_transport_send);
void gdb_transport_register_channel_handler(struct gdb_transport *gdb_transport,
gdb_channel_handler handler, void *data)
{
mutex_lock(&gdb_transport->channel_handler_mutex);
gdb_transport->channel_handler_fn = handler;
gdb_transport->channel_handler_data = (void *)data;
mutex_unlock(&gdb_transport->channel_handler_mutex);
}
EXPORT_SYMBOL(gdb_transport_register_channel_handler);
int gdb_transport_register_client(struct gdb_transport_client *gdb_client)
{
struct clients_node *gdb_client_node;
struct gdb_transport_node *gdb_transport_node;
struct scsc_mif_abs *mif_abs;
char *dev_uid;
/* Add node in modules linked list */
gdb_client_node = kzalloc(sizeof(*gdb_client_node), GFP_ATOMIC);
if (!gdb_client_node)
return -ENOMEM;
gdb_client_node->gdb_client = gdb_client;
list_add_tail(&gdb_client_node->list, &gdb_transport_module.clients_list);
/* Traverse Linked List for transport registered */
list_for_each_entry(gdb_transport_node, &gdb_transport_module.gdb_transport_list, list) {
/* Get UID */
mif_abs = scsc_mx_get_mif_abs(gdb_transport_node->gdb_transport->mx);
dev_uid = mif_abs->get_uid(mif_abs);
gdb_client->probe(gdb_client, gdb_transport_node->gdb_transport, dev_uid);
}
return 0;
}
EXPORT_SYMBOL(gdb_transport_register_client);
void gdb_transport_unregister_client(struct gdb_transport_client *gdb_client)
{
struct clients_node *gdb_client_node, *gdb_client_next;
/* Traverse Linked List for each client_list */
list_for_each_entry_safe(gdb_client_node, gdb_client_next, &gdb_transport_module.clients_list, list) {
if (gdb_client_node->gdb_client == gdb_client) {
list_del(&gdb_client_node->list);
kfree(gdb_client_node);
}
}
}
EXPORT_SYMBOL(gdb_transport_unregister_client);
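/*
 * A hypothetical client sketch; the example_* names are illustrative. The
 * probe callback registers a channel handler and sends a first message,
 * mirroring the API exported above.
 */
static void example_handler(const void *message, size_t length, void *data)
{
/* Consume or copy the message here; the pointer is only valid for the
 * duration of this call, which runs from interrupt context. */
}
static void example_probe(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport, char *dev_uid)
{
char ping[] = "ping";
gdb_transport_register_channel_handler(gdb_transport, example_handler, NULL);
gdb_transport_send(gdb_transport, ping, sizeof(ping));
}
static void example_remove(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport)
{
/* Stop using gdb_transport here; it is being released. */
}
static struct gdb_transport_client example_client = {
.name = "example",
.probe = example_probe,
.remove = example_remove,
};
/* Typically called from module init: gdb_transport_register_client(&example_client); */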

View file

@ -0,0 +1,85 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* Maxwell gdb transport (Interface)
*
* Provides bi-directional communication between the firmware and the
* host.
*
* This interface also provides a utility method for sending messages across
* the stream.
*/
#ifndef GDB_TRANSPORT_H__
#define GDB_TRANSPORT_H__
/** Uses */
#include <linux/kthread.h>
#include "mifstream.h"
#define GDB_TRANSPORT_BUF_LENGTH (2 * 1024)
struct gdb_transport;
enum gdb_transport_enum {
GDB_TRANSPORT_R4 = 0,
GDB_TRANSPORT_M4,
};
/**
* Transport channel callback handler. This will be invoked each time a message on a channel is
* received. Handlers may perform work within
* their callback implementation, but should not block.
*
* Note that the message pointer passed is only valid for the duration of the function call.
*/
typedef void (*gdb_channel_handler)(const void *message, size_t length, void *data);
/**
* Sends a message to the AP across the given channel.
*
* This function is safe to call from any RTOS thread.
*/
void gdb_transport_send(struct gdb_transport *gdb_transport,
void *message, uint32_t message_length);
/**
* Initialises the maxwell management transport and configures the necessary
* interrupt handlers. Called once during boot.
*/
int gdb_transport_init(struct gdb_transport *gdb_transport, struct scsc_mx *mx, enum gdb_transport_enum type);
void gdb_transport_release(struct gdb_transport *gdb_transport);
/*
 * Serialises the transport configuration (the to-AP and from-AP MIF stream
 * configurations) into the given mxtransconf structure.
 */
void gdb_transport_config_serialise(struct gdb_transport *gdb_transport, struct mxtransconf *trans_conf);
void gdb_transport_set_error(struct gdb_transport *gdb_transport);
struct gdb_transport {
struct scsc_mx *mx;
struct mif_stream mif_istream;
struct mif_stream mif_ostream;
/** Registered channel handlers for messages coming from the AP for each channel */
gdb_channel_handler channel_handler_fn;
void *channel_handler_data;
struct mutex channel_handler_mutex;
/* Transport processor type */
enum gdb_transport_enum type;
};
struct gdb_transport_client {
char *name;
void (*probe)(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport, char *dev_uid);
void (*remove)(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport);
};
int gdb_transport_register_client(struct gdb_transport_client *gdb_client);
void gdb_transport_unregister_client(struct gdb_transport_client *gdb_client);
void gdb_transport_register_channel_handler(struct gdb_transport *gdb_transport, gdb_channel_handler handler, void *data);
void gdb_transport_register_char_device(struct scsc_mx *mx, struct gdb_transport **gdb_transport_handler);
#endif /* GDB_TRANSPORT_H__ */

View file

@ -0,0 +1,174 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __MIF_REG_H
#define __MIF_REG_H
/* TODO: split PCIE and PLATFORM definitions */
/*****************************/
/* PCIE register definitions */
/*****************************/
/* Maximum number of mbox registers (issr) to allocate. Should match the FW configuration */
#define MAX_NUM_MBOX 64
/* Number of emulated mailboxes.
 * This number should match mifmboxman.h but is up to the
 * specific PCIe implementation */
#define NUM_MBOX 8
#if NUM_MBOX > MAX_NUM_MBOX
#error "NUM_MBOX > MAX_NUM_MBOX"
#endif
/* TODO */
/* Match platform driver register naming */
struct scsc_mbox_s {
volatile u32 issr[NUM_MBOX]; /* MAILBOXES */
volatile u32 padding[MAX_NUM_MBOX - NUM_MBOX]; /* MAILBOXES */
volatile u32 intmr0; /* R4 Int Mask */
volatile u32 intsr0; /* R4 Int Status register */
volatile u32 intmr1; /* AP Int Mask */
volatile u32 intsr1; /* AP Int Status register */
volatile u32 intmr2; /* M4 Int Mask */
volatile u32 intsr2; /* M4 Int Status register */
};
struct peterson_mutex {
u32 flag[2];
u32 turn;
};
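/*
 * The struct above matches the classic two-party Peterson lock. A minimal
 * sketch follows; the party numbering (AP = 0, R4 = 1) is an assumption, and
 * a real cross-processor implementation would also need memory barriers
 * around each shared access.
 */
static inline void peterson_lock(volatile struct peterson_mutex *m, u32 self)
{
u32 other = 1 - self;
m->flag[self] = 1; /* declare interest */
m->turn = other; /* give way */
while (m->flag[other] && m->turn == other)
; /* spin until the other side releases or yields its turn */
}
static inline void peterson_unlock(volatile struct peterson_mutex *m, u32 self)
{
m->flag[self] = 0;
}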
#define MBOX_OFFSET 0x3ffc00
/* Leave space to allocate emulated registers */
#define P_OFFSET_AP 0x3ffe00
#define P_OFFSET_R4 0x3fff00
/*********************************/
/* PLATFORM register definitions */
/*********************************/
#define NUM_MBOX_PLAT 8
#define NUM_SEMAPHORE 12
#define MAILBOX_WLBT_BASE 0x0000
#define MAILBOX_WLBT_REG(r) (MAILBOX_WLBT_BASE + (r))
#define MCUCTRL 0x000 /* MCU Controller Register */
/* R0 [31:16] - Int FROM R4/M4 */
#define INTGR0 0x008 /* Interrupt Generation Register 0 (r/w) */
#define INTCR0 0x00C /* Interrupt Clear Register 0 (w) */
#define INTMR0 0x010 /* Interrupt Mask Register 0 (r/w) */
#define INTSR0 0x014 /* Interrupt Status Register 0 (r) */
#define INTMSR0 0x018 /* Interrupt Mask Status Register 0 (r) */
/* R1 [15:0] - Int TO R4 */
#define INTGR1 0x01c /* Interrupt Generation Register 1 */
#define INTCR1 0x020 /* Interrupt Clear Register 1 */
#define INTMR1 0x024 /* Interrupt Mask Register 1 */
#define INTSR1 0x028 /* Interrupt Status Register 1 */
#define INTMSR1 0x02c /* Interrupt Mask Status Register 1 */
/* R2 [15:0] - Int TO M4 */
#define INTGR2 0x030 /* Interrupt Generation Register 2 */
#define INTCR2 0x034 /* Interrupt Clear Register 2 */
#define INTMR2 0x038 /* Interrupt Mask Register 2 */
#define INTSR2 0x03c /* Interrupt Status Register 2 */
#define INTMSR2 0x040 /* Interrupt Mask Status Register 2 */
#define MIF_INIT 0x04c /* MIF_init */
#define IS_VERSION 0x050 /* Version Information Register */
#define ISSR_BASE 0x080 /* IS_Shared_Register Base address */
#define ISSR(r) (ISSR_BASE + (4 * (r)))
#define SEMAPHORE_BASE 0x180 /* IS_Shared_Register Base address */
#define SEMAPHORE(r) (SEMAPHORE_BASE + (4 * (r)))
#define SEMA0CON 0x1c0
#define SEMA0STATE 0x1c8
/* POWER */
/* Page 594 datasheet */
/* Base Address - 0x11C8_0000 */
#define WIFI_CTRL_NS 0x0140 /* WIFI Control SFR non-secure */
#define WIFI_PWRON BIT(1)
#define WIFI_RESET_SET BIT(2)
#define WIFI_ACTIVE_EN BIT(5) /* Enable of WIFI_ACTIVE_REQ */
#define WIFI_ACTIVE_CLR BIT(6) /* WIFI_ACTIVE_REQ is clear internally on WAKEUP */
#define WIFI_RESET_REQ_EN BIT(7) /* 1:enable, 0:disable Enable of WIFI_RESET_REQ */
#define WIFI_RESET_REQ_CLR BIT(8) /* WIFI_RESET_REQ is clear internally on WAKEUP */
#define MASK_WIFI_PWRDN_DONE BIT(9) /* 1: mask, 0: pass the RTC clock-out enable to WIFI.
 * This masks WIFI_PWRDN_DONE coming in from WIFI.
 * If MASK_WIFI_PWRDN_DONE = 1, WIFI enters the DOWN
 * state without checking WIFI_PWRDN_DONE */
#define WIFI_CTRL_S 0x0144 /* WIFI Control SFR secure */
#define WIFI_START BIT(3) /* WIFI reset release control. If WIFI_START = 1,
 * WIFI exits the DOWN state and goes to the UP state.
 * While this field is set high (WIFI_START = 1) the
 * WIFI state can go to UP. The signal is auto-cleared
 * by DIRECTWR at UP */
#define WIFI_STAT 0x0148 /* Indicate whether WIFI uses MIF domain */
#define WIFI_DEBUG 0x014c /* MIF sleep, wakeup debugging control */
/* Page 1574 datasheet */
#define PMU_ALIVE_BASE 0x0000
#define PMU_ALIVE_REG(r) (PMU_ALIVE_BASE + (r))
#define WIFI2AP_MEM_CONFIG0 0x0150 /* Control WLBT_MEM_SIZE. */
#define WLBT2AP_MIF_ACCESS_WIN0 0x0154 /* ACCESS_CONTROL_PERI_IP */
#define WLBT2AP_MIF_ACCESS_WIN1 0x0158 /* ACCESS_CONTROL_PERI_IP */
#define WLBT2AP_MIF_ACCESS_WIN2 0x015a /* ACCESS_CONTROL_PERI_IP */
#define WLBT2AP_MIF_ACCESS_WIN3 0x0160 /* ACCESS_CONTROL_PERI_IP */
#define WIFI2AP_MEM_CONFIG1 0x0164 /* Control WLBT_MEM_BA0 */
#define WLBT_BOOT_TEST_RST_CFG 0x0168 /* WLBT_IRAM_BOOT_OFFSET */
/* WLBT_IRAM_BOOT_TEST */
/* WLBT2AP_PERI_PROT2 */
#define WLBT2AP_PERI_ACCESS_WIN 0x016c /* WLBT2AP_PERI_ACCESS_END - WLBT2AP_PERI_ACCESS_START */
#define WIFI2AP_MODAPIF_CONFIG 0x0170 /* WLBT2AP_PERI_ACCESS_END - WLBT2AP_PERI_ACCESS_START */
#define WIFI2AP_QOS 0x0170 /* RT */
#define WIFI2AP_MEM_CONFIG2 0x017c /* Control WLBT_MEM_BA1 */
#define WIFI2AP_MEM_CONFIG3 0x0184 /* Control WLBT_ADDR_RNG */
/* Power down registers */
#define RESET_ASB_WIFI_SYS_PWR_REG 0x11f4 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
#define TCXO_GATE_WIFI_SYS_PWR_REG 0x11f0 /* Control power state in LOWPWR mode 1 - on, 0 */
#define LOGIC_RESET_WIFI_SYS_PWR_REG 0x11f8 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
#define CLEANY_BUS_WIFI_SYS_PWR_REG 0x11fc /* Control power state in LOWPWR mode 1 - on, 0 - down*/
#define CENTRAL_SEQ_WIFI_CONFIGURATION 0x0380 /* bit 16. Decides whether system-level low-power mode
* is used HIGH: System-level Low-Power mode
* disabled. LOW: System-level Low-Power mode
* enabled. When system enters low-power mode,
* this field is automatically cleared to HIGH. */
#define CENTRAL_SEQ_WIFI_STATUS 0x0384 /* 23:16 Check statemachine status */
#define STATES 0xff0000
#define SYS_PWR_CFG BIT(0)
#define SYS_PWR_CFG_2 (BIT(0) | BIT(1))
#define SYS_PWR_CFG_16 BIT(16)
/* CMU registers to request PLL for USB Clock */
#define USBPLL_CON0 0x1000
#define AP2WIFI_USBPLL_REQ BIT(0) /* 1: Request PLL, 0: Release PLL */
#define USBPLL_CON1 0x1004 /* */
#define AP2WLBT_USBPLL_WPLL_SEL BIT(0) /* 1: WLBT, 0: AP */
#define AP2WLBT_USBPLL_WPLL_EN BIT(1) /* 1: Enable, 0: Disable */
/***** Interrupts ********
 *
 * - MBOX
 * - WIFI_ACTIVE (page 553)
 * Comes from BLK_WIFI. The initial value is low and it goes high after WIFI boots. If a
 * problem occurs within WIFI, the WIFI CPU can drive WIFI_ACTIVE low again. The AP CPU detects
 * the high-to-low transition, which is routed to the GIC as an interrupt source. In the ISR,
 * the AP CPU clears the wake source and the interrupt by setting WIFI_CTRL__WIFI_ACTIVE_CLR.
 * WIFI_ACTIVE_CLR is auto-cleared by the direct-write function.
 *
 * - WIFI_RESET_REQ (page 554)
 * WIFI can request a WIFI reset only through WIFI_RESET_REQ. When WIFI_RESET_REQ is asserted,
 * the AP PMU detects it as a wakeup source and an interrupt source. In the ISR, the AP CPU
 * clears the wakeup source by setting WIFI_CTRL__CP_RESET_REQ_CLR. The interrupt itself may not
 * be cleared, however, because it goes to the GIC directly from WIFI (using the mask function
 * within the GIC). WIFI_RESET_REQ_CLR is auto-cleared by the direct-write function.
 */
#endif /* __MIF_REG_H */

View file

@ -0,0 +1,244 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/* Uses */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <scsc/scsc_logring.h>
#include "scsc_mif_abs.h"
/* Implements */
#include "mifintrbit.h"
/* The default handler just clears the bit */
static void mifintrbit_default_handler(int irq, void *data)
{
struct mifintrbit *intr = (struct mifintrbit *)data;
unsigned long flags;
spin_lock_irqsave(&intr->spinlock, flags);
intr->mif->irq_bit_clear(intr->mif, irq);
spin_unlock_irqrestore(&intr->spinlock, flags);
}
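/* Debug helper: snapshots the bitmaps into locals for inspection under a debugger; nothing is printed */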
static void print_bitmaps(struct mifintrbit *intr)
{
unsigned long dst1, dst2, dst3;
bitmap_copy_le(&dst1, intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
bitmap_copy_le(&dst2, intr->bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
bitmap_copy_le(&dst3, intr->bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);
}
static void mifiintrman_isr(int irq, void *data)
{
struct mifintrbit *intr = (struct mifintrbit *)data;
unsigned long flags;
int irq_reg = 0;
int bit;
/* Avoid unused parameter error */
(void)irq;
spin_lock_irqsave(&intr->spinlock, flags);
irq_reg = intr->mif->irq_get(intr->mif);
print_bitmaps(intr);
for_each_set_bit(bit, (unsigned long int *)&irq_reg, MIFINTRBIT_NUM_INT) {
if (intr->mifintrbit_irq_handler[bit] != mifintrbit_default_handler)
intr->mifintrbit_irq_handler[bit](bit, intr->irq_data[bit]);
}
spin_unlock_irqrestore(&intr->spinlock, flags);
}
/* Public functions */
int mifintrbit_alloc_tohost(struct mifintrbit *intr, mifintrbit_handler handler, void *data)
{
struct scsc_mif_abs *mif;
unsigned long flags;
int which_bit = 0;
spin_lock_irqsave(&intr->spinlock, flags);
/* Search for free slots */
which_bit = find_first_zero_bit(intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
if (which_bit >= MIFINTRBIT_NUM_INT)
goto error;
if (intr->mifintrbit_irq_handler[which_bit] != mifintrbit_default_handler)
goto error; /* the error path releases the spinlock */
/* Get abs implementation */
mif = intr->mif;
/* Mask to prevent spurious incoming interrupts */
mif->irq_bit_mask(mif, which_bit);
/* Clear the interrupt */
mif->irq_bit_clear(mif, which_bit);
/* Register the handler */
intr->mifintrbit_irq_handler[which_bit] = handler;
intr->irq_data[which_bit] = data;
/* Once registration is set, and IRQ has been cleared, unmask the interrupt */
mif->irq_bit_unmask(mif, which_bit);
/* Update bit mask */
set_bit(which_bit, intr->bitmap_tohost);
spin_unlock_irqrestore(&intr->spinlock, flags);
return which_bit;
error:
spin_unlock_irqrestore(&intr->spinlock, flags);
SCSC_TAG_ERR(MIF, "Error registering irq\n");
return -EIO;
}
int mifintrbit_free_tohost(struct mifintrbit *intr, int which_bit)
{
struct scsc_mif_abs *mif;
unsigned long flags;
if (which_bit >= MIFINTRBIT_NUM_INT)
goto error;
spin_lock_irqsave(&intr->spinlock, flags);
/* Get abs implementation */
mif = intr->mif;
/* Mask to prevent spurious incoming interrupts */
mif->irq_bit_mask(mif, which_bit);
/* Set the handler with default */
intr->mifintrbit_irq_handler[which_bit] = mifintrbit_default_handler;
intr->irq_data[which_bit] = NULL;
/* Clear the interrupt for hygiene */
mif->irq_bit_clear(mif, which_bit);
/* Update bit mask */
clear_bit(which_bit, intr->bitmap_tohost);
spin_unlock_irqrestore(&intr->spinlock, flags);
return 0;
error:
SCSC_TAG_ERR(MIF, "Error unregistering irq\n");
return -EIO;
}
int mifintrbit_alloc_fromhost(struct mifintrbit *intr, enum scsc_mif_abs_target target)
{
unsigned long flags;
int which_bit = 0;
unsigned long *p;
spin_lock_irqsave(&intr->spinlock, flags);
if (target == SCSC_MIF_ABS_TARGET_R4)
p = intr->bitmap_fromhost_r4;
else if (target == SCSC_MIF_ABS_TARGET_M4)
p = intr->bitmap_fromhost_m4;
else
goto error;
/* Search for free slots */
which_bit = find_first_zero_bit(p, MIFINTRBIT_NUM_INT);
if (which_bit == MIFINTRBIT_NUM_INT)
goto error;
/* Update bit mask */
set_bit(which_bit, p);
spin_unlock_irqrestore(&intr->spinlock, flags);
return which_bit;
error:
spin_unlock_irqrestore(&intr->spinlock, flags);
SCSC_TAG_ERR(MIF, "Error allocating bit %d on %s\n",
which_bit, target ? "M4" : "R4");
return -EIO;
}
int mifintrbit_free_fromhost(struct mifintrbit *intr, int which_bit, enum scsc_mif_abs_target target)
{
unsigned long flags;
unsigned long *p;
spin_lock_irqsave(&intr->spinlock, flags);
if (which_bit >= MIFINTRBIT_NUM_INT)
goto error;
if (target == SCSC_MIF_ABS_TARGET_R4)
p = intr->bitmap_fromhost_r4;
else if (target == SCSC_MIF_ABS_TARGET_M4)
p = intr->bitmap_fromhost_m4;
else
goto error;
/* Clear bit mask */
clear_bit(which_bit, p);
spin_unlock_irqrestore(&intr->spinlock, flags);
return 0;
error:
spin_unlock_irqrestore(&intr->spinlock, flags);
SCSC_TAG_ERR(MIF, "Error freeing bit %d on %s\n",
which_bit, target ? "M4" : "R4");
return -EIO;
}
/* core API */
void mifintrbit_deinit(struct mifintrbit *intr)
{
unsigned long flags;
int i;
spin_lock_irqsave(&intr->spinlock, flags);
/* Set all handlers to default before unregistering the handler */
for (i = 0; i < MIFINTRBIT_NUM_INT; i++)
intr->mifintrbit_irq_handler[i] = mifintrbit_default_handler;
intr->mif->irq_unreg_handler(intr->mif);
spin_unlock_irqrestore(&intr->spinlock, flags);
}
void mifintrbit_init(struct mifintrbit *intr, struct scsc_mif_abs *mif)
{
int i;
spin_lock_init(&intr->spinlock);
/* Set all handlers to default before hooking the hardware interrupt */
for (i = 0; i < MIFINTRBIT_NUM_INT; i++)
intr->mifintrbit_irq_handler[i] = mifintrbit_default_handler;
/* reset bitmaps */
bitmap_zero(intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
bitmap_zero(intr->bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
bitmap_zero(intr->bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);
/**
 * Pre-allocate/reserve MIF interrupt bit 0 in both the
 * fromhost_r4 and fromhost_m4 bitmaps.
 *
 * These bits are used for the purpose of forcing panics from
 * either the MX manager or the GDB monitor channels.
 */
set_bit(MIFINTRBIT_RESERVED_PANIC_R4, intr->bitmap_fromhost_r4);
set_bit(MIFINTRBIT_RESERVED_PANIC_M4, intr->bitmap_fromhost_m4);
/* register isr with mif abstraction */
mif->irq_reg_handler(mif, mifiintrman_isr, (void *)intr);
/* cache mif */
intr->mif = mif;
}
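/*
 * A hypothetical usage sketch; example_bit_handler and example_mifintrbit_use
 * are illustrative names. Handlers run from the ISR above, so they must not
 * block and should clear their own bit.
 */
static void example_bit_handler(int which_bit, void *data)
{
struct mifintrbit *intr = data;
intr->mif->irq_bit_clear(intr->mif, which_bit);
/* ... service the interrupt; do not block ... */
}
static int __maybe_unused example_mifintrbit_use(struct mifintrbit *intr)
{
int bit = mifintrbit_alloc_tohost(intr, example_bit_handler, intr);
if (bit < 0)
return bit; /* no free bit, or the slot was already taken */
/* ... bit is now unmasked and routed to example_bit_handler ... */
return mifintrbit_free_tohost(intr, bit);
}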

View file

@ -0,0 +1,52 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __MIFINTRBIT_H
#define __MIFINTRBIT_H
#include <linux/spinlock.h>
/** MIF Interrupt Bit Handler prototype. */
typedef void (*mifintrbit_handler)(int which_bit, void *data);
struct mifintrbit; /* fwd - opaque pointer */
#define MIFINTRBIT_NUM_INT 16
/** Reserve MIF interrupt bit 0 in the to-r4 and to-m4 registers for the purpose of forcing panics */
#define MIFINTRBIT_RESERVED_PANIC_R4 0
#define MIFINTRBIT_RESERVED_PANIC_M4 0
void mifintrbit_init(struct mifintrbit *intr, struct scsc_mif_abs *mif);
void mifintrbit_deinit(struct mifintrbit *intr);
/** Allocates a TOHOST MIF interrupt bit and associates a handler with it.
 * Returns the bit index. */
int mifintrbit_alloc_tohost(struct mifintrbit *intr, mifintrbit_handler handler, void *data);
/** Deallocates a TOHOST MIF interrupt bit */
int mifintrbit_free_tohost(struct mifintrbit *intr, int which_bit);
/* Gets an interrupt bit associated with the target (R4/M4) in the FROMHOST direction.
 * Returns the allocated bit index, or -EIO on error */
int mifintrbit_alloc_fromhost(struct mifintrbit *intr, enum scsc_mif_abs_target target);
/* Frees an interrupt bit associated with the target (R4/M4) in the FROMHOST direction.
 * Returns 0 on success, -EIO on error */
int mifintrbit_free_fromhost(struct mifintrbit *intr, int which_bit, enum scsc_mif_abs_target target);
struct mifintrbit {
void(*mifintrbit_irq_handler[MIFINTRBIT_NUM_INT]) (int irq, void *data);
void *irq_data[MIFINTRBIT_NUM_INT];
struct scsc_mif_abs *mif;
/* Use a spinlock as it may be taken in IRQ context */
spinlock_t spinlock;
/* Interrupt allocation bitmaps */
DECLARE_BITMAP(bitmap_tohost, MIFINTRBIT_NUM_INT);
DECLARE_BITMAP(bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
DECLARE_BITMAP(bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);
};
#endif

View file

@ -0,0 +1,116 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/* uses */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <scsc/scsc_logring.h>
#include "scsc_mif_abs.h"
/* Implements */
#include "mifmboxman.h"
int mifmboxman_init(struct mifmboxman *mbox)
{
if (mbox->in_use)
return -EBUSY;
mutex_init(&mbox->lock);
mbox->mbox_free = MIFMBOX_NUM;
mbox->in_use = true;
bitmap_zero(mbox->bitmap, MIFMBOX_NUM);
return 0;
}
bool mifmboxman_alloc_mboxes(struct mifmboxman *mbox, int n, int *first_mbox_index)
{
unsigned int index = 0;
unsigned int available;
u8 i;
mutex_lock(&mbox->lock);
if ((n > MIFMBOX_NUM) || (n == 0) || !mbox->in_use)
goto error;
while (index <= (MIFMBOX_NUM - n)) {
available = 0;
/* Search consecutive blocks */
for (i = 0; i < n; i++) {
if (test_bit((i + index), mbox->bitmap))
break;
available++;
}
if (available == n) {
*first_mbox_index = index;
for (i = 0; i < n; i++)
set_bit(index++, mbox->bitmap);
mbox->mbox_free -= n;
goto exit;
} else
index = index + available + 1;
}
error:
SCSC_TAG_ERR(MIF, "Error allocating mbox\n");
mutex_unlock(&mbox->lock);
return false;
exit:
mutex_unlock(&mbox->lock);
return true;
}
void mifmboxman_free_mboxes(struct mifmboxman *mbox, int first_mbox_index, int n)
{
int index = 0;
int total_free = 0;
mutex_lock(&mbox->lock);
if ((n > MIFMBOX_NUM) ||
((n + first_mbox_index) > MIFMBOX_NUM) ||
(n == 0) ||
!mbox->in_use)
goto error;
for (index = first_mbox_index; index < (first_mbox_index + n); index++)
if (test_bit(index, mbox->bitmap)) {
clear_bit(index, mbox->bitmap);
total_free++;
}
mbox->mbox_free += total_free;
mutex_unlock(&mbox->lock);
return;
error:
SCSC_TAG_ERR(MIF, "Error freeing mbox\n");
mutex_unlock(&mbox->lock);
}
u32 *mifmboxman_get_mbox_ptr(struct mifmboxman *mbox, struct scsc_mif_abs *mif_abs, int mbox_index)
{
/* Avoid unused parameter error */
(void)mbox;
return mif_abs->get_mbox_ptr(mif_abs, mbox_index);
}
int mifmboxman_deinit(struct mifmboxman *mbox)
{
mutex_lock(&mbox->lock);
if (!mbox->in_use) {
mutex_unlock(&mbox->lock);
return -ENODEV;
}
mbox->in_use = false;
mutex_unlock(&mbox->lock);
return 0;
}
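/*
 * A hypothetical usage sketch, assuming a valid struct scsc_mif_abs pointer
 * obtained from the core; example_mboxes is an illustrative name.
 */
static int __maybe_unused example_mboxes(struct mifmboxman *mbox, struct scsc_mif_abs *mif_abs)
{
int first = 0;
u32 *ptr;
if (mifmboxman_init(mbox))
return -EBUSY;
if (!mifmboxman_alloc_mboxes(mbox, 2, &first)) { /* two consecutive mailboxes */
mifmboxman_deinit(mbox);
return -ENOSPC;
}
ptr = mifmboxman_get_mbox_ptr(mbox, mif_abs, first);
if (ptr)
*ptr = 0xcafe; /* post a value in the first mailbox */
mifmboxman_free_mboxes(mbox, first, 2);
return mifmboxman_deinit(mbox);
}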

View file

@ -0,0 +1,33 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __MIFMBOXMAN_H
#define __MIFMBOXMAN_H
#include <linux/mutex.h>
/* TODO: Needs to define the max mem */
struct mifmboxman;
struct scsc_mif_abs;
struct mutex;
int mifmboxman_init(struct mifmboxman *mbox);
bool mifmboxman_alloc_mboxes(struct mifmboxman *mbox, int n, int *first_mbox_index);
void mifmboxman_free_mboxes(struct mifmboxman *mbox, int first_mbox_index, int n);
u32 *mifmboxman_get_mbox_ptr(struct mifmboxman *mbox, struct scsc_mif_abs *mif_abs, int mbox_index);
int mifmboxman_deinit(struct mifmboxman *mbox);
#define MIFMBOX_NUM 8
/* Included in core.c; treat it as opaque */
struct mifmboxman {
bool in_use;
u32 mbox_free;
DECLARE_BITMAP(bitmap, MIFMBOX_NUM);
struct mutex lock;
};
#endif

View file

@ -0,0 +1,347 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <scsc/scsc_logring.h>
#include "mifproc.h"
#include "scsc_mif_abs.h"
static struct proc_dir_entry *procfs_dir;
static bool mif_val;
/* WARNING --- SINGLETON FOR THE TIME BEING */
/* EXTEND PROC ENTRIES IF NEEDED!!!!! */
struct scsc_mif_abs *mif_global;
static int mifprocfs_open_file_generic(struct inode *inode, struct file *file)
{
file->private_data = MIF_PDE_DATA(inode);
return 0;
}
#if 0
MIF_PROCFS_RW_FILE_OPS(mif_trg);
#endif
MIF_PROCFS_RW_FILE_OPS(mif_dump);
MIF_PROCFS_RW_FILE_OPS(mif_writemem);
MIF_PROCFS_SEQ_FILE_OPS(mif_dbg);
static ssize_t mifprocfs_mif_writemem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[128];
int pos = 0;
const size_t bufsz = sizeof(buf);
/* Avoid unused parameter error */
(void)file;
pos += scnprintf(buf + pos, bufsz - pos, "%d\n", (mif_val ? 1 : 0));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t mifprocfs_mif_dump_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[128];
int pos = 0;
const size_t bufsz = sizeof(buf);
/* Avoid unused parameter error */
(void)file;
pos += scnprintf(buf + pos, bufsz - pos, "%d\n", (mif_val ? 1 : 0));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t mifprocfs_mif_writemem_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[128];
char *sptr, *token;
unsigned int len = 0, pass = 0;
u32 value = 0, address = 0;
int match = 0;
void *mem;
/* Avoid unused parameter error */
(void)file;
(void)ppos;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
while ((token = strsep(&sptr, " ")) != NULL) {
switch (pass) {
/* register */
case 0:
if ((token[0] == '0') && (token[1] == 'x')) {
if (kstrtou32(token, 16, &address)) {
SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
goto error;
}
} else {
SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
goto error;
}
break;
/* value */
case 1:
if ((token[0] == '0') && (token[1] == 'x')) {
if (kstrtou32(token, 16, &value)) {
SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
goto error;
}
} else {
SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
goto error;
}
break;
}
pass++;
}
if (pass != 2 && !match) {
SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
goto error;
}
/* Get memory offset */
mem = mif_global->get_mifram_ptr(mif_global, 0);
if (!mem) {
SCSC_TAG_INFO(MIF, "Mem not allocated\n");
goto error;
}
SCSC_TAG_INFO(MIF, "Setting value 0x%x at address 0x%x offset\n", value, address);
*((u32 *)(mem + address)) = value;
error:
return count;
}
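/*
 * Userspace sketch (illustrative) of driving the write parser above. The
 * procfs path is an assumption derived from procdir below and from
 * MIF_PROCFS_RW_FILE_OPS(mif_writemem); both tokens must be hex with a 0x
 * prefix, separated by a space.
 */
#if 0 /* userspace, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
int main(void)
{
const char cmd[] = "0x00000010 0xcafecafe"; /* <offset> <value> */
int fd = open("/proc/driver/mif_ctrl/mif_writemem", O_WRONLY);
if (fd < 0)
return 1;
write(fd, cmd, strlen(cmd));
close(fd);
return 0;
}
#endif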
static ssize_t mifprocfs_mif_dump_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[128];
char *sptr, *token;
unsigned int len = 0, pass = 0;
u32 address = 0;
u32 size;
u8 unit;
void *mem;
(void)file;
(void)ppos;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
while ((token = strsep(&sptr, " ")) != NULL) {
switch (pass) {
/* address */
case 0:
if ((token[0] == '0') && (token[1] == 'x')) {
if (kstrtou32(token, 16, &address)) {
SCSC_TAG_INFO(MIF, "Incorrect format,,,address should start by 0x\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 256 8\"\n");
goto error;
}
SCSC_TAG_INFO(MIF, "address %d 0x%x\n", address, address);
} else {
SCSC_TAG_INFO(MIF, "Incorrect format,,,address should start by 0x\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 256 8\"\n");
goto error;
}
break;
/* size */
case 1:
if (kstrtou32(token, 0, &size)) {
SCSC_TAG_INFO(MIF, "Incorrect format,,, for size\n");
goto error;
}
SCSC_TAG_INFO(MIF, "size: %d\n", size);
break;
/* unit */
case 2:
if (kstrtou8(token, 0, &unit)) {
SCSC_TAG_INFO(MIF, "Incorrect format,,, for unit\n");
goto error;
}
if ((unit != 8) && (unit != 16) && (unit != 32)) {
SCSC_TAG_INFO(MIF, "Unit %d should be 8/16/32\n", unit);
goto error;
}
SCSC_TAG_INFO(MIF, "unit: %d\n", unit);
break;
}
pass++;
}
if (pass != 3) {
SCSC_TAG_INFO(MIF, "Wrong format: <start_address> <size> <unit>\n");
SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 256 8\"\n");
goto error;
}
mem = mif_global->get_mifram_ptr(mif_global, 0);
SCSC_TAG_INFO(MIF, "mem %p\n", mem);
if (!mem) {
SCSC_TAG_INFO(MIF, "Mem not allocated\n");
goto error;
}
{
#define offset 16
#define bits_byte 8
#define bits_void 32
union {
unsigned int i;
u16 c[2];
} addr_2;
union {
unsigned int i;
u8 c[4];
} addr_4;
unsigned int i;
int byte_addr = bits_void / unit;
int columns = (offset * bits_byte) / unit;
int total = 0;
unsigned int value;
unsigned fpga_offset = address;
if (byte_addr == 1) {
SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------\n");
SCSC_TAG_INFO(MIF, "%s %16s %4s %10s %10s %10s\n", "Phy addr", "ref addr", "0", "4", "8", "c");
SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------\n");
} else if (byte_addr == 2) {
SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------------------\n");
SCSC_TAG_INFO(MIF, "%s %16s %4s %6s %6s %6s %6s %6s %6s %6s\n", "Phy addr", "ref addr", "0", "2", "4", "6", "8", "a", "c", "e");
SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------------------\n");
} else if (byte_addr == 4) {
SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------------------------------------------\n");
SCSC_TAG_INFO(MIF, "%s %16s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s\n",
"Phy addr", "ref addr",
"0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "a", "b", "c", "d", "e", "f");
SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------------------------------------------\n");
}
/* Add offset */
mem = mem + address;
for (i = 0; i < size; i++) {
if (!(i % 4))
SCSC_TAG_INFO(MIF, "%p[0x%08x]", mem + 4 * i, fpga_offset + i * 4);
if (byte_addr == 4) {
addr_4.i = *(unsigned int *)(mem + 4 * i);
SCSC_TAG_INFO(MIF, " 0x%02x 0x%02x 0x%02x 0x%02x", addr_4.c[0], addr_4.c[1], addr_4.c[2], addr_4.c[3]);
total += byte_addr;
} else if (byte_addr == 2) {
addr_2.i = *(unsigned int *)(mem + 4 * i);
SCSC_TAG_INFO(MIF, " 0x%04x 0x%04x", addr_2.c[0], addr_2.c[1]);
total += byte_addr;
} else if (byte_addr == 1) {
value = *(unsigned int *)(mem + 4 * i);
SCSC_TAG_INFO(MIF, " 0x%08x", value);
total += byte_addr;
}
if (total == columns) {
total = 0;
SCSC_TAG_INFO(MIF, "\n");
}
}
SCSC_TAG_INFO(MIF, "\n");
}
error:
return count;
}
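/* Illustrative use from user space; dumps 256 words of 32 bits starting at
 * offset 0x0 of the MIF shared memory:
 *
 *   echo "0x00000000 256 32" > /proc/driver/mif_ctrl/mif_dump
 */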
static int mifprocfs_mif_dbg_show(struct seq_file *m, void *v)
{
/* Avoid unused parameter error */
(void)v;
if (!mif_global) {
seq_puts(m, "endpoint not registered");
return 0;
}
return 0;
}
static const char *procdir = "driver/mif_ctrl";
#define MIF_DIRLEN 128
int mifproc_create_proc_dir(struct scsc_mif_abs *mif)
{
char dir[MIF_DIRLEN];
struct proc_dir_entry *parent;
/* WARNING --- SINGLETON FOR THE TIME BEING */
/* EXTEND PROC ENTRIES IF NEEDED!!!!! */
if (mif_global)
return -EBUSY;
(void)snprintf(dir, sizeof(dir), "%s", procdir);
parent = proc_mkdir(dir, NULL);
if (parent) {
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
parent->data = NULL;
#endif
procfs_dir = parent;
MIF_PROCFS_ADD_FILE(NULL, mif_writemem, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MIF_PROCFS_ADD_FILE(NULL, mif_dump, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MIF_PROCFS_SEQ_ADD_FILE(NULL, mif_dbg, parent, S_IRUSR | S_IRGRP | S_IROTH);
} else {
SCSC_TAG_INFO(MIF, "failed to create /proc dir\n");
return -EINVAL;
}
mif_global = mif;
return 0;
err:
return -EINVAL;
}
void mifproc_remove_proc_dir(void)
{
if (procfs_dir) {
char dir[MIF_DIRLEN];
MIF_PROCFS_REMOVE_FILE(mif_writemem, procfs_dir);
MIF_PROCFS_REMOVE_FILE(mif_dump, procfs_dir);
MIF_PROCFS_REMOVE_FILE(mif_dbg, procfs_dir);
(void)snprintf(dir, sizeof(dir), "%s", procdir);
remove_proc_entry(dir, NULL);
procfs_dir = NULL;
}
mif_global = NULL;
}

View file

@@ -0,0 +1,104 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/*
* Chip Manager /proc interface
*/
#include <linux/proc_fs.h>
#include <linux/version.h>
#include <linux/seq_file.h>
#ifndef SCSC_MIF_PROC_H
#define SCSC_MIF_PROC_H
#ifndef AID_MX
#define AID_MX 0444
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MIF_PDE_DATA(inode) PDE_DATA(inode)
#else
#define MIF_PDE_DATA(inode) (PDE(inode)->data)
#endif
#define MIF_PROCFS_SEQ_FILE_OPS(name) \
static int mifprocfs_ ## name ## _show(struct seq_file *m, void *v); \
static int mifprocfs_ ## name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, mifprocfs_ ## name ## _show, MIF_PDE_DATA(inode)); \
} \
static const struct file_operations mifprocfs_ ## name ## _fops = { \
.open = mifprocfs_ ## name ## _open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
#define MIF_PROCFS_SEQ_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry; \
entry = proc_create_data(# name, mode, parent, &mifprocfs_ ## name ## _fops, _sdev); \
if (!entry) { \
goto err; \
} \
MIF_PROCFS_SET_UID_GID(entry); \
} while (0)
#define MIF_PROCFS_RW_FILE_OPS(name) \
static ssize_t mifprocfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
static ssize_t mifprocfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
static const struct file_operations mifprocfs_ ## name ## _fops = { \
.read = mifprocfs_ ## name ## _read, \
.write = mifprocfs_ ## name ## _write, \
.open = mifprocfs_open_file_generic, \
.llseek = generic_file_llseek \
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MIF_PROCFS_SET_UID_GID(_entry) \
do { \
kuid_t proc_kuid = KUIDT_INIT(AID_MX); \
kgid_t proc_kgid = KGIDT_INIT(AID_MX); \
proc_set_user(_entry, proc_kuid, proc_kgid); \
} while (0)
#else
#define MIF_PROCFS_SET_UID_GID(entry) \
do { \
(entry)->uid = AID_MX; \
(entry)->gid = AID_MX; \
} while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MIF_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &mifprocfs_ ## name ## _fops, _sdev); \
MIF_PROCFS_SET_UID_GID(entry); \
} while (0)
#else
#define MIF_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry; \
entry = create_proc_entry(# name, mode, parent); \
if (entry) { \
entry->proc_fops = &mifprocfs_ ## name ## _fops; \
entry->data = _sdev; \
MIF_PROCFS_SET_UID_GID(entry); \
} \
} while (0)
#endif
#define MIF_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
struct scsc_mif_abs;
int mifproc_create_proc_dir(struct scsc_mif_abs *mif);
void mifproc_remove_proc_dir(void);
struct mifproc {
};
#endif /* SCSC_MIF_PROC_H */

View file

@@ -0,0 +1,174 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <scsc/scsc_logring.h>
#include "scsc_mif_abs.h"
#include "miframman.h"
/* Caller should provide locking */
void miframman_init(struct miframman *ram, void *start_dram, size_t size_pool)
{
mutex_init(&ram->lock);
ram->num_blocks = size_pool / MIFRAMMAN_BLOCK_SIZE;
if (ram->num_blocks == 0) {
SCSC_TAG_ERR(MIF, "Pool size < BLOCK_SIZE\n");
return;
}
if (ram->num_blocks >= MIFRAMMAN_NUM_BLOCKS) {
SCSC_TAG_ERR(MIF, "Not enough memory\n");
return;
}
memset(ram->bitmap, BLOCK_FREE, sizeof(ram->bitmap));
ram->start_dram = start_dram;
ram->size_pool = size_pool;
ram->free_mem = ram->num_blocks * MIFRAMMAN_BLOCK_SIZE;
}
void *__miframman_alloc(struct miframman *ram, size_t nbytes)
{
unsigned int index = 0;
unsigned int available;
unsigned int i;
size_t num_blocks;
void *free_mem = NULL;
if (!nbytes || nbytes > ram->free_mem)
goto end;
/* Number of blocks required (rounding up) */
num_blocks = nbytes / MIFRAMMAN_BLOCK_SIZE +
((nbytes % MIFRAMMAN_BLOCK_SIZE) > 0 ? 1 : 0);
if (num_blocks > ram->num_blocks)
goto end;
while (index <= (ram->num_blocks - num_blocks)) {
available = 0;
/* Search consecutive blocks */
for (i = 0; i < num_blocks; i++) {
if (ram->bitmap[i + index] != BLOCK_FREE)
break;
available++;
}
if (available == num_blocks) {
free_mem = ram->start_dram +
MIFRAMMAN_BLOCK_SIZE * index;
/* Mark the blocks as used */
ram->bitmap[index++] = BLOCK_BOUND;
for (i = 1; i < num_blocks; i++)
ram->bitmap[index++] = BLOCK_INUSE;
ram->free_mem -= num_blocks * MIFRAMMAN_BLOCK_SIZE;
goto exit;
} else
index = index + available + 1;
}
end:
SCSC_TAG_INFO(MIF, "Not enough memory\n");
return NULL;
exit:
return free_mem;
}
#define MIFRAMMAN_ALIGN(mem, align) \
((void *)((((uintptr_t)(mem) + (align + sizeof(void *))) \
& (~(uintptr_t)(align - 1)))))
#define MIFRAMMAN_PTR(mem) \
(*(((void **)((uintptr_t)(mem) & \
(~(uintptr_t)(sizeof(void *) - 1)))) - 1))
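/* Worked example (a sketch, assuming a 64-bit build and align = 64):
 * __miframman_alloc() returns a raw block pointer; MIFRAMMAN_ALIGN() skips
 * at least sizeof(void *) bytes and rounds up to the next 64-byte boundary;
 * MIFRAMMAN_PTR() then names the pointer-sized slot immediately below the
 * aligned address, where the raw pointer is stashed so miframman_free() can
 * recover it later.
 */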
void *miframman_alloc(struct miframman *ram, size_t nbytes, size_t align)
{
void *mem, *align_mem = NULL;
mutex_lock(&ram->lock);
if (!is_power_of_2(align) || nbytes == 0)
goto end;
if (align < sizeof(void *))
align = sizeof(void *);
mem = __miframman_alloc(ram, nbytes + align + sizeof(void *));
if (!mem)
goto end;
align_mem = MIFRAMMAN_ALIGN(mem, align);
/* Store allocated pointer */
MIFRAMMAN_PTR(align_mem) = mem;
end:
mutex_unlock(&ram->lock);
return align_mem;
}
void __miframman_free(struct miframman *ram, void *mem)
{
unsigned int index, num_blocks = 0;
if (ram->start_dram == NULL || !mem) {
SCSC_TAG_ERR(MIF, "Mem is NULL\n");
return;
}
/* Get block index */
index = (unsigned int)((mem - ram->start_dram)
/ MIFRAMMAN_BLOCK_SIZE);
/* Check */
if (index >= ram->num_blocks) {
SCSC_TAG_ERR(MIF, "Incorrect index %d\n", index);
return;
}
/* Check it is a Boundary block */
if (ram->bitmap[index] != BLOCK_BOUND) {
SCSC_TAG_ERR(MIF, "Incorrect Block descriptor\n");
return;
}
ram->bitmap[index++] = BLOCK_FREE;
num_blocks++;
while (index < ram->num_blocks && ram->bitmap[index] == BLOCK_INUSE) {
ram->bitmap[index++] = BLOCK_FREE;
num_blocks++;
}
ram->free_mem += num_blocks * MIFRAMMAN_BLOCK_SIZE;
}
void miframman_free(struct miframman *ram, void *mem)
{
mutex_lock(&ram->lock);
/* Restore allocated pointer */
if (mem)
__miframman_free(ram, MIFRAMMAN_PTR(mem));
mutex_unlock(&ram->lock);
}
/* Caller should provide locking */
void miframman_deinit(struct miframman *ram)
{
/* Mark all the blocks as INUSE to prevent new allocations */
memset(ram->bitmap, BLOCK_INUSE, sizeof(ram->bitmap));
ram->num_blocks = 0;
ram->start_dram = NULL;
ram->size_pool = 0;
ram->free_mem = 0;
}
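/* Minimal usage sketch (illustrative; the pool base and sizes below are
 * assumptions, not values taken from this driver):
 *
 *   static struct miframman ram;
 *   void *p;
 *
 *   miframman_init(&ram, start_dram, 2 * 1024 * 1024);
 *   p = miframman_alloc(&ram, 1024, 64);
 *   if (p)
 *           miframman_free(&ram, p);
 *   miframman_deinit(&ram);
 */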

View file

@@ -0,0 +1,39 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __MIFRAMMAN_H
#define __MIFRAMMAN_H
#include <linux/mutex.h>
/* TODO: Needs to define the max mem */
struct miframman;
void miframman_init(struct miframman *ram, void *start_dram, size_t size_pool);
void *miframman_alloc(struct miframman *ram, size_t nbytes, size_t align);
void miframman_free(struct miframman *ram, void *mem);
void miframman_deinit(struct miframman *ram);
#define MIFRAMMAN_MAXMEM (4 * 1024 * 1024)
#define MIFRAMMAN_BLOCK_SIZE (2 * 1024)
#define MIFRAMMAN_NUM_BLOCKS ((MIFRAMMAN_MAXMEM) / (MIFRAMMAN_BLOCK_SIZE))
#define BLOCK_FREE 0
#define BLOCK_INUSE 1
#define BLOCK_BOUND 2
/* Included in core.c; treat it as opaque */
struct miframman {
void *start_dram;
size_t size_pool;
char bitmap[MIFRAMMAN_NUM_BLOCKS]; /* Zero initialized -> all blocks free */
u32 num_blocks;
u32 free_mem;
struct mutex lock;
};
#endif

View file

@@ -0,0 +1,173 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* MIF Byte Stream (Implementation)
*/
/* Implements */
#include "scsc_mx_impl.h"
#include "mifstream.h"
/* Uses */
#include "mifintrbit.h"
void mif_stream_config_serialise(struct mif_stream *stream, struct mxstreamconf *stream_conf)
{
stream_conf->read_bit_idx = stream->read_bit_idx;
stream_conf->write_bit_idx = stream->write_bit_idx;
cpacketbuffer_config_serialise(&stream->buffer, &stream_conf->buf_conf);
}
int mif_stream_init(struct mif_stream *stream, enum scsc_mif_abs_target target, enum MIF_STREAM_DIRECTION direction, uint32_t num_packets, uint32_t packet_size,
struct scsc_mx *mx, enum MIF_STREAM_INTRBIT_TYPE intrbit, mifintrbit_handler tohost_irq_handler, void *data)
{
struct mifintrbit *intr;
int r, r1, r2;
stream->mx = mx;
r = cpacketbuffer_init(&stream->buffer, num_packets, packet_size, mx);
if (r)
return r;
intr = scsc_mx_get_intrbit(mx);
r1 = mifintrbit_alloc_tohost(intr, tohost_irq_handler, data);
if (r1 < 0) {
cpacketbuffer_release(&stream->buffer);
return r1;
}
/**
* MIF interrupt bit 0 in both the to-r4 and to-m4 registers is reserved
* for the purpose of forcing panics from the MX Manager directly or via
* the gdb monitor stacks.
*
* At stream initialization the gdb transport requests the reserved bits
* rather than dynamically allocating interrupt bits.
*
* So if the requested interrupt bit type is Reserved, just assign the
* pre-reserved interrupt bits.
*/
if (intrbit == MIF_STREAM_INTRBIT_TYPE_RESERVED) {
if (target == SCSC_MIF_ABS_TARGET_M4)
r2 = MIFINTRBIT_RESERVED_PANIC_M4;
else
r2 = MIFINTRBIT_RESERVED_PANIC_R4;
} else
r2 = mifintrbit_alloc_fromhost(intr, target);
if (r2 < 0) {
cpacketbuffer_release(&stream->buffer);
mifintrbit_free_tohost(intr, r1);
return r2;
}
switch (direction) {
case MIF_STREAM_DIRECTION_OUT:
stream->read_bit_idx = r1;
stream->write_bit_idx = r2;
break;
case MIF_STREAM_DIRECTION_IN:
/* Default value for the shared memory region */
memset(stream->buffer.buffer, 0xff, num_packets * packet_size);
/* Commit */
smp_wmb();
stream->read_bit_idx = r2;
stream->write_bit_idx = r1;
break;
default:
cpacketbuffer_release(&stream->buffer);
mifintrbit_free_tohost(intr, r1);
mifintrbit_free_fromhost(intr, r2, target);
return -EINVAL;
}
stream->direction = direction;
stream->peer = target;
return 0;
}
void mif_stream_release(struct mif_stream *stream)
{
struct mifintrbit *intr;
intr = scsc_mx_get_intrbit(stream->mx);
if (stream->direction == MIF_STREAM_DIRECTION_IN) {
mifintrbit_free_tohost(intr, stream->write_bit_idx);
mifintrbit_free_fromhost(intr, stream->read_bit_idx, stream->peer);
} else {
mifintrbit_free_tohost(intr, stream->read_bit_idx);
mifintrbit_free_fromhost(intr, stream->write_bit_idx, stream->peer);
}
cpacketbuffer_release(&stream->buffer);
}
uint32_t mif_stream_read(struct mif_stream *stream, void *buf, uint32_t num_bytes)
{
struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
uint32_t num_bytes_read = cpacketbuffer_read(&stream->buffer, buf, num_bytes);
if (num_bytes_read > 0)
/* Signal that the read is finished to anyone interested */
mif_abs->irq_bit_set(mif_abs, stream->read_bit_idx, stream->peer);
return num_bytes_read;
}
const void *mif_stream_peek(struct mif_stream *stream, const void *current_packet)
{
return cpacketbuffer_peek(&stream->buffer, current_packet);
}
void mif_stream_peek_complete(struct mif_stream *stream, const void *packet)
{
struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
cpacketbuffer_peek_complete(&stream->buffer, packet);
/* Signal that the read is finished to anyone interested */
mif_abs->irq_bit_set(mif_abs, stream->read_bit_idx, stream->peer);
}
bool mif_stream_write(struct mif_stream *stream, const void *buf, uint32_t num_bytes)
{
struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
if (!cpacketbuffer_write(&stream->buffer, buf, num_bytes))
return false;
/* Kick the assigned interrupt to let others know new data is available */
mif_abs->irq_bit_set(mif_abs, stream->write_bit_idx, stream->peer);
return true;
}
bool mif_stream_write_gather(struct mif_stream *stream, const void **bufs, uint32_t *lengths, uint32_t num_bufs)
{
struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
if (!cpacketbuffer_write_gather(&stream->buffer, bufs, lengths, num_bufs))
return false;
/* Kick the assigned interrupt to let others know new data is available */
mif_abs->irq_bit_set(mif_abs, stream->write_bit_idx, stream->peer);
return true;
}
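/* Illustrative gather write (a sketch; hdr, payload, their lengths and
 * handle_full_stream() are hypothetical):
 *
 *   const void *bufs[2] = { hdr, payload };
 *   uint32_t lens[2] = { hdr_len, payload_len };
 *
 *   if (!mif_stream_write_gather(stream, bufs, lens, 2))
 *           handle_full_stream();
 */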
uint32_t mif_stream_block_size(struct mif_stream *stream)
{
return cpacketbuffer_packet_size(&stream->buffer);
}
uint8_t mif_stream_read_interrupt(struct mif_stream *stream)
{
return stream->read_bit_idx;
}
uint8_t mif_stream_write_interrupt(struct mif_stream *stream)
{
return stream->write_bit_idx;
}

View file

@@ -0,0 +1,178 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* MIF stream (Interface)
*
* Provides a one-way communication mechanism between two points. The consumer side
* will be notified via an interrupt when the producer side writes data to the
* stream, and likewise the producer will be notified when the consumer has read
* data from the stream.
*
* It is expected that the data sent across the stream consists of fixed-size
* packets, and that the underlying storage mechanism is initialised to use a packet size
* that is at least as large as the largest message size. If this is not the case,
* callers are responsible for handling reading of partial messages from the stream
* in multiples of the packet size.
*/
#ifndef MIFSTREAM_H__
#define MIFSTREAM_H__
/** Uses */
#include "cpacket_buffer.h"
#include "mifintrbit.h"
enum MIF_STREAM_PEER {
MIF_STREAM_PEER_R4,
MIF_STREAM_PEER_M4,
};
enum MIF_STREAM_DIRECTION {
MIF_STREAM_DIRECTION_IN,
MIF_STREAM_DIRECTION_OUT,
};
/**
* Defines for the MIF Stream interrupt bits
*
* MIF_STREAM_INTRBIT_TYPE_RESERVED: the bits are reserved
* at initialization and are assigned to GDB transport channels.
* They are used to force panics from either the MX manager or GDB.
*
* MIF_STREAM_INTRBIT_TYPE_ALLOC: the bits are allocated dynamically
* when a stream is initialized
*/
enum MIF_STREAM_INTRBIT_TYPE {
MIF_STREAM_INTRBIT_TYPE_RESERVED,
MIF_STREAM_INTRBIT_TYPE_ALLOC,
};
struct mif_stream;
/**
* Initialises MIF Stream state.
*/
int mif_stream_init(struct mif_stream *stream, enum scsc_mif_abs_target target, enum MIF_STREAM_DIRECTION direction, uint32_t num_packets, uint32_t packet_size,
struct scsc_mx *mx, enum MIF_STREAM_INTRBIT_TYPE intrbit, mifintrbit_handler tohost_irq_handler, void *data);
/**
* Releases MIF Stream state.
*/
void mif_stream_release(struct mif_stream *stream);
/**
* Reads the given number of bytes from the MIF stream, copying them
* to the provided address. This removes the read data from the stream.
*
* Returns the number of bytes read.
*/
uint32_t mif_stream_read(struct mif_stream *stream, void *buf, uint32_t num_bytes);
/**
* Returns a pointer to the next packet of data within the stream, without
* removing it. This can be used to process data in place without needing to
* copy it first.
*
* If multiple packets are present these can be read in turn by setting the value
* of current_packet to the returned value from the previous call to mif_stream_peek.
* Each time the returned pointer will advance in the stream by mif_stream_block_size()
* bytes.
*
* Callers cannot assume that multiple calls to mif_stream_peek() will return
* consecutive addresses.
*
* mif_stream_peek_complete must be called to remove the packet(s) from the stream.
*
* Returns a pointer to the beginning of the packet to read, or NULL if there is no
* packet to process.
*
* Example use:
* // Get the first data packet
* void *current_packet = mif_stream_peek( buffer, NULL );
* void *last_packet = NULL;
* while( current_packet != NULL )
* {
* // Process data packet
* ...
*
* // Get the next data packet
* last_packet = current_packet;
* current_packet = mif_stream_peek( buffer, current_packet );
* }
*
* // Remove all processed packets from the stream
* if( last_packet != NULL )
* {
* mif_stream_peek_complete( buffer, last_packet );
* }
*/
const void *mif_stream_peek(struct mif_stream *stream, const void *current_packet);
/**
* Removes all packets from the stream up to and including the given
* packet.
*
* This must be called after using mif_stream_peek to indicate that packet(s)
* can be removed from the stream.
*/
void mif_stream_peek_complete(struct mif_stream *stream, const void *packet);
/**
* Writes the given number of bytes to the MIF stream.
*
* Returns true if the block was written, false if there is not enough
* free space in the buffer for the data.
*/
bool mif_stream_write(struct mif_stream *stream, const void *buf, uint32_t num_bytes);
/**
* Writes a set of non-contiguous data blocks to the MIF stream
* as a contiguous set.
*
* Returns true if the blocks were written, false if there is not enough
* free space in the buffer for the block.
*/
bool mif_stream_write_gather(struct mif_stream *stream, const void **bufs, uint32_t *lengths, uint32_t num_bufs);
/**
* Returns the size in bytes of each individual block within the stream.
*
* When reading data from the stream using mif_stream_read or mif_stream_peek,
* this value is the amount of data held in each packet.
*/
uint32_t mif_stream_block_size(struct mif_stream *stream);
/**
* Returns the interrupt number that will be triggered by reads from the stream
*/
uint8_t mif_stream_read_interrupt(struct mif_stream *stream);
/**
* Returns the interrupt number that will be triggered by writes to the stream
*/
uint8_t mif_stream_write_interrupt(struct mif_stream *stream);
/*
* Initialises the stream's part of the configuration area
*/
void mif_stream_config_serialise(struct mif_stream *stream, struct mxstreamconf *stream_conf);
/**
* MIF Packet Stream Descriptor.
*/
struct mif_stream {
struct scsc_mx *mx;
struct cpacketbuffer buffer;
/** MIF stream peer, R4 or M4? */
enum MIF_STREAM_PEER peer;
/** MIF interrupt bit index, one in each direction */
uint8_t read_bit_idx;
uint8_t write_bit_idx;
enum MIF_STREAM_DIRECTION direction;
};
#endif /* MIFSTREAM_H__ */

View file

@@ -0,0 +1,764 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/wakelock.h>
#include <scsc/scsc_logring.h>
#include <scsc/scsc_mx.h>
#include "scsc_mx_impl.h"
#ifdef CONFIG_SCSC_CLK20MHZ_TEST
#include "mx140_clk_test.h"
#endif
/* Note: define MX140_CLK_VERBOSE_CALLBACKS to get more callbacks when events occur.
* Without this, the only callbacks are failure/success from request().
*/
static int auto_start;
module_param(auto_start, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_start, "Start service automatically: Default 0: disabled, 1: Enabled");
static DEFINE_MUTEX(clk_lock);
static DEFINE_MUTEX(clk_work_lock);
struct workqueue_struct *mx140_clk20mhz_wq;
struct work_struct mx140_clk20mhz_work;
static int recovery;
static int recovery_pending_stop_close;
#define MX140_SERVICE_RECOVERY_TIMEOUT 20000
/* Static Singleton */
static struct mx140_clk20mhz {
/* scsc_service_client has to be the first */
struct scsc_service_client mx140_clk20mhz_service_client;
struct scsc_service *mx140_clk20mhz_service;
struct scsc_mx *mx;
atomic_t clk_request;
atomic_t maxwell_is_present;
atomic_t mx140_clk20mhz_service_started;
atomic_t request_pending;
atomic_t mx140_clk20mhz_service_failed;
void *data;
void (*mx140_clk20mhz_client_cb)(void *data, enum mx140_clk20mhz_status event);
struct proc_dir_entry *procfs_ctrl_dir;
u32 procfs_ctrl_dir_num;
struct wake_lock clk_wake_lock;
struct completion recovery_probe_completion;
} clk20mhz;
static void mx140_clk20mhz_wq_stop(void);
static int mx140_clk20mhz_stop_service(struct scsc_mx *mx);
#ifndef AID_MXPROC
#define AID_MXPROC 0
#endif
static void mx140_clk20mhz_restart(void);
#define MX_CLK20_DIRLEN 128
static const char *procdir_ctrl = "driver/mx140_clk";
static u32 proc_count;
/* Framework for registering proc handlers */
#define MX_CLK20_PROCFS_RW_FILE_OPS(name) \
static ssize_t mx_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
static const struct file_operations mx_procfs_ ## name ## _fops = { \
.read = mx_procfs_ ## name ## _read, \
.write = mx_procfs_ ## name ## _write, \
.open = mx_clk20_procfs_generic_open, \
.llseek = generic_file_llseek \
}
#define MX_CLK20_PROCFS_RO_FILE_OPS(name) \
static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
static const struct file_operations mx_procfs_ ## name ## _fops = { \
.read = mx_procfs_ ## name ## _read, \
.open = mx_clk20_procfs_generic_open, \
.llseek = generic_file_llseek \
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MX_PDE_DATA(inode) PDE_DATA(inode)
#else
#define MX_PDE_DATA(inode) (PDE(inode)->data)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MX_CLK20_PROCFS_SET_UID_GID(_entry) \
do { \
kuid_t proc_kuid = KUIDT_INIT(AID_MXPROC); \
kgid_t proc_kgid = KGIDT_INIT(AID_MXPROC); \
proc_set_user(_entry, proc_kuid, proc_kgid); \
} while (0)
#else
#define MX_CLK20_PROCFS_SET_UID_GID(entry) \
do { \
(entry)->uid = AID_MXPROC; \
(entry)->gid = AID_MXPROC; \
} while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MX_CLK20_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &mx_procfs_ ## name ## _fops, _sdev); \
MX_CLK20_PROCFS_SET_UID_GID(entry); \
} while (0)
#else
#define MX_CLK20_PROCFS_ADD_FILE(_data, name, parent, mode) \
do { \
struct proc_dir_entry *entry; \
entry = create_proc_entry(# name, mode, parent); \
if (entry) { \
entry->proc_fops = &mx_procfs_ ## name ## _fops; \
entry->data = _data; \
MX_CLK20_PROCFS_SET_UID_GID(entry); \
} \
} while (0)
#endif
#define MX_CLK20_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
/* Open handler */
static int mx_clk20_procfs_generic_open(struct inode *inode, struct file *file)
{
file->private_data = MX_PDE_DATA(inode);
return 0;
}
/* No-op */
static ssize_t mx_procfs_restart_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
(void)user_buf;
(void)count;
(void)ppos;
return 0;
}
/* Restart clock service */
static ssize_t mx_procfs_restart_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
(void)file;
(void)user_buf;
(void)ppos;
mx140_clk20mhz_restart();
SCSC_TAG_INFO(MX_PROC, "OK\n");
return count;
}
/* Register proc handler */
MX_CLK20_PROCFS_RW_FILE_OPS(restart);
/* Populate proc node */
static int mx140_clk20mhz_create_ctrl_proc_dir(struct mx140_clk20mhz *clk20mhz)
{
char dir[MX_CLK20_DIRLEN];
struct proc_dir_entry *parent;
(void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, proc_count);
parent = proc_mkdir(dir, NULL);
if (!parent) {
SCSC_TAG_ERR(MX_PROC, "failed to create proc dir %s\n", procdir_ctrl);
return -EINVAL;
}
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
parent->data = clk20mhz;
#endif
clk20mhz->procfs_ctrl_dir = parent;
clk20mhz->procfs_ctrl_dir_num = proc_count;
MX_CLK20_PROCFS_ADD_FILE(clk20mhz, restart, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
SCSC_TAG_DEBUG(MX_PROC, "created %s proc dir\n", dir);
proc_count++;
return 0;
}
/* Remove proc node */
static void mx140_clk20mhz_remove_ctrl_proc_dir(struct mx140_clk20mhz *clk20mhz)
{
if (clk20mhz->procfs_ctrl_dir) {
char dir[MX_CLK20_DIRLEN];
MX_CLK20_PROCFS_REMOVE_FILE(restart, clk20mhz->procfs_ctrl_dir);
(void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, clk20mhz->procfs_ctrl_dir_num);
remove_proc_entry(dir, NULL);
clk20mhz->procfs_ctrl_dir = NULL;
proc_count--;
SCSC_TAG_DEBUG(MX_PROC, "removed %s proc dir\n", dir);
}
}
/* Maxwell manager has detected an issue and the service should freeze */
static void mx140_clk20mhz_stop_on_failure(struct scsc_service_client *client)
{
atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 1);
mutex_lock(&clk_work_lock);
recovery = 1;
reinit_completion(&clk20mhz.recovery_probe_completion);
mutex_unlock(&clk_work_lock);
#ifdef MX140_CLK_VERBOSE_CALLBACKS
/* If call back is registered, inform the user about an asynchronous failure */
if (clk20mhz.mx140_clk20mhz_client_cb)
clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_ASYNC_FAIL);
#endif
SCSC_TAG_INFO(CLK20, "\n");
}
/* Maxwell manager has handled a failure and the chip has been reset. */
static void mx140_clk20mhz_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
{
(void)scsc_panic_code;
atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 1);
#ifdef MX140_CLK_VERBOSE_CALLBACKS
/* If call back is registered, inform the user about an asynchronous failure */
if (clk20mhz.mx140_clk20mhz_client_cb)
clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_ASYNC_FAIL);
#endif
SCSC_TAG_INFO(CLK20, "\n");
}
static int mx140_clk20mhz_start_service(struct scsc_mx *mx)
{
int r;
/* Open the service and get resource pointers */
clk20mhz.mx140_clk20mhz_service = scsc_mx_service_open(mx, SCSC_SERVICE_ID_CLK20MHZ, &clk20mhz.mx140_clk20mhz_service_client, &r);
if (!clk20mhz.mx140_clk20mhz_service) {
SCSC_TAG_ERR(CLK20, "scsc_mx_service_open failed %d\n", r);
return r;
}
/* In case of recovery ensure WLBT has ownership */
if (atomic_read(&clk20mhz.mx140_clk20mhz_service_failed)) {
struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(clk20mhz.mx);
if (!mif)
goto error;
if (mif->mif_restart)
mif->mif_restart(mif);
atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 0);
}
/* Start service. Will bring-up the chip if the chip is disabled */
if (scsc_mx_service_start(clk20mhz.mx140_clk20mhz_service, 0)) {
SCSC_TAG_ERR(CLK20, "scsc_mx_service_start failed\n");
goto error;
}
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 1);
/* If call back is registered, inform the user that the service has started */
if (atomic_read(&clk20mhz.clk_request) && clk20mhz.mx140_clk20mhz_client_cb)
clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_STARTED);
return 0;
error:
return -EIO;
}
static int mx140_clk20mhz_stop_service(struct scsc_mx *mx)
{
if (!atomic_read(&clk20mhz.mx140_clk20mhz_service_started)) {
SCSC_TAG_INFO(CLK20, "Service not started\n");
return -ENODEV;
}
/* Stop service. */
if (scsc_mx_service_stop(clk20mhz.mx140_clk20mhz_service)) {
SCSC_TAG_ERR(CLK20, "scsc_mx_service_stop failed\n");
#ifdef MX140_CLK_VERBOSE_CALLBACKS
/* If call back is registered, inform the user that the service has failed to stop */
if (clk20mhz.mx140_clk20mhz_client_cb)
clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_NOT_STOPPED);
return -EIO;
#endif
}
/* Ignore a service_stop timeout above as it's better to try to close */
/* Close service, if no other service is using Maxwell, chip will turn off */
scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
#ifdef MX140_CLK_VERBOSE_CALLBACKS
/* If call back is registered, inform the user that the service has stopped */
if (clk20mhz.mx140_clk20mhz_client_cb)
clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_STOPPED);
#endif
return 0;
}
#define MX140_CLK_TRIES (20)
static void mx140_clk20mhz_work_func(struct work_struct *work)
{
int i;
int r = 0;
enum mx140_clk20mhz_status status;
mutex_lock(&clk_work_lock);
for (i = 0; i < MX140_CLK_TRIES; i++) {
if (atomic_read(&clk20mhz.clk_request) == 0) {
SCSC_TAG_INFO(CLK20, "mx140_clk20mhz_start_service no longer requested\n");
recovery = 0;
mutex_unlock(&clk_work_lock);
return;
}
SCSC_TAG_INFO(CLK20, "Calling mx140_clk20mhz_start_service\n");
r = mx140_clk20mhz_start_service(clk20mhz.mx);
switch (r) {
case 0:
SCSC_TAG_INFO(CLK20, "mx140_clk20mhz_start_service OK\n");
recovery = 0;
mutex_unlock(&clk_work_lock);
return;
case -EAGAIN:
SCSC_TAG_INFO(CLK20, "FW not found because filesystem not mounted yet, retrying...\n");
msleep(500); /* No FS yet, retry */
break;
default:
SCSC_TAG_INFO(CLK20, "mx140_clk20mhz_start_service failed %d\n", r);
goto err;
}
}
err:
SCSC_TAG_ERR(CLK20, "Unable to start the 20MHz clock service\n");
/* Deferred service start failure or timeout.
* We assume it'll never manage to start - e.g. bad or missing f/w.
*/
if (r) {
struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(clk20mhz.mx);
SCSC_TAG_ERR(CLK20, "Deferred start timeout (%d)\n", r);
atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 1);
/* Switch USBPLL ownership to AP so USB may be used */
if (mif && mif->mif_cleanup)
mif->mif_cleanup(mif);
}
/* If call back is registered, inform the user that the service has failed to start */
if (atomic_read(&clk20mhz.clk_request) && clk20mhz.mx140_clk20mhz_client_cb) {
/* The USB/PLL driver has inadequate error handling...
* Lie that the start was successful when AP has control
*/
status = atomic_read(&clk20mhz.mx140_clk20mhz_service_failed) ? MX140_CLK_STARTED : MX140_CLK_NOT_STARTED;
/* Also lie that the start was successful when the mx140 driver is halted after f/w panic */
if (r == -EILSEQ)
status = MX140_CLK_STARTED;
SCSC_TAG_INFO(CLK20, "cb %d\n", status);
clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, status);
}
recovery = 0;
mutex_unlock(&clk_work_lock);
}
static void mx140_clk20mhz_wq_init(void)
{
mx140_clk20mhz_wq = create_singlethread_workqueue("mx140_clk20mhz_wq");
INIT_WORK(&mx140_clk20mhz_work, mx140_clk20mhz_work_func);
}
static void mx140_clk20mhz_wq_stop(void)
{
cancel_work_sync(&mx140_clk20mhz_work);
flush_workqueue(mx140_clk20mhz_wq);
}
static void mx140_clk20mhz_wq_deinit(void)
{
mx140_clk20mhz_wq_stop();
destroy_workqueue(mx140_clk20mhz_wq);
}
static void mx140_clk20mhz_wq_start(void)
{
queue_work(mx140_clk20mhz_wq, &mx140_clk20mhz_work);
}
/* Register a callback function to indicate to the (USB) client the status of
* the clock request
*/
int mx140_clk20mhz_register(void (*client_cb)(void *data, enum mx140_clk20mhz_status event), void *data)
{
SCSC_TAG_INFO(CLK20, "cb %p, %p\n", client_cb, data);
mutex_lock(&clk_lock);
if (clk20mhz.mx140_clk20mhz_client_cb == NULL) {
SCSC_TAG_INFO(CLK20, "clk20Mhz client registered\n");
clk20mhz.mx140_clk20mhz_client_cb = client_cb;
clk20mhz.data = data;
mutex_unlock(&clk_lock);
return 0;
}
SCSC_TAG_ERR(CLK20, "clk20Mhz client already registered\n");
mutex_unlock(&clk_lock);
return -EEXIST;
}
EXPORT_SYMBOL(mx140_clk20mhz_register);
/* Unregister callback function */
void mx140_clk20mhz_unregister(void)
{
SCSC_TAG_INFO(CLK20, "\n");
mutex_lock(&clk_lock);
if (clk20mhz.mx140_clk20mhz_client_cb == NULL) {
SCSC_TAG_INFO(CLK20, "clk20Mhz client not registered\n");
mutex_unlock(&clk_lock);
return;
}
clk20mhz.mx140_clk20mhz_client_cb = NULL;
clk20mhz.data = NULL;
mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(mx140_clk20mhz_unregister);
/* Indicate that an external client requires mx140's 20 MHz clock.
* The Core driver will boot mx140 as required and ensure that the
* clock remains running.
*
* If a callback was installed by register(), do this asynchronously.
*/
int mx140_clk20mhz_request(void)
{
mutex_lock(&clk_lock);
atomic_inc(&clk20mhz.clk_request);
SCSC_TAG_INFO(CLK20, "%d\n", atomic_read(&clk20mhz.clk_request));
if (!atomic_read(&clk20mhz.maxwell_is_present)) {
SCSC_TAG_INFO(CLK20, "Maxwell is not present yet, store request\n");
atomic_set(&clk20mhz.request_pending, 1);
mutex_unlock(&clk_lock);
return 0;
}
if (recovery) {
int r;
mutex_unlock(&clk_lock);
r = wait_for_completion_timeout(&clk20mhz.recovery_probe_completion,
msecs_to_jiffies(MX140_SERVICE_RECOVERY_TIMEOUT));
mutex_lock(&clk_lock);
if (r == 0) {
SCSC_TAG_INFO(CLK20, "recovery_probe_completion timeout - try a start\n");
mx140_clk20mhz_wq_start();
}
} else if (!atomic_read(&clk20mhz.mx140_clk20mhz_service_started))
mx140_clk20mhz_wq_start();
else
SCSC_TAG_INFO(CLK20, "Service already started\n");
mutex_unlock(&clk_lock);
return 0;
}
EXPORT_SYMBOL(mx140_clk20mhz_request);
/* Indicate that an external client no longer requires mx140's 20 MHz clock.
* The Core driver will shut down mx140 if no other services are
* currently running
*
* If a callback was installed by register(), do this asynchronously.
*/
int mx140_clk20mhz_release(void)
{
int ret = 0;
mutex_lock(&clk_lock);
atomic_dec(&clk20mhz.clk_request);
SCSC_TAG_INFO(CLK20, "%d\n", atomic_read(&clk20mhz.clk_request));
if (!atomic_read(&clk20mhz.maxwell_is_present)) {
SCSC_TAG_INFO(CLK20, "Maxwell is released before probe\n");
if (atomic_read(&clk20mhz.request_pending)) {
SCSC_TAG_INFO(CLK20, "Maxwell had request pending. Cancel it\n");
atomic_set(&clk20mhz.request_pending, 0);
}
mutex_unlock(&clk_lock);
return 0;
}
/* Cancel any pending attempt */
mx140_clk20mhz_wq_stop();
if (recovery) {
recovery_pending_stop_close = 1;
} else {
ret = mx140_clk20mhz_stop_service(clk20mhz.mx);
if (ret == -ENODEV) {
/* Suppress error if it wasn't running */
ret = 0;
}
}
/* Suppress stop failure if the service is failed */
if (atomic_read(&clk20mhz.mx140_clk20mhz_service_failed)) {
SCSC_TAG_INFO(CLK20, "Return OK as control is with AP\n");
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
ret = 0;
}
mutex_unlock(&clk_lock);
return ret;
}
EXPORT_SYMBOL(mx140_clk20mhz_release);
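/* Illustrative client sequence (a sketch; usb_clk_cb is a hypothetical
 * callback of type void (*)(void *, enum mx140_clk20mhz_status)):
 *
 *   mx140_clk20mhz_register(usb_clk_cb, NULL);
 *   mx140_clk20mhz_request();     (boots mx140 if needed, keeps clock on)
 *   ...
 *   mx140_clk20mhz_release();     (drops the request)
 *   mx140_clk20mhz_unregister();
 */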
/* Probe callback after platform driver is registered */
void mx140_clk20mhz_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
SCSC_TAG_INFO(CLK20, "\n");
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery) {
SCSC_TAG_INFO(CLK20, "Ignore probe - no recovery in progress\n");
return;
}
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery) {
SCSC_TAG_INFO(CLK20, "Recovery probe\n");
/**
* If recovery_pending_stop_close is set, then there was a stop
* during recovery (could be due to USB cable unplugged) so
* recovery should just stop here.
* The mx140_clk service has been closed in the remove callback.
*/
mutex_lock(&clk_lock);
if (recovery_pending_stop_close) {
SCSC_TAG_INFO(CLK20, "Recovery probe - stop during recovery, so don't recover\n");
recovery_pending_stop_close = 0;
recovery = 0;
mutex_unlock(&clk_lock);
/**
* Should there have been a new start request during
* recovery (very unlikely), then the complete timeout
* will ensure that a start is requested.
*/
return;
}
mutex_unlock(&clk_lock);
mutex_lock(&clk_work_lock);
mx140_clk20mhz_wq_start();
mutex_unlock(&clk_work_lock);
complete_all(&clk20mhz.recovery_probe_completion);
} else {
SCSC_TAG_INFO(CLK20, "Maxwell probed\n");
clk20mhz.mx = mx;
clk20mhz.mx140_clk20mhz_service_client.stop_on_failure = mx140_clk20mhz_stop_on_failure;
clk20mhz.mx140_clk20mhz_service_client.failure_reset = mx140_clk20mhz_failure_reset;
mx140_clk20mhz_create_ctrl_proc_dir(&clk20mhz);
mx140_clk20mhz_wq_init();
atomic_set(&clk20mhz.maxwell_is_present, 1);
mutex_lock(&clk_work_lock);
if ((auto_start || atomic_read(&clk20mhz.request_pending))) {
atomic_set(&clk20mhz.request_pending, 0);
SCSC_TAG_INFO(CLK20, "start pending service\n");
mx140_clk20mhz_wq_start();
}
mutex_unlock(&clk_work_lock);
}
}
static void mx140_clk20mhz_restart(void)
{
int r;
struct scsc_mif_abs *mif;
SCSC_TAG_INFO(CLK20, "\n");
wake_lock(&clk20mhz.clk_wake_lock);
mutex_lock(&clk_lock);
if (!atomic_read(&clk20mhz.mx140_clk20mhz_service_started)) {
SCSC_TAG_INFO(CLK20, "service wasn't started\n");
goto done;
}
mif = scsc_mx_get_mif_abs(clk20mhz.mx);
if (mif == NULL)
goto done;
/* Don't stop the 20 MHz clock service. Leave it running until
* WLBT resets due to the service_close().
*/
/* Ensure USBPLL is running and owned by AP, to stop USB disconnect */
if (mif->mif_cleanup)
mif->mif_cleanup(mif);
scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
/* ...and restart the 20 MHz clock service */
clk20mhz.mx140_clk20mhz_service = scsc_mx_service_open(clk20mhz.mx, SCSC_SERVICE_ID_CLK20MHZ, &clk20mhz.mx140_clk20mhz_service_client, &r);
if (clk20mhz.mx140_clk20mhz_service == NULL) {
SCSC_TAG_ERR(CLK20, "reopen failed %d\n", r);
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
goto done;
}
/* Ensure USBPLL is owned by WLBT again */
if (mif->mif_restart)
mif->mif_restart(mif);
r = scsc_mx_service_start(clk20mhz.mx140_clk20mhz_service, 0);
if (r) {
SCSC_TAG_ERR(CLK20, "restart failed %d\n", r);
scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
goto done;
}
SCSC_TAG_INFO(CLK20, "restarted\n");
done:
mutex_unlock(&clk_lock);
wake_unlock(&clk20mhz.clk_wake_lock);
}
/* Remove callback when the platform driver is unregistered */
void mx140_clk20mhz_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
mutex_lock(&clk_work_lock);
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery) {
SCSC_TAG_INFO(CLK20, "Ignore recovery remove: Service driver not active\n");
goto done;
} else if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery) {
struct scsc_mif_abs *mif;
SCSC_TAG_INFO(CLK20, "Recovery remove\n");
mutex_lock(&clk_lock);
mx140_clk20mhz_wq_stop();
mif = scsc_mx_get_mif_abs(clk20mhz.mx);
if (mif == NULL)
goto done_local;
/**
* If there's been a stop during recovery ensure that the
* mx140_clk service is closed in the mx driver, but do not
* touch USBPLL ownership since this will already have been
* handled.
*/
if (!recovery_pending_stop_close) {
/* Don't stop the clock service - leave it running until
* service_close() resets WLBT.
*/
/* Switch ownership of USBPLL to the AP. Ownership
* returns to WLBT after recovery completes.
*/
if (mif->mif_cleanup)
mif->mif_cleanup(mif);
}
scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
done_local:
mutex_unlock(&clk_lock);
} else {
SCSC_TAG_INFO(CLK20, "Maxwell removed\n");
mx140_clk20mhz_remove_ctrl_proc_dir(&clk20mhz);
atomic_set(&clk20mhz.maxwell_is_present, 0);
mx140_clk20mhz_wq_deinit();
}
done:
mutex_unlock(&clk_work_lock);
}
/* 20MHz client driver */
struct scsc_mx_module_client mx140_clk20mhz_driver = {
.name = "MX 20MHz clock client",
.probe = mx140_clk20mhz_probe,
.remove = mx140_clk20mhz_remove,
};
/* 20MHz service driver initialization */
static int __init mx140_clk20mhz_init(void)
{
int ret;
SCSC_TAG_INFO(CLK20, "Registering service\n");
wake_lock_init(&clk20mhz.clk_wake_lock, WAKE_LOCK_SUSPEND, "clk20_wl");
init_completion(&clk20mhz.recovery_probe_completion);
atomic_set(&clk20mhz.clk_request, 0);
atomic_set(&clk20mhz.maxwell_is_present, 0);
atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
atomic_set(&clk20mhz.request_pending, 0);
atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 0);
/* Register with Maxwell Framework */
ret = scsc_mx_module_register_client_module(&mx140_clk20mhz_driver);
if (ret) {
SCSC_TAG_ERR(CLK20, "scsc_mx_module_register_client_module failed: r=%d\n", ret);
return ret;
}
#ifdef CONFIG_SCSC_CLK20MHZ_TEST
mx140_clk_test_init();
#endif
return 0;
}
static void __exit mx140_clk20mhz_exit(void)
{
scsc_mx_module_unregister_client_module(&mx140_clk20mhz_driver);
#ifdef CONFIG_SCSC_CLK20MHZ_TEST
mx140_clk_test_exit();
#endif
complete_all(&clk20mhz.recovery_probe_completion);
wake_lock_destroy(&clk20mhz.clk_wake_lock);
}
module_init(mx140_clk20mhz_init);
module_exit(mx140_clk20mhz_exit);
MODULE_DESCRIPTION("Samsung Maxwell 20MHz Clock Service");
MODULE_AUTHOR("SLSI");
MODULE_LICENSE("GPL and additional rights");

View file

@@ -0,0 +1,161 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <scsc/scsc_logring.h>
#include <scsc/scsc_mx.h>
#include "scsc_mx_impl.h"
/* char device entry declarations */
static dev_t mx140_clk_test_dev_t;
static struct class *mx140_clk_test_class;
static struct cdev *mx140_clk_test_cdev;
/* Call back function registered with 20MHz clock framework */
static void client_cb(void *data, enum mx140_clk20mhz_status event)
{
switch (event) {
case MX140_CLK_STARTED:
SCSC_TAG_INFO(CLK20_TEST, "Event MX140_CLK_STARTED received\n");
break;
case MX140_CLK_STOPPED:
SCSC_TAG_INFO(CLK20_TEST, "Event MX140_CLK_STOPPED received\n");
break;
case MX140_CLK_NOT_STARTED:
SCSC_TAG_INFO(CLK20_TEST, "Event MX140_CLK_NOT_STARTED received\n");
break;
case MX140_CLK_NOT_STOPPED:
SCSC_TAG_INFO(CLK20_TEST, "Event MX140_CLK_NOT_STOPPED received\n");
break;
case MX140_CLK_ASYNC_FAIL:
SCSC_TAG_INFO(CLK20_TEST, "Event MX140_CLK_ASYNC_FAIL received\n");
break;
default:
break;
}
}
static int mx140_clk_test_dev_open(struct inode *inode, struct file *file)
{
mx140_clk20mhz_register(client_cb, NULL);
return 0;
}
static int mx140_clk_test_dev_release(struct inode *inode, struct file *file)
{
mx140_clk20mhz_unregister();
return 0;
}
static ssize_t mx140_clk_test_dev_write(struct file *file, const char *data, size_t len, loff_t *offset)
{
char str[2]; /* One value plus a trailing newline */
long int val = 0;
if (len > 2) {
SCSC_TAG_ERR(CLK20_TEST, "Incorrect value len %zd\n", len);
goto error;
}
if (copy_from_user(str, data, len))
return -EFAULT;
str[1] = '\0';
if (kstrtol(str, 10, &val)) {
SCSC_TAG_ERR(CLK20_TEST, "Invalid value\n");
goto error;
}
if (val == 1)
mx140_clk20mhz_request();
else if (val == 0)
mx140_clk20mhz_release();
else
SCSC_TAG_INFO(CLK20_TEST, "val %ld is not valid, 1 - on, 0 - off\n", val);
error:
return len;
}
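/* Illustrative use from user space (the node name depends on the minor
 * number allocated at init):
 *
 *   echo 1 > /dev/mx140_usb_clk_test_0    (request the 20 MHz clock)
 *   echo 0 > /dev/mx140_usb_clk_test_0    (release it)
 */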
static ssize_t mx140_clk_test_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
{
return length;
}
static const struct file_operations mx140_clk_test_dev_fops = {
.owner = THIS_MODULE,
.open = mx140_clk_test_dev_open,
.read = mx140_clk_test_dev_read,
.write = mx140_clk_test_dev_write,
.release = mx140_clk_test_dev_release,
};
/* 20MHz service driver registration */
void mx140_clk_test_init(void)
{
int ret;
SCSC_TAG_INFO(CLK20_TEST, "Registering mx140 TEST\n");
ret = alloc_chrdev_region(&mx140_clk_test_dev_t, 0, 1, "mx140_clk_test-cdev");
if (ret < 0) {
SCSC_TAG_ERR(CLK20_TEST, "failed to alloc chrdev region\n");
goto fail_alloc_chrdev_region;
}
mx140_clk_test_cdev = cdev_alloc();
if (!mx140_clk_test_cdev) {
ret = -ENOMEM;
SCSC_TAG_ERR(CLK20_TEST, "failed to alloc cdev\n");
goto fail_alloc_cdev;
}
cdev_init(mx140_clk_test_cdev, &mx140_clk_test_dev_fops);
ret = cdev_add(mx140_clk_test_cdev, mx140_clk_test_dev_t, 1);
if (ret < 0) {
SCSC_TAG_ERR(CLK20_TEST, "failed to add cdev\n");
goto fail_add_cdev;
}
mx140_clk_test_class = class_create(THIS_MODULE, "mx140_clk_test");
if (IS_ERR(mx140_clk_test_class)) {
ret = PTR_ERR(mx140_clk_test_class);
SCSC_TAG_ERR(CLK20_TEST, "failed to create class\n");
goto fail_create_class;
}
if (IS_ERR(device_create(mx140_clk_test_class, NULL, mx140_clk_test_dev_t, NULL, "mx140_usb_clk_test_%d", MINOR(mx140_clk_test_dev_t)))) {
ret = -EINVAL;
SCSC_TAG_ERR(CLK20_TEST, "failed to create device\n");
goto fail_create_device;
}
return;
fail_create_device:
class_destroy(mx140_clk_test_class);
fail_create_class:
cdev_del(mx140_clk_test_cdev);
fail_add_cdev:
fail_alloc_cdev:
unregister_chrdev_region(mx140_clk_test_dev_t, 1);
fail_alloc_chrdev_region:
return;
}
void mx140_clk_test_exit(void)
{
device_destroy(mx140_clk_test_class, mx140_clk_test_dev_t);
class_destroy(mx140_clk_test_class);
cdev_del(mx140_clk_test_cdev);
unregister_chrdev_region(mx140_clk_test_dev_t, 1);
}

View file

@@ -0,0 +1,13 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __MX140_CLK_TEST_H__
#define __MX140_CLK_TEST_H__
void mx140_clk_test_init(void);
void mx140_clk_test_exit(void);
#endif /* __MX140_CLK_TEST_H__ */

View file

@@ -0,0 +1,465 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <scsc/scsc_logring.h>
#include <scsc/scsc_mx.h>
#include "scsc_mx_impl.h"
/* Firmware directory definitions */
#define MX140_USE_OWN_LOAD_FILE 1
#define MX140_FW_BASE_DIR_ETC_WIFI "/system/etc/wifi"
#ifdef MX140_USE_OWN_LOAD_FILE
#define MX140_FW_BASE_DIR "/vendor/firmware/mx140/fw"
#else
#define MX140_FW_BASE_DIR "mx140"
#endif
#define MX140_FW_CONF_SUBDIR "conf"
#define MX140_FW_DEBUG_SUBDIR "debug"
#define MX140_FW_BIN "mx140.bin"
#define MX140_FW_PATH_MAX_LENGTH (512)
#define MX140_FW_VARIANT_DEFAULT "mx140"
#define MX140_FW_VARIANT_LEGACY_DEFAULT "full-service"
/* Table of suffixes to append to f/w name */
struct fw_suffix {
char suffix[4];
u32 hw_ver;
};
/* Table of known RF h/w revs */
static const struct fw_suffix fw_suffixes[] = {
{ .suffix = "_11", .hw_ver = 0x11, },
{ .suffix = "_10", .hw_ver = 0x10, },
{ .suffix = "_00", .hw_ver = 0x00, },
{ .suffix = "", .hw_ver = 0xff, }, /* plain mx140.bin, must be last */
};
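/* Illustrative resolution order with the default firmware_variant "mx140":
 * mx140_11.bin, then mx140_10.bin, then mx140_00.bin, then plain mx140.bin.
 */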
/* Once set, we always load this firmware suffix */
static int fw_suffix_found = -1;
/* Variant of firmware binary to load */
static char *firmware_variant = MX140_FW_VARIANT_DEFAULT;
module_param(firmware_variant, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(firmware_variant, "mx140 firmware variant, default mx140");
/* RF hardware version of firmware to load. If "auto" this gets replaced with
* the suffix of FW that got loaded.
* If "manual" it loads the version specified by firmware_variant, verbatim.
*/
static char *firmware_hw_ver = "auto";
module_param(firmware_hw_ver, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(firmware_hw_ver, "mx140 hw version detect, manual=disable");
static char *base_dir = MX140_FW_BASE_DIR_ETC_WIFI;
module_param(base_dir, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(base_dir, "Base directory for firmware and config");
static bool enable_auto_sense;
module_param(enable_auto_sense, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_auto_sense, "Set to true to allow driver to switch to legacy f/w dir if new one is not populated");
static bool use_new_fw_structure = true;
module_param(use_new_fw_structure, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_new_fw_structure, "If enable_auto_sense is false and /etc/wifi is used, set this to true");
/* Reads a configuration file into memory (f/w profile specific) */
int mx140_file_request_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_rel_path)
{
struct device *dev;
char config_path[MX140_FW_PATH_MAX_LENGTH];
dev = scsc_mx_get_device(mx);
/* e.g. /etc/wifi/mx140/conf/wlan/wlan.hcf */
scnprintf(config_path, sizeof(config_path), "%s/%s%s/%s/%s",
base_dir,
firmware_variant,
fw_suffixes[fw_suffix_found].suffix,
MX140_FW_CONF_SUBDIR,
config_rel_path);
#ifdef MX140_USE_OWN_LOAD_FILE
return mx140_request_file(mx, config_path, conf);
#else
return request_firmware(conf, config_path, dev);
#endif
}
EXPORT_SYMBOL(mx140_file_request_conf);
/* Reads a debug configuration file into memory (f/w profile specific) */
int mx140_file_request_debug_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_rel_path)
{
struct device *dev;
char config_path[MX140_FW_PATH_MAX_LENGTH];
dev = scsc_mx_get_device(mx);
/* e.g. /etc/wifi/mx140/debug/log_strings.bin */
scnprintf(config_path, sizeof(config_path), "%s/%s%s/%s/%s",
base_dir,
firmware_variant,
fw_suffixes[fw_suffix_found].suffix,
MX140_FW_DEBUG_SUBDIR,
config_rel_path);
#ifdef MX140_USE_OWN_LOAD_FILE
return mx140_request_file(mx, config_path, conf);
#else
return request_firmware(conf, config_path, dev);
#endif
}
EXPORT_SYMBOL(mx140_file_request_debug_conf);
/* Read device configuration file into memory (whole device specific) */
int mx140_file_request_device_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_rel_path)
{
struct device *dev;
char config_path[MX140_FW_PATH_MAX_LENGTH];
dev = scsc_mx_get_device(mx);
/* e.g. /etc/wifi/mx140/wlan/mac.txt */
snprintf(config_path, sizeof(config_path), "%s/%s%s/%s",
base_dir,
fw_suffixes[fw_suffix_found].suffix,
MX140_FW_CONF_SUBDIR,
config_rel_path);
#ifdef MX140_USE_OWN_LOAD_FILE
return mx140_request_file(mx, config_path, conf);
#else
return request_firmware(conf, config_path, dev);
#endif
}
EXPORT_SYMBOL(mx140_file_request_device_conf);
/* Release configuration file memory. */
void mx140_file_release_conf(struct scsc_mx *mx, const struct firmware *conf)
{
(void)mx;
#ifdef MX140_USE_OWN_LOAD_FILE
mx140_release_file(mx, conf);
#else
if (conf)
release_firmware(conf);
#endif
}
EXPORT_SYMBOL(mx140_file_release_conf);
static int __mx140_file_download_fw(struct scsc_mx *mx, void *dest, size_t dest_size, u32 *fw_image_size, const char *fw_suffix)
{
const struct firmware *firm;
struct device *dev;
int r = 0;
char img_path_name[MX140_FW_PATH_MAX_LENGTH];
SCSC_TAG_INFO(MX_FILE, "firmware_variant=%s (%s)\n", firmware_variant, fw_suffix);
dev = scsc_mx_get_device(mx);
/* e.g. /etc/wifi/mx140.bin */
if (use_new_fw_structure) {
scnprintf(img_path_name, sizeof(img_path_name), "%s/%s%s.bin",
base_dir,
firmware_variant,
fw_suffix);
} else {
scnprintf(img_path_name, sizeof(img_path_name), "%s/%s%s/"MX140_FW_BIN,
base_dir,
firmware_variant,
fw_suffix);
}
SCSC_TAG_DEBUG(MX_FILE, "Load CR4 fw %s in shared address %p\n", img_path_name, dest);
#ifdef MX140_USE_OWN_LOAD_FILE
r = mx140_request_file(mx, img_path_name, &firm);
#else
r = request_firmware(&firm, img_path_name, dev);
#endif
if (r) {
SCSC_TAG_ERR(MX_FILE, "Error Loading FW, error %d\n", r);
return r;
}
SCSC_TAG_DEBUG(MX_FILE, "FW Download, size %zu\n", firm->size);
if (firm->size > dest_size) {
SCSC_TAG_ERR(MX_FILE, "firmware image too big for buffer (%zu > %u)", dest_size, *fw_image_size);
r = -EINVAL;
} else {
memcpy(dest, firm->data, firm->size);
*fw_image_size = firm->size;
}
#ifdef MX140_USE_OWN_LOAD_FILE
mx140_release_file(mx, firm);
#else
release_firmware(firm);
#endif
return r;
}
/* Download firmware binary into a buffer supplied by the caller */
int mx140_file_download_fw(struct scsc_mx *mx, void *dest, size_t dest_size, u32 *fw_image_size)
{
int r;
int i;
int manual;
/* Override to use the verbatim image only */
manual = !strcmp(firmware_hw_ver, "manual");
if (manual) {
SCSC_TAG_INFO(MX_FILE, "manual hw version\n");
fw_suffix_found = ARRAY_SIZE(fw_suffixes) - 1;
}
SCSC_TAG_DEBUG(MX_FILE, "fw_suffix_found %d\n", fw_suffix_found);
/* If we know which f/w suffix to use, select it immediately */
if (fw_suffix_found != -1) {
r = __mx140_file_download_fw(mx, dest, dest_size, fw_image_size, fw_suffixes[fw_suffix_found].suffix);
goto done;
}
/* Otherwise try the list */
for (i = 0; i < ARRAY_SIZE(fw_suffixes); i++) {
/* Try to find each suffix in turn */
SCSC_TAG_INFO(MX_FILE, "try %d %s\n", i, fw_suffixes[i].suffix);
r = __mx140_file_download_fw(mx, dest, dest_size, fw_image_size, fw_suffixes[i].suffix);
if (r != -ENOENT)
break;
}
/* Save this for next time */
if (r == 0)
fw_suffix_found = i;
done:
/* Update firmware_hw_ver to reflect what got auto selected, for moredump */
if (fw_suffix_found != -1 && !manual) {
/* User will only read this, so casting away const is safe */
firmware_hw_ver = (char *)fw_suffixes[fw_suffix_found].suffix;
}
return r;
}
int mx140_request_file(struct scsc_mx *mx, char *path, const struct firmware **firmp)
{
struct file *f;
mm_segment_t fs;
struct kstat stat;
const int max_read_size = 4096;
int r, whats_left, to_read, size;
struct firmware *firm;
char *buf, *p;
SCSC_TAG_DEBUG(MX_FILE, "request %s\n", path);
*firmp = NULL;
/* Current segment. */
fs = get_fs();
/* Set to kernel segment. */
set_fs(get_ds());
/* Check FS is ready */
r = vfs_stat(MX140_FW_BASE_DIR_ETC_WIFI, &stat);
if (r != 0) {
set_fs(fs);
SCSC_TAG_ERR(MX_FILE, "vfs_stat() failed for %s\n", MX140_FW_BASE_DIR_ETC_WIFI);
return -EAGAIN;
}
/* Check f/w bin */
r = vfs_stat(path, &stat);
if (r != 0) {
set_fs(fs);
SCSC_TAG_ERR(MX_FILE, "vfs_stat() failed for %s\n", path);
return -ENOENT;
}
/* Revert to original segment. */
set_fs(fs);
/* Round up to a multiple of 256 octets, leaving some headroom */
size = (stat.size + 256) & ~255;
/* Get memory for file contents. */
buf = vzalloc(size);
if (!buf) {
SCSC_TAG_ERR(MX_FILE, "kzalloc(%d) failed for %s\n", size, path);
return -ENOMEM;
}
p = buf;
/* Get firmware structure. */
firm = kzalloc(sizeof(*firm), GFP_KERNEL);
if (!firm) {
vfree(buf);
SCSC_TAG_ERR(MX_FILE, "kzalloc(%zu) failed for %s\n", sizeof(*firmp), path);
return -ENOMEM;
}
/* Open the file for reading. */
f = filp_open(path, O_RDONLY, 0);
if (IS_ERR(f)) {
vfree(buf);
kfree(firm);
SCSC_TAG_ERR(MX_FILE, "filp_open() failed for %s with %ld\n", path, PTR_ERR(f));
return -ENOENT;
}
whats_left = stat.size;
fs = get_fs();
set_fs(get_ds());
/* Read at most max_read_size in each read. Loop until the whole file has
* been copied to the local buffer.
*/
while (whats_left) {
to_read = whats_left < max_read_size ? whats_left : max_read_size;
r = f->f_op->read(f, p, to_read, &f->f_pos);
if (r < 0) {
SCSC_TAG_ERR(MX_FILE, "error reading %s\n", path);
break;
}
if (r == 0)
break;
whats_left -= r;
p += r;
/* A short read means end of file: keep what was read and stop */
if (r < to_read)
break;
}
set_fs(fs);
filp_close(f, NULL);
if (r >= 0) {
r = 0;
/* Pass to caller. Caller will free allocated memory through
* mx140_release_file().
*/
firm->size = p - buf;
firm->data = buf;
*firmp = firm;
} else {
vfree(buf);
kfree(firm);
}
return r;
}
EXPORT_SYMBOL(mx140_request_file);
int mx140_release_file(struct scsc_mx *mx, const struct firmware *firmp)
{
if (!firmp || !firmp->data) {
SCSC_TAG_ERR(MX_FILE, "firmp=%p\n", firmp);
return -EINVAL;
}
SCSC_TAG_DEBUG(MX_FILE, "release firmp=%p, data=%p\n", firmp, firmp->data);
vfree(firmp->data);
kfree(firmp);
return 0;
}
EXPORT_SYMBOL(mx140_release_file);
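/*
* Illustrative usage sketch only (not part of the driver): how a caller
* might pair mx140_request_file() with mx140_release_file(). The path and
* the consume_fn() helper are hypothetical.
*
*	const struct firmware *fw;
*	int r = mx140_request_file(mx, "/etc/wifi/mx140/debug/example.bin", &fw);
*
*	if (r)
*		return r;
*	consume_fn(fw->data, fw->size);
*	mx140_release_file(mx, fw);
*/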
/* Try to auto detect f/w directory */
void mx140_basedir_file(struct scsc_mx *mx)
{
#ifdef MX140_ALLOW_AUTO_SENSE /* This was added to aid the transition */
#ifdef MX140_USE_OWN_LOAD_FILE
struct kstat stat;
mm_segment_t fs;
int r;
char etc_dir_file[MX140_FW_PATH_MAX_LENGTH];
if (!enable_auto_sense)
return;
use_new_fw_structure = false;
base_dir = MX140_FW_BASE_DIR_ETC_WIFI;
firmware_variant = MX140_FW_VARIANT_DEFAULT;
scnprintf(etc_dir_file, sizeof(etc_dir_file), "%s/"MX140_FW_BIN, base_dir);
/* Current segment. */
fs = get_fs();
/* Set to kernel segment. */
set_fs(get_ds());
r = vfs_stat(etc_dir_file, &stat);
if (r == 0) {
use_new_fw_structure = true;
set_fs(fs);
SCSC_TAG_INFO(MX_FILE, "WiFi/BT firmware base directory is %s\n", base_dir);
return;
}
SCSC_TAG_ERR(MX_FILE, "Base dir: %s doesn't exist\n", base_dir);
base_dir = MX140_FW_BASE_DIR;
firmware_variant = MX140_FW_VARIANT_LEGACY_DEFAULT;
r = vfs_stat(base_dir, &stat);
if (r != 0) {
SCSC_TAG_ERR(MX_FILE, "Base dir: %s doesn't exist\n", base_dir);
base_dir = NULL;
}
set_fs(fs);
SCSC_TAG_INFO(MX_FILE, "WiFi/BT firmware base directory is %s\n", base_dir ? base_dir : "not found");
#endif
#endif
}
/* Select file for h/w version from filesystem */
int mx140_file_select_fw(struct scsc_mx *mx, u32 hw_ver)
{
int i;
SCSC_TAG_INFO(MX_FILE, "select f/w for 0x%04x\n", hw_ver);
hw_ver = (hw_ver & 0xff00) >> 8; /* LSB is the RF HW ID (e.g. S610) */
for (i = 0; i < sizeof(fw_suffixes) / sizeof(fw_suffixes[0]); i++) {
if (fw_suffixes[i].hw_ver == hw_ver) {
fw_suffix_found = i;
SCSC_TAG_DEBUG(MX_FILE, "f/w for 0x%04x: index %d, suffix '%s'\n",
hw_ver, i, fw_suffixes[i].suffix);
return 0;
}
}
SCSC_TAG_ERR(MX_FILE, "No known f/w for 0x%04x, default to catchall\n", hw_ver);
/* Enable when a unified FW image is installed */
#ifdef MX140_UNIFIED_HW_FW
/* The last f/w is the non-suffixed "<fw>.bin", assume it's compatible */
fw_suffix_found = i - 1;
#else
fw_suffix_found = -1; /* None found */
#endif
return -EINVAL;
}
/* Query whether this HW is supported by the current FW file set */
bool mx140_file_supported_hw(struct scsc_mx *mx, u32 hw_ver)
{
hw_ver = (hw_ver & 0xff00) >> 8; /* LSB is the RF HW ID (e.g. S610) */
/* No f/w has been selected yet, so we can't vouch for this h/w */
if (fw_suffix_found == -1)
return false;
/* Assume installed 0xff is always compatible, and f/w will panic if it isn't */
if (fw_suffixes[fw_suffix_found].hw_ver == 0xff)
return true;
/* Does the selected f/w match the hw_ver from the chip? */
return (fw_suffixes[fw_suffix_found].hw_ver == hw_ver);
}
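/*
* Illustrative usage sketch only (not part of the driver): the expected
* call order, assuming the caller has already read the chip h/w version;
* boot_firmware() is a hypothetical consumer.
*
*	mx140_file_select_fw(mx, hw_ver);  // pick a suffix, or the catchall
*	r = mx140_file_download_fw(mx, dest, dest_size, &fw_image_size);
*	if (!r && mx140_file_supported_hw(mx, hw_ver))
*		boot_firmware(dest, fw_image_size);
*/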

View file

@ -0,0 +1,739 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <asm/page.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <scsc/scsc_mx.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include "mx_dbg_sampler.h"
#include "scsc_mif_abs.h"
#include "mxman.h"
#include "scsc_mx_impl.h"
#include "miframman.h"
#include <scsc/scsc_logring.h>
static unsigned int source_addr = 0xd0300028;
module_param(source_addr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(source_addr, "Relative address of Location to sample (usually a register) - default: 0xd0300028. Loaded at /dev open");
static unsigned int num_bytes = 4;
module_param(num_bytes, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_bytes, "Number of significant octets (1,2 or 4) to log (lsbytes from source) - default: 4. Loaded at /dev open");
static unsigned int period_usecs;
module_param(period_usecs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(period_usecs, "Sampling period. 0 means as fast as possible (powers of 2 only) - default: 0. Loaded at /dev open");
static bool auto_start;
module_param(auto_start, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_start, "Start/stop sampling when service is started/stopped? - default: N. Loaded at /dev open");
static unsigned int buf_len = 512 * 1024;
module_param(buf_len, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_len, "Circular buffer length (octets, 2^n) in bytes - default: 524288. Loaded at /dev open");
static unsigned int kfifo_len = 4 * 1024 * 1024;
module_param(kfifo_len, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kfifo_len, "Kfifo buffer length (octets, 2^n) in bytes - default: 4194304. Loaded at /dev open");
static bool self_test;
module_param(self_test, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(self_test, "Execute self test by triggering a Kernel thread which writes into shared memory and then calls the irq handler - default: N. Loaded at /dev open");
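/*
* Illustrative only: the parameters above are sampled when the /dev node
* is opened, so a hypothetical load such as
*
*	insmod mx_dbg_sampler.ko source_addr=0xd0300028 num_bytes=4 \
*		period_usecs=0 buf_len=524288 auto_start=Y
*
* samples 4 octets from 0xd0300028 as fast as possible into a 512 KiB
* circular buffer, starting as soon as the service starts.
*/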
#define DRV_NAME "mx_dbg_sampler"
#define DEVICE_NAME "mx_dbg_sampler"
#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
#define VER_MAJOR 0
#define VER_MINOR 0
#define SCSC_MX_DEBUG_NODE 1
#define SCSC_MX_DEBUG_INTERFACES (5 * (SCSC_MX_DEBUG_NODE))
DECLARE_BITMAP(bitmap_dbg_sampler_minor, SCSC_MX_DEBUG_INTERFACES);
#define NO_ERROR 0
#define BUFFER_OVERFLOW 1
#define KFIFO_ERROR 2
#define KFIFO_FULL 3
struct mx_dbg_sampler_dev {
/* file pointer */
struct file *filp;
/* char device */
struct cdev cdev;
/*device pointer*/
struct device *dev;
/* mx_wlan_client */
struct scsc_service_client mx_client;
/*service pointer*/
struct scsc_service *service;
/*service pointer*/
scsc_mifram_ref ref;
/*mx pointer*/
struct scsc_mx *mx;
/* Associated kfifo */
DECLARE_KFIFO_PTR(fifo, u8);
/* Associated read_wait queue.*/
wait_queue_head_t read_wait;
/* Associated debug_buffer */
struct debug_sampler_config info;
/* Buffer read index */
u32 read_idx;
/* Device in error */
u8 error;
/* Device node spinlock for IRQ */
spinlock_t spinlock;
/* Device node mutex for fops */
struct mutex mutex;
/* To profile kfifo num elements */
u32 kfifo_max;
};
/**
* SCSC User Space debug sampler interface (singleton)
*/
static struct {
dev_t device;
struct class *class_mx_dbg_sampler;
struct mx_dbg_sampler_dev devs[SCSC_MX_DEBUG_INTERFACES];
} mx_dbg_sampler;
static int recovery_in_progress;
static void mx_dbg_sampler_stop_on_failure(struct scsc_service_client *client)
{
SCSC_TAG_INFO(MX_SAMPLER, "TODO\n");
recovery_in_progress = 1;
}
static void mx_dbg_sampler_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
{
(void)scsc_panic_code;
SCSC_TAG_INFO(MX_SAMPLER, "TODO\n");
}
static void mx_wlan_read_process(const void *data, size_t length, struct mx_dbg_sampler_dev *mx_dev)
{
int ret;
void *read_ptr;
u32 elements;
/* Adjust length for the kfifo element type (u8) */
elements = length;
if (mx_dev->filp) {
/* put string into the fifo */
if (kfifo_avail(&mx_dev->fifo) >= elements) {
/* Push values in Fifo*/
read_ptr = (void *)data + (mx_dev->read_idx & (buf_len - 1));
ret = kfifo_in(&mx_dev->fifo, read_ptr, elements);
mx_dev->read_idx += ret;
if (ret != elements || ret == 0) {
mx_dev->error = KFIFO_ERROR;
return;
}
ret = kfifo_len(&mx_dev->fifo);
if (ret > mx_dev->kfifo_max)
mx_dev->kfifo_max = ret;
} else {
/* Mask interrupt to avoid interrupt storm */
mx_dev->error = KFIFO_FULL;
return;
}
wake_up_interruptible(&mx_dev->read_wait);
}
/* Device is closed. Silently return */
}
static void mx_dbg_sampler_irq_handler(int irq, void *data)
{
struct mx_dbg_sampler_dev *mx_dev = (struct mx_dbg_sampler_dev *)data;
struct scsc_service *service = mx_dev->service;
u32 write_ref;
u32 data_ref;
void *write_ptr;
void *data_ptr;
u32 read_idx;
u32 write_idx;
size_t to_read;
unsigned long flags;
spin_lock_irqsave(&mx_dev->spinlock, flags);
/* check whether service has been released */
if (!mx_dev->service) {
spin_unlock_irqrestore(&mx_dev->spinlock, flags);
return;
}
read_idx = mx_dev->read_idx;
write_ref = mx_dev->info.buffer_info.write_index_offset;
write_ptr = scsc_mx_service_mif_addr_to_ptr(service, write_ref);
write_idx = *((u32 *)write_ptr);
to_read = abs((s32)write_idx - (s32)read_idx);
/* TODO: Decide whether we need to do the memdump on a workqueue/tasklet or just in the int handler */
if (to_read > mx_dev->info.buffer_info.buf_len) {
scsc_service_mifintrbit_bit_clear(service, irq);
scsc_service_mifintrbit_bit_mask(service, irq);
mx_dev->error = BUFFER_OVERFLOW;
goto end;
}
data_ref = mx_dev->info.buffer_info.buf_offset;
data_ptr = scsc_mx_service_mif_addr_to_ptr(service, data_ref);
mx_wlan_read_process(data_ptr, to_read, mx_dev); /* Clear interrupt */
scsc_service_mifintrbit_bit_clear(service, irq);
scsc_service_mifintrbit_bit_unmask(service, irq);
end:
spin_unlock_irqrestore(&mx_dev->spinlock, flags);
/* Mask if dev is in error */
/* We shouldn't be printing out lots of stuff here, but it is in error condition */
if (mx_dev->error != NO_ERROR) {
scsc_service_mifintrbit_bit_mask(service, irq);
if (mx_dev->error == BUFFER_OVERFLOW)
SCSC_TAG_ERR(MX_SAMPLER, "Error, Buffer Overflow %zu write_idx 0x%x read_idex 0x%x\n", to_read, write_idx, read_idx);
else if (mx_dev->error == KFIFO_ERROR)
SCSC_TAG_ERR(MX_SAMPLER, "Error Pushing values in kfifo\n");
else if (mx_dev->error == KFIFO_FULL)
SCSC_TAG_ERR(MX_SAMPLER, "Error kfifo is full\n");
}
}
static struct task_struct *mx_dbg_sampler_task;
#define BULK_DATA (16 * 1024)
int mx_dbg_sampler_thread(void *data)
{
struct mx_dbg_sampler_dev *dev = (struct mx_dbg_sampler_dev *)data;
struct scsc_service *service = dev->service;
u32 write;
u32 mem;
void *write_ptr;
u32 *mem_ptr;
u32 val;
u32 i;
u32 end;
while (!kthread_should_stop() && !(dev->error != NO_ERROR)) {
write = dev->info.buffer_info.write_index_offset;
write_ptr = scsc_mx_service_mif_addr_to_ptr(service, write);
val = *((u32 *)write_ptr);
val += BULK_DATA;
*((u32 *)write_ptr) = val;
end = BULK_DATA;
mem = dev->info.buffer_info.buf_offset;
mem_ptr = scsc_mx_service_mif_addr_to_ptr(service, mem);
mem_ptr += dev->read_idx / sizeof(u32);
for (i = 0; i < end / 4; i++)
*((u32 *)mem_ptr++) = 0x33323130;
mx_dbg_sampler_irq_handler(0, dev);
mdelay(100);
}
mx_dbg_sampler_task = NULL;
return 0;
}
static int mx_dbg_sampler_allocate_resources(struct scsc_service *service, struct mx_dbg_sampler_dev *mx_dev)
{
scsc_mifram_ref ref, ref_buffer, ref_index;
int ret = 0;
struct debug_sampler_align *mem;
/* Allocate memory */
ret = scsc_mx_service_mifram_alloc(service, buf_len + sizeof(struct debug_sampler_align), &ref, 64);
if (ret)
return -ENOMEM;
mem = (struct debug_sampler_align *)scsc_mx_service_mif_addr_to_ptr(service, ref);
/* Allocate interrupt */
ret = scsc_service_mifintrbit_register_tohost(service, mx_dbg_sampler_irq_handler, mx_dev);
if (ret < 0) {
SCSC_TAG_ERR(MX_SAMPLER, "Error allocating interrupt\n");
scsc_mx_service_mifram_free(service, ref);
return ret;
}
/* Populate the buffer_info */
mem->config.version = mx_dev->info.version = 0;
scsc_mx_service_mif_ptr_to_addr(service, &mem->mem, &ref_buffer);
mem->config.buffer_info.buf_offset = mx_dev->info.buffer_info.buf_offset = ref_buffer;
mem->config.buffer_info.buf_len = mx_dev->info.buffer_info.buf_len = buf_len;
scsc_mx_service_mif_ptr_to_addr(service, &mem->index, &ref_index);
mem->config.buffer_info.write_index_offset =
mx_dev->info.buffer_info.write_index_offset = ref_index;
/* Reset write index */
mem->index = 0;
mem->config.buffer_info.intr_num = mx_dev->info.buffer_info.intr_num = ret;
mem->config.sample_spec.source_addr = source_addr;
mem->config.sample_spec.num_bytes = num_bytes;
mem->config.sample_spec.period_usecs = period_usecs;
mem->config.auto_start = auto_start;
mx_dev->ref = ref;
/* Reset read index */
mx_dev->read_idx = 0;
return 0;
}
static int mx_dbg_sampler_free_resources(struct scsc_service *service, struct mx_dbg_sampler_dev *mx_dev)
{
if (self_test)
if (mx_dbg_sampler_task)
kthread_stop(mx_dbg_sampler_task);
scsc_service_mifintrbit_unregister_tohost(service,
mx_dev->info.buffer_info.intr_num);
scsc_mx_service_mifram_free(service,
mx_dev->ref);
return 0;
}
int mx_dbg_sampler_open(struct inode *inode, struct file *filp)
{
struct mx_dbg_sampler_dev *mx_dev;
int ret = 0;
mx_dev = container_of(inode->i_cdev, struct mx_dbg_sampler_dev, cdev);
if (mutex_lock_interruptible(&mx_dev->mutex))
return -ERESTARTSYS;
if (filp->private_data) {
SCSC_TAG_ERR(MX_SAMPLER, "Service already started\n");
goto end;
}
filp->private_data = mx_dev;
mx_dev->filp = filp;
/* Clear any remaining error */
mx_dev->error = NO_ERROR;
ret = kfifo_alloc(&mx_dev->fifo, kfifo_len, GFP_KERNEL);
if (ret) {
SCSC_TAG_ERR(MX_SAMPLER, "kfifo_alloc failed");
goto error;
}
mx_dev->service = scsc_mx_service_open(mx_dev->mx, SCSC_SERVICE_ID_DBG_SAMPLER, &mx_dev->mx_client, &ret);
if (!mx_dev->service) {
SCSC_TAG_ERR(MX_SAMPLER, "Error opening service is NULL\n");
kfifo_free(&mx_dev->fifo);
ret = -EIO;
goto error;
}
/* Allocate resources */
ret = mx_dbg_sampler_allocate_resources(mx_dev->service, mx_dev);
if (ret) {
SCSC_TAG_ERR(MX_SAMPLER, "Error Allocating resources\n");
kfifo_free(&mx_dev->fifo);
scsc_mx_service_close(mx_dev->service);
goto error;
}
ret = scsc_mx_service_start(mx_dev->service, mx_dev->ref);
if (ret) {
SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_start failed\n");
mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
kfifo_free(&mx_dev->fifo);
scsc_mx_service_close(mx_dev->service);
goto error;
}
/* WARNING: At this point we may be receiving interrupts from Maxwell */
/* Trigger the dummy thread to test the functionality */
if (self_test)
mx_dbg_sampler_task = kthread_run(mx_dbg_sampler_thread, (void *)mx_dev, "mx_dbg_sampler_thread");
SCSC_TAG_INFO(MX_SAMPLER, "%s: Sampling....\n", DRV_NAME);
mutex_unlock(&mx_dev->mutex);
return 0;
error:
filp->private_data = NULL;
mx_dev->filp = NULL;
mx_dev->service = NULL;
end:
mutex_unlock(&mx_dev->mutex);
return ret;
}
static ssize_t mx_dbg_sampler_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
unsigned int copied;
int ret = 0;
struct mx_dbg_sampler_dev *mx_dev;
mx_dev = filp->private_data;
if (mutex_lock_interruptible(&mx_dev->mutex))
return -EINTR;
/* Check whether the device is in error */
if (mx_dev->error != NO_ERROR) {
SCSC_TAG_ERR(MX_SAMPLER, "Device in error\n");
ret = -EIO;
goto end;
}
while (len) {
if (kfifo_len(&mx_dev->fifo)) {
ret = kfifo_to_user(&mx_dev->fifo, buf, len, &copied);
if (!ret)
ret = copied;
break;
}
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
ret = wait_event_interruptible(mx_dev->read_wait,
!kfifo_is_empty(&mx_dev->fifo));
if (ret < 0)
break;
}
end:
mutex_unlock(&mx_dev->mutex);
return ret;
}
static unsigned mx_dbg_sampler_poll(struct file *filp, poll_table *wait)
{
struct mx_dbg_sampler_dev *mx_dev;
int ret;
mx_dev = filp->private_data;
if (mutex_lock_interruptible(&mx_dev->mutex))
return POLLERR; /* poll returns an event mask, not an errno */
if (mx_dev->error != NO_ERROR) {
ret = POLLERR;
goto end;
}
poll_wait(filp, &mx_dev->read_wait, wait);
if (!kfifo_is_empty(&mx_dev->fifo)) {
ret = POLLIN | POLLRDNORM; /* readable */
goto end;
}
ret = POLLOUT | POLLWRNORM; /* writable */
end:
mutex_unlock(&mx_dev->mutex);
return ret;
}
int mx_dbg_sampler_release(struct inode *inode, struct file *filp)
{
struct mx_dbg_sampler_dev *mx_dev;
unsigned long flags;
int r;
mx_dev = container_of(inode->i_cdev, struct mx_dbg_sampler_dev, cdev);
if (mutex_lock_interruptible(&mx_dev->mutex))
return -EINTR;
if (mx_dev->filp == NULL) {
SCSC_TAG_ERR(MX_SAMPLER, "Device already closed\n");
mutex_unlock(&mx_dev->mutex);
return -EIO;
}
if (mx_dev != filp->private_data) {
SCSC_TAG_ERR(MX_SAMPLER, "Data mismatch\n");
mutex_unlock(&mx_dev->mutex);
return -EIO;
}
spin_lock_irqsave(&mx_dev->spinlock, flags);
filp->private_data = NULL;
mx_dev->filp = NULL;
kfifo_free(&mx_dev->fifo);
spin_unlock_irqrestore(&mx_dev->spinlock, flags);
if (mx_dev->service) {
r = scsc_mx_service_stop(mx_dev->service);
if (r)
SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_stop failed err: %d\n", r);
mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
scsc_mx_service_close(mx_dev->service);
spin_lock_irqsave(&mx_dev->spinlock, flags);
mx_dev->service = NULL;
spin_unlock_irqrestore(&mx_dev->spinlock, flags);
}
mutex_unlock(&mx_dev->mutex);
SCSC_TAG_INFO(MX_SAMPLER, "%s: Sampling... end. Kfifo_max = %d\n", DRV_NAME, mx_dev->kfifo_max);
return 0;
}
static const struct file_operations mx_dbg_sampler_fops = {
.owner = THIS_MODULE,
.open = mx_dbg_sampler_open,
.read = mx_dbg_sampler_read,
.release = mx_dbg_sampler_release,
.poll = mx_dbg_sampler_poll,
};
void mx_dbg_sampler_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
dev_t devn;
int ret, i = SCSC_MX_DEBUG_INTERFACES;
char dev_name[20];
long uid = 0;
int minor;
struct mx_dbg_sampler_dev *mx_dev;
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery_in_progress) {
SCSC_TAG_INFO(MX_SAMPLER, "Recovery remove - no recovery in progress\n");
return;
}
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery_in_progress) {
SCSC_TAG_INFO(MX_SAMPLER, "Recovery probe\n");
while (i--)
if (mx_dbg_sampler.devs[i].cdev.dev && mx_dbg_sampler.devs[i].mx) {
mx_dev = &mx_dbg_sampler.devs[i];
/* This should never be true, as the open device node should
* prevent unloading while the service is in use */
mx_dev->service = scsc_mx_service_open(mx_dev->mx, SCSC_SERVICE_ID_DBG_SAMPLER, &mx_dev->mx_client, &ret);
if (!mx_dev->service) {
SCSC_TAG_ERR(MX_SAMPLER, "Error opening service is NULL\n");
} else {
ret = scsc_mx_service_start(mx_dev->service, mx_dev->ref);
if (ret) {
SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_start failed\n");
mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
scsc_mx_service_close(mx_dev->service);
}
}
}
recovery_in_progress = 0;
} else {
/* Search for free minors */
minor = find_first_zero_bit(bitmap_dbg_sampler_minor, SCSC_MX_DEBUG_INTERFACES);
if (minor >= SCSC_MX_DEBUG_INTERFACES) {
SCSC_TAG_ERR(MX_SAMPLER, "minor %d > SCSC_TTY_MINORS\n", minor);
return;
}
#if 0
/* TODO GET UID */
if (kstrtol(dev_uid, 10, &uid)) {
SCSC_TAG_ERR(MX_SAMPLER, "Invalid device uid default to zero\n");
uid = 0;
}
#endif
devn = MKDEV(MAJOR(mx_dbg_sampler.device), MINOR(minor));
snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "debug_sampler");
cdev_init(&mx_dbg_sampler.devs[minor].cdev, &mx_dbg_sampler_fops);
mx_dbg_sampler.devs[minor].cdev.owner = THIS_MODULE;
mx_dbg_sampler.devs[minor].cdev.ops = &mx_dbg_sampler_fops;
ret = cdev_add(&mx_dbg_sampler.devs[minor].cdev, devn, 1);
if (ret) {
mx_dbg_sampler.devs[minor].cdev.dev = 0;
mx_dbg_sampler.devs[minor].dev = NULL;
return;
}
mx_dbg_sampler.devs[minor].dev = device_create(mx_dbg_sampler.class_mx_dbg_sampler, NULL, mx_dbg_sampler.devs[minor].cdev.dev, NULL, dev_name);
if (mx_dbg_sampler.devs[minor].dev == NULL) {
SCSC_TAG_ERR(MX_SAMPLER, "dev is NULL\n");
cdev_del(&mx_dbg_sampler.devs[minor].cdev);
return;
}
mx_dbg_sampler.devs[minor].mx = mx;
mx_dbg_sampler.devs[minor].mx_client.stop_on_failure = mx_dbg_sampler_stop_on_failure;
mx_dbg_sampler.devs[minor].mx_client.failure_reset = mx_dbg_sampler_failure_reset;
mutex_init(&mx_dbg_sampler.devs[minor].mutex);
spin_lock_init(&mx_dbg_sampler.devs[minor].spinlock);
mx_dbg_sampler.devs[minor].kfifo_max = 0;
init_waitqueue_head(&mx_dbg_sampler.devs[minor].read_wait);
/* Update bit mask */
set_bit(minor, bitmap_dbg_sampler_minor);
SCSC_TAG_INFO(MX_SAMPLER, "%s: Ready to start sampling....\n", DRV_NAME);
}
}
void mx_dbg_sampler_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
int i = SCSC_MX_DEBUG_INTERFACES, r;
struct mx_dbg_sampler_dev *mx_dev;
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery_in_progress) {
SCSC_TAG_INFO(MX_SAMPLER, "Recovery remove - no recovery in progress\n");
return;
}
if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery_in_progress) {
SCSC_TAG_INFO(MX_SAMPLER, "Recovery remove\n");
while (i--)
if (mx_dbg_sampler.devs[i].cdev.dev && mx_dbg_sampler.devs[i].mx) {
mx_dev = &mx_dbg_sampler.devs[i];
/* This should never be true, as the open device node should
* prevent unloading while the service is in use */
if (mx_dbg_sampler.devs[i].service) {
r = scsc_mx_service_stop(mx_dev->service);
if (r)
SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_stop failed err: %d\n", r);
scsc_mx_service_close(mx_dev->service);
}
}
} else {
while (i--)
if (mx_dbg_sampler.devs[i].mx == mx) {
device_destroy(mx_dbg_sampler.class_mx_dbg_sampler, mx_dbg_sampler.devs[i].cdev.dev);
cdev_del(&mx_dbg_sampler.devs[i].cdev);
memset(&mx_dbg_sampler.devs[i].cdev, 0, sizeof(struct cdev));
mx_dbg_sampler.devs[i].mx = NULL;
clear_bit(i, bitmap_dbg_sampler_minor);
}
}
}
/* Test client driver registration */
struct scsc_mx_module_client mx_dbg_sampler_driver = {
.name = "MX client test driver",
.probe = mx_dbg_sampler_probe,
.remove = mx_dbg_sampler_remove,
};
/* Test client driver registration */
static int __init mx_dbg_sampler_init(void)
{
int ret;
SCSC_TAG_INFO(MX_SAMPLER, "mx_dbg_sampler INIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
ret = alloc_chrdev_region(&mx_dbg_sampler.device, 0, SCSC_MX_DEBUG_INTERFACES, "mx_dbg_sampler_char");
if (ret)
goto error;
mx_dbg_sampler.class_mx_dbg_sampler = class_create(THIS_MODULE, DEVICE_NAME);
if (IS_ERR(mx_dbg_sampler.class_mx_dbg_sampler)) {
SCSC_TAG_ERR(MX_SAMPLER, "mx_dbg_sampler class creation failed\n");
ret = PTR_ERR(mx_dbg_sampler.class_mx_dbg_sampler);
goto error_class;
}
ret = scsc_mx_module_register_client_module(&mx_dbg_sampler_driver);
if (ret) {
SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_module_register_client_module failed: ret=%d\n", ret);
goto error_reg;
}
return 0;
error_reg:
class_destroy(mx_dbg_sampler.class_mx_dbg_sampler);
error_class:
unregister_chrdev_region(mx_dbg_sampler.device, SCSC_MX_DEBUG_INTERFACES);
error:
return ret;
}
/* module level */
static void __exit mx_dbg_sampler_unload(void)
{
int i = SCSC_MX_DEBUG_INTERFACES;
unsigned long flags;
struct mx_dbg_sampler_dev *mx_dev;
int r;
while (i--)
if (mx_dbg_sampler.devs[i].cdev.dev && mx_dbg_sampler.devs[i].mx) {
mx_dev = &mx_dbg_sampler.devs[i];
/* This should never be true, as the open device node should
* prevent unloading while the service is in use */
if (mx_dbg_sampler.devs[i].service) {
r = scsc_mx_service_stop(mx_dev->service);
if (r)
SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_stop failed err: %d\n", r);
mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
scsc_mx_service_close(mx_dev->service);
spin_lock_irqsave(&mx_dbg_sampler.devs[i].spinlock, flags);
mx_dbg_sampler.devs[i].filp = NULL;
kfifo_free(&mx_dbg_sampler.devs[i].fifo);
mx_dbg_sampler.devs[i].service = NULL;
spin_unlock_irqrestore(&mx_dev->spinlock, flags);
}
device_destroy(mx_dbg_sampler.class_mx_dbg_sampler, mx_dbg_sampler.devs[i].cdev.dev);
cdev_del(&mx_dbg_sampler.devs[i].cdev);
memset(&mx_dbg_sampler.devs[i].cdev, 0, sizeof(struct cdev));
mx_dbg_sampler.devs[i].mx = NULL;
clear_bit(i, bitmap_dbg_sampler_minor);
}
class_destroy(mx_dbg_sampler.class_mx_dbg_sampler);
unregister_chrdev_region(mx_dbg_sampler.device, SCSC_MX_DEBUG_INTERFACES);
SCSC_TAG_INFO(MX_SAMPLER, "mx_dbg_sampler EXIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
}
module_init(mx_dbg_sampler_init);
module_exit(mx_dbg_sampler_unload);
MODULE_DESCRIPTION("Samsung debug sampler Driver");
MODULE_AUTHOR("SLSI");
MODULE_LICENSE("GPL and additional rights");
/*
* MODULE_INFO(version, VER_MAJOR);
* MODULE_INFO(build, SLSI_BUILD_STRING);
* MODULE_INFO(release, SLSI_RELEASE_STRING);
*/

View file

@ -0,0 +1,114 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __MX_DBG_SAMPLER_H__
#define __MX_DBG_SAMPLER_H__
/**
* Debug Sampler DRAM Buffer descriptor.
*
* Initialised by Debug Sampler Driver on AP and passed by
* reference to Debug Sampler (Proxy) on R4 (by reference in
* WLAN config).
*
* Integer fields are LittleEndian.
*/
struct debug_sampler_buffer_info {
/**
* Offset of circular octet buffer w.r.t. shared dram start
*/
uint32_t buf_offset;
/**
* Circular buffer length (octets, 2^n)
*
* Default = 32 KiB
*/
uint32_t buf_len;
/**
* Offset of 32bit write index (not wrapped, counts octets) w.r.t. shared dram start
*/
uint32_t write_index_offset;
/**
* To-AP interrupt number (0..15)
*/
uint32_t intr_num;
};
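/*
* Illustrative sketch only: how the host consumes this buffer, mirroring
* mx_dbg_sampler_irq_handler(). The write index is free-running (never
* wrapped), so the reader keeps its own free-running read_idx and masks
* with (buf_len - 1) only when dereferencing; shared_base is hypothetical.
*
*	u32 write_idx = *(u32 *)(shared_base + write_index_offset);
*	u32 unread = write_idx - read_idx;	// octets not yet consumed
*	u8 *src = (u8 *)shared_base + buf_offset + (read_idx & (buf_len - 1));
*	// copy 'unread' octets (handling wrap), then: read_idx += unread;
*/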
struct debug_sampler_sample_spec {
/**
* Relative address of location to sample (usually a register)
*
* Default = 0x00000000
*/
uint32_t source_addr;
/**
* Number of significant octets (1,2 or 4) to log (lsbytes from source)
*
* Default = 4
*/
uint32_t num_bytes;
/**
* Sampling period.
*
* 0 means as fast as possible (powers of 2 only)
*
* Default = 0
*/
uint32_t period_usecs;
};
/**
* Debug Sampler Config Structure.
*
* This structure is allocated and initialised by the Debug Sampler driver
* on the AP and passed via the service_start message.
*/
struct debug_sampler_config {
/**
* Config Structure Version (= DBGSAMPLER_CONFIG_VERSION)
*
* Set by driver, checked by service.
*/
uint32_t version;
/**
* To-host circular buffer descriptor.
*/
struct debug_sampler_buffer_info buffer_info;
/**
* Init/default sampling specification.
*
* (There is also an API on R4 to allow dynamic specification
* change - e.g. by WLAN service)
*/
struct debug_sampler_sample_spec sample_spec;
/**
* Start/stop sampling when service is started/stopped?
*
* (There is also an API on R4 to allow dynamic start/stop
* - e.g. by WLAN service)
*
* Default = 0
*/
uint32_t auto_start;
};
struct debug_sampler_align {
struct debug_sampler_config config __aligned(4);
u32 index;
void *mem __aligned(64);
};
#endif /* __MX_DBG_SAMPLER_H__ */

View file

@ -0,0 +1,493 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <asm/page.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <scsc/scsc_logring.h>
#include <scsc/scsc_mx.h>
#include "scsc_mif_abs.h"
#include "mxman.h"
#include "scsc_mx_impl.h"
#include "gdb_transport.h"
#define DRV_NAME "mx_mmap"
#define DEVICE_NAME "maxwell_mmap"
#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
#define VER_MAJOR 0
#define VER_MINOR 0
#define SCSC_MMAP_NODE 1
#define SCSC_GDB_NODE 1
#define SCSC_MAX_INTERFACES (5 * (SCSC_MMAP_NODE + SCSC_GDB_NODE))
#define MAX_MEMORY 4194304UL /* 4M maximum memory */
DECLARE_BITMAP(bitmap_minor, SCSC_MAX_INTERFACES);
/* keep track of how many times it is mmapped */
struct mmap_info {
char *data; /* the data */
int reference; /* how many times it is mmapped */
};
struct mx_mmap_dev {
/* file pointer */
struct file *filp;
/* char device */
struct cdev cdev;
/*device pointer*/
struct device *dev;
/*mif_abs pointer*/
struct scsc_mif_abs *mif_abs;
/*mif_abs pointer*/
struct scsc_mx *mx;
/*mif_abs pointer*/
struct gdb_transport *gdb_transport;
/*memory cache*/
void *mem;
/* Associated kfifo */
struct kfifo fifo;
/* Associated read_wait queue.*/
wait_queue_head_t read_wait;
};
/**
* SCSC User Space mmap interface (singleton)
*/
static struct {
dev_t device;
struct class *class_mx_mmap;
struct mx_mmap_dev devs[SCSC_MAX_INTERFACES]; /*MMAP NODE + GDB NODE*/
} mx_mmap;
int mx_mmap_open(struct inode *inode, struct file *filp)
{
struct mx_mmap_dev *dev;
dev = container_of(inode->i_cdev, struct mx_mmap_dev, cdev);
filp->private_data = dev;
return 0;
}
/*
* This function maps the contiguous device mapped area to user space.
* It is specific to the device that was opened through this fd.
*/
int mx_mmap_mmap(struct file *filp, struct vm_area_struct *vma)
{
int err;
struct mx_mmap_dev *mx_dev;
uintptr_t pfn = 0;
if (vma->vm_end - vma->vm_start > MAX_MEMORY) {
SCSC_TAG_ERR(MX_MMAP, "Incorrect mapping size %ld, should be less than %ld\n",
vma->vm_end - vma->vm_start, MAX_MEMORY);
err = -EINVAL;
}
mx_dev = filp->private_data;
/* Get the memory */
mx_dev->mem = mx_dev->mif_abs->get_mifram_ptr(mx_dev->mif_abs, 0);
if (!mx_dev->mem)
return -ENODEV;
/* Get page frame number from virtual abstraction layer */
pfn = mx_dev->mif_abs->get_mifram_pfn(mx_dev->mif_abs);
/* remap kernel memory to userspace */
err = remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
return err;
}
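/*
* Illustrative user-space sketch only (the device name and the mapping
* length are hypothetical): mapping the shared memory through this node.
*
*	int fd = open("/dev/mx_0_mmap", O_RDWR);
*	void *shm = mmap(NULL, 4 * 1024 * 1024, PROT_READ | PROT_WRITE,
*			 MAP_SHARED, fd, 0);
*	// shm now aliases the DRAM region returned by get_mifram_ptr()
*/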
int mx_mmap_release(struct inode *inode, struct file *filp)
{
/* TODO : Unmap pfn_range */
return 0;
}
static const struct file_operations mx_mmap_fops = {
.owner = THIS_MODULE,
.open = mx_mmap_open,
.mmap = mx_mmap_mmap,
.release = mx_mmap_release,
};
int mx_gdb_open(struct inode *inode, struct file *filp)
{
struct mx_mmap_dev *mx_dev;
int ret;
mx_dev = container_of(inode->i_cdev, struct mx_mmap_dev, cdev);
filp->private_data = mx_dev;
mx_dev->filp = filp;
ret = kfifo_alloc(&mx_dev->fifo, GDB_TRANSPORT_BUF_LENGTH, GFP_KERNEL);
if (ret)
return -ENOMEM;
return 0;
}
static ssize_t mx_gdb_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
struct mx_mmap_dev *mx_dev;
void *kbuf;
mx_dev = filp->private_data;
/* Copy out of user space before handing the data to the transport */
kbuf = memdup_user(buf, len);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
gdb_transport_send(mx_dev->gdb_transport, kbuf, len);
kfree(kbuf);
return len;
}
static ssize_t mx_gdb_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
int ret = 0;
unsigned int copied;
struct mx_mmap_dev *mx_dev;
mx_dev = filp->private_data;
while (len) {
if (kfifo_len(&mx_dev->fifo)) {
ret = kfifo_to_user(&mx_dev->fifo, buf, len, &copied);
if (!ret)
ret = copied;
break;
}
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
ret = wait_event_interruptible(mx_dev->read_wait,
!kfifo_is_empty(&mx_dev->fifo));
if (ret < 0)
break;
}
return ret;
}
void gdb_read_callback(const void *message, size_t length, void *data)
{
struct mx_mmap_dev *mx_dev = (struct mx_mmap_dev *)data;
int ret;
if (mx_dev->filp) {
if (kfifo_avail(&mx_dev->fifo) >= length) {
ret = kfifo_in(&mx_dev->fifo, message, length);
/*SCSC_TAG_DEBUG(MX_MMAP, "num bytes pushed ret %d of %zu\n", ret, length);*/
if (ret != length) {
SCSC_TAG_ERR(MX_MMAP, "Unable to push into Kfifo Buffer");
return;
}
} else {
SCSC_TAG_ERR(MX_MMAP, "Kfifo Buffer Overflow");
return;
}
wake_up_interruptible(&mx_dev->read_wait);
} else
SCSC_TAG_ERR(MX_MMAP, "Device is closed. Dropping %zu octets",
length);
}
static unsigned mx_gdb_poll(struct file *filp, poll_table *wait)
{
struct mx_mmap_dev *mx_dev;
mx_dev = filp->private_data;
poll_wait(filp, &mx_dev->read_wait, wait);
if (!kfifo_is_empty(&mx_dev->fifo))
return POLLIN | POLLRDNORM; /* readable */
return POLLOUT | POLLWRNORM; /* writable */
}
int mx_gdb_release(struct inode *inode, struct file *filp)
{
struct mx_mmap_dev *mx_dev;
mx_dev = container_of(inode->i_cdev, struct mx_mmap_dev, cdev);
if (mx_dev->filp == NULL) {
SCSC_TAG_ERR(MX_MMAP, "Device already closed\n");
return -EIO;
}
if (mx_dev != filp->private_data) {
SCSC_TAG_ERR(MX_MMAP, "Data mismatch\n");
return -EIO;
}
filp->private_data = NULL;
mx_dev->filp = NULL;
kfifo_free(&mx_dev->fifo);
return 0;
}
static const struct file_operations mx_gdb_fops = {
.owner = THIS_MODULE,
.open = mx_gdb_open,
.write = mx_gdb_write,
.read = mx_gdb_read,
.release = mx_gdb_release,
.poll = mx_gdb_poll,
};
/*
* Probe callback: creates a GDB char device node when a new GDB transport
* channel is announced.
*/
void client_gdb_probe(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport, char *dev_uid)
{
dev_t devn;
int ret;
char dev_name[20];
struct mx_mmap_dev *mx_dev;
long uid = 0;
int minor;
/************/
/* GDB node */
/************/
/* Search for free minors */
minor = find_first_zero_bit(bitmap_minor, SCSC_MAX_INTERFACES);
if (minor >= SCSC_MAX_INTERFACES) {
SCSC_TAG_ERR(MX_MMAP, "minor %d > SCSC_TTY_MINORS\n", minor);
return;
}
if (kstrtol(dev_uid, 10, &uid)) {
SCSC_TAG_ERR(MX_MMAP, "Invalid device uid default to zero\n");
uid = 0;
}
devn = MKDEV(MAJOR(mx_mmap.device), MINOR(minor));
if (gdb_transport->type == GDB_TRANSPORT_M4)
snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "m4_gdb");
else
snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "r4_gdb");
cdev_init(&mx_mmap.devs[minor].cdev, &mx_gdb_fops);
mx_mmap.devs[minor].cdev.owner = THIS_MODULE;
mx_mmap.devs[minor].cdev.ops = &mx_gdb_fops;
ret = cdev_add(&mx_mmap.devs[minor].cdev, devn, 1);
if (ret) {
SCSC_TAG_ERR(MX_MMAP, "cdev_add failed for device %s\n", dev_name);
mx_mmap.devs[minor].cdev.dev = 0;
mx_mmap.devs[minor].dev = NULL;
return;
}
mx_mmap.devs[minor].dev = device_create(mx_mmap.class_mx_mmap, NULL, mx_mmap.devs[minor].cdev.dev, NULL, dev_name);
if (mx_mmap.devs[minor].dev == NULL) {
cdev_del(&mx_mmap.devs[minor].cdev);
return;
}
mx_dev = &mx_mmap.devs[minor];
mx_mmap.devs[minor].gdb_transport = gdb_transport;
gdb_transport_register_channel_handler(gdb_transport, gdb_read_callback, (void *)mx_dev);
init_waitqueue_head(&mx_mmap.devs[minor].read_wait);
/* Update bit mask */
set_bit(minor, bitmap_minor);
}
void client_gdb_remove(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport)
{
int i = SCSC_MAX_INTERFACES;
while (i--)
if (mx_mmap.devs[i].gdb_transport == gdb_transport) {
device_destroy(mx_mmap.class_mx_mmap, mx_mmap.devs[i].cdev.dev);
cdev_del(&mx_mmap.devs[i].cdev);
memset(&mx_mmap.devs[i].cdev, 0, sizeof(struct cdev));
mx_mmap.devs[i].gdb_transport = NULL;
clear_bit(i, bitmap_minor);
}
}
/* Test client driver registration */
struct gdb_transport_client client_gdb_driver = {
.name = "GDB client driver",
.probe = client_gdb_probe,
.remove = client_gdb_remove,
};
void scsc_mx_mmap_module_probe(struct scsc_mif_mmap_driver *abs_driver, struct scsc_mif_abs *mif_abs)
{
dev_t devn;
int ret;
char dev_name[20];
char *dev_uid;
long uid = 0;
int minor = 0;
/* Search for free minors */
minor = find_first_zero_bit(bitmap_minor, SCSC_MAX_INTERFACES);
if (minor >= SCSC_MAX_INTERFACES) {
SCSC_TAG_ERR(MX_MMAP, "minor %d > SCSC_TTY_MINORS\n", minor);
return;
}
/*************/
/* MMAP node */
/*************/
dev_uid = mif_abs->get_uid(mif_abs);
if (kstrtol(dev_uid, 10, &uid))
uid = 0;
devn = MKDEV(MAJOR(mx_mmap.device), MINOR(minor));
snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "mmap");
cdev_init(&mx_mmap.devs[minor].cdev, &mx_mmap_fops);
mx_mmap.devs[minor].cdev.owner = THIS_MODULE;
mx_mmap.devs[minor].cdev.ops = &mx_mmap_fops;
ret = cdev_add(&mx_mmap.devs[minor].cdev, devn, 1);
if (ret) {
SCSC_TAG_ERR(MX_MMAP, "cdev_add failed for device %s\n", dev_name);
mx_mmap.devs[minor].cdev.dev = 0;
mx_mmap.devs[minor].dev = NULL;
return;
}
mx_mmap.devs[minor].dev = device_create(mx_mmap.class_mx_mmap, NULL, mx_mmap.devs[minor].cdev.dev, NULL, dev_name);
if (mx_mmap.devs[minor].dev == NULL) {
cdev_del(&mx_mmap.devs[minor].cdev);
return;
}
mx_mmap.devs[minor].mif_abs = mif_abs;
mx_mmap.devs[minor].mem = mif_abs->get_mifram_ptr(mif_abs, 0);
/* Update bit mask */
set_bit(minor, bitmap_minor);
}
void scsc_mx_mmap_module_remove(struct scsc_mif_abs *mif_abs)
{
int i = SCSC_MAX_INTERFACES;
while (i--)
if (mx_mmap.devs[i].mif_abs == mif_abs) {
device_destroy(mx_mmap.class_mx_mmap, mx_mmap.devs[i].cdev.dev);
cdev_del(&mx_mmap.devs[i].cdev);
memset(&mx_mmap.devs[i].cdev, 0, sizeof(struct cdev));
mx_mmap.devs[i].mif_abs = NULL;
clear_bit(i, bitmap_minor);
}
}
static struct scsc_mif_mmap_driver mx_module_mmap_if = {
.name = "Maxwell mmap Driver",
.probe = scsc_mx_mmap_module_probe,
.remove = scsc_mx_mmap_module_remove,
};
static int __init mx_mmap_init(void)
{
int ret;
SCSC_TAG_INFO(MX_MMAP, "mx_mmap INIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
ret = alloc_chrdev_region(&mx_mmap.device, 0, SCSC_MAX_INTERFACES, "mx_mmap_char");
if (ret)
goto error;
mx_mmap.class_mx_mmap = class_create(THIS_MODULE, DEVICE_NAME);
if (IS_ERR(mx_mmap.class_mx_mmap)) {
ret = PTR_ERR(mx_mmap.class_mx_mmap);
goto error_class;
}
scsc_mif_mmap_register(&mx_module_mmap_if);
ret = gdb_transport_register_client(&client_gdb_driver);
if (ret)
SCSC_TAG_ERR(MX_MMAP, "scsc_mx_module_register_client_module failed: r=%d\n", ret);
return 0;
error_class:
unregister_chrdev_region(mx_mmap.device, SCSC_MAX_INTERFACES);
error:
return ret;
}
static void __exit mx_mmap_cleanup(void)
{
int i = SCSC_MAX_INTERFACES;
while (i--)
if (mx_mmap.devs[i].cdev.dev) {
device_destroy(mx_mmap.class_mx_mmap, mx_mmap.devs[i].cdev.dev);
cdev_del(&mx_mmap.devs[i].cdev);
memset(&mx_mmap.devs[i].cdev, 0, sizeof(struct cdev));
clear_bit(i, bitmap_minor);
}
class_destroy(mx_mmap.class_mx_mmap);
unregister_chrdev_region(mx_mmap.device, SCSC_MAX_INTERFACES);
SCSC_TAG_INFO(MX_MMAP, "mx_mmap EXIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
gdb_transport_unregister_client(&client_gdb_driver);
/* Notify lower layers that we are unloading */
scsc_mif_mmap_unregister(&mx_module_mmap_if);
}
module_init(mx_mmap_init);
module_exit(mx_mmap_cleanup);
MODULE_DESCRIPTION("Samsung MMAP/GDB Driver");
MODULE_AUTHOR("SLSI");
MODULE_LICENSE("GPL and additional rights");
/*
* MODULE_INFO(version, VER_MAJOR);
* MODULE_INFO(build, SLSI_BUILD_STRING);
* MODULE_INFO(release, SLSI_RELEASE_STRING);
*/

View file

@ -0,0 +1,145 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* mx140 Infrastructure Configuration Structure.
*
* Used to pass configuration data from AP to R4 infrastructure
* on Maxwell Subsystem startup.
*
* Notes:
*
* - All multi-octet integers shall be stored LittleEndian.
*
* - All location fields ("*_loc") are 32 bit octet offsets w.r.t. the R4
* address map. They can therefore refer to DRAM memory or Mailbox registers.
*
* - "typedefs" are avoided to allow inclusion in linux source code.
*/
#ifndef MXCONF_H__
#define MXCONF_H__
/* Uses */
/* Definitions */
/**
* Config structure magic number.
*
* The AP writes this value and the R4 checks it to trap endian mismatches.
*/
#define MXCONF_MAGIC 0x79828486
/**
* Config structure version
*
* The AP writes these values and the R4 checks them to trap config structure
* mismatches.
*/
#define MXCONF_VERSION_MAJOR 0
#define MXCONF_VERSION_MINOR 1
/* Macros */
#define GCC_PACKED __attribute__((packed))
/* Types */
/**
* Maxwell Circular Packet Buffer Configuration.
*/
GCC_PACKED struct mxcbufconf {
scsc_mifram_ref buffer_loc; /**< Location of allocated buffer in DRAM */
uint32_t num_packets; /**< Total number of packets that can be stored in the buffer */
uint32_t packet_size; /**< Size of each individual packet within the buffer */
scsc_mifram_ref read_index_loc; /**< Location of 32bit read index in DRAM or Mailbox */
scsc_mifram_ref write_index_loc; /**< Location of 32bit write index */
};
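/*
* Illustrative sketch only: with free-running indices, a consumer of such
* a buffer would typically derive occupancy as
*
*	used_packets = (write_index - read_index) % num_packets;
*	free_packets = num_packets - used_packets;
*
* (whether one slot is reserved to distinguish full from empty is a
* property of the stream implementation, not of this header).
*/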
/**
* Maxwell Management Simplex Stream Configuration
*
* A circular buffer plus a pair of R/W signaling bits.
*/
GCC_PACKED struct mxstreamconf {
/** Circular Packet Buffer configuration */
struct mxcbufconf buf_conf;
/** Allocated MIF Interrupt Read Bit Index */
uint8_t read_bit_idx;
/** Allocated MIF Interrupt Write Bit Index */
uint8_t write_bit_idx;
};
/**
* Maxwell Management Transport Configuration
*
* A pair of simplex streams.
*/
GCC_PACKED struct mxtransconf {
struct mxstreamconf to_ap_stream_conf;
struct mxstreamconf from_ap_stream_conf;
};
/**
* Maxwell Infrastructure Configuration Version
*/
GCC_PACKED struct mxconfversion {
uint16_t major;
uint16_t minor;
};
/**
* Mxlog Event Buffer Configuration.
*
* A circular buffer. Size must be a power of 2.
*/
GCC_PACKED struct mxlogconf
{
struct mxstreamconf stream_conf;
};
/**
* Maxwell Infrastructure Configuration
*/
GCC_PACKED struct mxconf {
/**
* Config Magic Number
*
* Always 1st field in config.
*/
uint32_t magic;
/**
* Config Version.
*
* Always second field in config.
*/
struct mxconfversion version;
/**
* MX Management Message Transport Configuration.
*/
struct mxtransconf mx_trans_conf;
/**
* MX Management GDB Message Transport Configuration.
*/
/* Cortex-R4 channel */
struct mxtransconf mx_trans_conf_gdb_r4;
/* Cortex-M4 channel */
struct mxtransconf mx_trans_conf_gdb_m4;
/**
* Mxlog Event Buffer Configuration.
*/
struct mxlogconf mxlogconf;
};
#endif /* MXCONF_H__ */

View file

@ -0,0 +1,304 @@
/*****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <scsc/scsc_logring.h>
#include <scsc/scsc_mx.h>
#include "scsc_mx_impl.h"
#include "mxmgmt_transport.h"
#include "mxlog_transport.h"
#include "mxlog.h"
/*
* Receive handler for messages from the FW along the maxwell management transport
*/
static inline void mxlog_phase4_message_handler(const void *message,
size_t length, u32 level,
void *data)
{
unsigned char *buf = (unsigned char *)message;
SCSC_TAG_LVL(MX_FW, level, "%d: %s\n", (int)length, buf);
}
/**
* This function is used to parse a NULL terminated format string
* and report on the provided output bitmaps smap/lmap which args
* are 'long' and which are signed... as in %ld.
*/
static inline void build_len_sign_maps(char *fmt, u32 *smap, u32 *lmap)
{
u32 p = 0;
char *s = fmt;
if (!s)
return;
for (; *s != '\0'; ++s) {
if (*s != '%')
continue;
if (*++s == 'l') {
*lmap |= (1 << p);
++s;
}
if (*s == 'd')
*smap |= (1 << p);
p++;
}
}
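/*
* Illustrative example only: for fmt = "%d %lu %ld" the function above
* yields smap = 0b101 (args 0 and 2 are signed) and lmap = 0b110 (args 1
* and 2 are 'long'); MXLOG_CAST uses these maps to widen each 32bit arg.
*/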
/**
* The binary protocol described at:
*
* http://wiki/Maxwell_common_firmware/Mxlog#Phase_5_:_string_decoded_on_the_host
*
* states that we'd receive the following record content on each mxlog
* message from FW, where:
*
* - each element is a 32bit word
* - 1st element is a record header
* - len = number of elements following the first element
*
* | 1st | 2nd | 3rd | 4th | 5th | 6th
* -----------------------------------------------------------
* | sync|lvl|len || tstamp || offset || arg1 || arg2 || arg3.
* -----------------------------------------------------------
* | e l o g m s g |
*
* BUT NOTE THAT: here we DO NOT receive 1st header element BUT
* instead we got:
* @message: pointer to 2nd element
* @length: in bytes of the message (so starting from 2nd element) and
* including tstamp and offset elements: we must calculate
* num_args accordingly.
* @level: the debug level already remapped from FW to Kernel namespace
*/
static inline void mxlog_phase5_message_handler(const void *message,
size_t length, u32 level,
void *data)
{
struct mxlog *mxlog = (struct mxlog *)data;
struct mxlog_event_log_msg *elogmsg =
(struct mxlog_event_log_msg *)message;
if (length < MINIMUM_MXLOG_MSG_LEN_BYTES)
return;
if (mxlog && elogmsg) {
int num_args = 0;
char spare[MAX_SPARE_FMT + TSTAMP_LEN] = {};
char *fmt = NULL;
size_t fmt_sz = 0;
u32 smap = 0, lmap = 0;
u32 *args = NULL;
/* Check OFFSET sanity... beware of FW guys :D ! */
if (elogmsg->offset >= mxlog->logstrings->size) {
SCSC_TAG_ERR(MX_FW,
"Received message OFFSET(%d) is OUT OF range(%zd)...skip..\n",
elogmsg->offset, mxlog->logstrings->size);
return;
}
args = (u32 *)(elogmsg + 1);
num_args =
(length - MINIMUM_MXLOG_MSG_LEN_BYTES) /
MXLOG_ELEMENT_SIZE;
fmt = (char *)(mxlog->logstrings->data + elogmsg->offset);
/* Avoid being fooled by a NON NULL-terminated strings too ! */
fmt_sz = strnlen(fmt,
mxlog->logstrings->size - elogmsg->offset);
if (fmt_sz >= MAX_SPARE_FMT - 1) {
SCSC_TAG_ERR(MX_FW,
"UNSUPPORTED message length %zd ... truncated.\n",
fmt_sz);
fmt_sz = MAX_SPARE_FMT - 2;
}
/* Pre-Process fmt string to be able to do proper casting */
if (num_args)
build_len_sign_maps(fmt, &smap, &lmap);
/* Add FW provided tstamp on front and proper \n at
* the end when needed
*/
snprintf(spare, MAX_SPARE_FMT + TSTAMP_LEN - 2, "%08X %s%c",
elogmsg->timestamp, fmt,
(fmt[fmt_sz] != '\n') ? '\n' : '\0');
fmt = spare;
switch (num_args) {
case 0:
SCSC_TAG_LVL(MX_FW, level, fmt);
break;
case 1:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap));
break;
case 2:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap));
break;
case 3:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap),
MXLOG_CAST(args[2], 2, smap, lmap));
break;
case 4:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap),
MXLOG_CAST(args[2], 2, smap, lmap),
MXLOG_CAST(args[3], 3, smap, lmap));
break;
case 5:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap),
MXLOG_CAST(args[2], 2, smap, lmap),
MXLOG_CAST(args[3], 3, smap, lmap),
MXLOG_CAST(args[4], 4, smap, lmap));
break;
case 6:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap),
MXLOG_CAST(args[2], 2, smap, lmap),
MXLOG_CAST(args[3], 3, smap, lmap),
MXLOG_CAST(args[4], 4, smap, lmap),
MXLOG_CAST(args[5], 5, smap, lmap));
break;
case 7:
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap),
MXLOG_CAST(args[2], 2, smap, lmap),
MXLOG_CAST(args[3], 3, smap, lmap),
MXLOG_CAST(args[4], 4, smap, lmap),
MXLOG_CAST(args[5], 5, smap, lmap),
MXLOG_CAST(args[6], 6, smap, lmap));
break;
case 8:
default:
if (num_args > MAX_MX_LOG_ARGS)
SCSC_TAG_ERR(MX_FW,
"MXLOG: Too many args:%d ... print only first %d\n",
num_args, MAX_MX_LOG_ARGS);
SCSC_TAG_LVL(MX_FW, level, fmt,
MXLOG_CAST(args[0], 0, smap, lmap),
MXLOG_CAST(args[1], 1, smap, lmap),
MXLOG_CAST(args[2], 2, smap, lmap),
MXLOG_CAST(args[3], 3, smap, lmap),
MXLOG_CAST(args[4], 4, smap, lmap),
MXLOG_CAST(args[5], 5, smap, lmap),
MXLOG_CAST(args[6], 6, smap, lmap),
MXLOG_CAST(args[7], 7, smap, lmap));
break;
}
}
}
/* A generic message handler to multiplex between phases */
static void mxlog_message_handler(u8 phase, const void *message,
size_t length, u32 level, void *data)
{
struct mxlog *mxlog = (struct mxlog *)data;
if (!mxlog) {
SCSC_TAG_ERR(MX_FW, "Missing MXLOG reference.\n");
return;
}
switch (phase) {
case MX_LOG_PHASE_4:
mxlog_phase4_message_handler(message, length, level, data);
break;
case MX_LOG_PHASE_5:
if (mxlog->logstrings)
mxlog_phase5_message_handler(message, length,
level, data);
else
SCSC_TAG_ERR(MX_FW,
"Missing LogStrings...dropping incoming PHASE5 message !\n");
break;
default:
SCSC_TAG_ERR(MX_FW,
"MXLOG Unsupported phase %d ... dropping message !\n",
phase);
break;
}
}
static int mxlog_header_parser(u32 header, u8 *phase,
u8 *level, u32 *num_bytes)
{
u32 fw2kern_map[] = {
0, /* 0 MX_ERROR --> 0 KERN_EMERG .. it's panic.*/
4, /* 1 MX_WARN --> 4 KERN_WARNING */
5, /* 2 MX_MAJOR --> 5 KERN_NOTICE */
6, /* 3 MX_MINOR --> 6 KERN_INFO */
7, /* 4 MX_DETAIL --> 7 KERN_DEBUG */
};
u16 sync = ((header & 0xFFFF0000) >> 16);
switch (sync) {
case SYNC_VALUE_PHASE_4:
*phase = MX_LOG_PHASE_4;
/* The len field counts the number of chars (bytes) */
*num_bytes = header & 0x000000FF;
break;
case SYNC_VALUE_PHASE_5:
*phase = MX_LOG_PHASE_5;
/* The len field counts the number of 4-byte words */
*num_bytes = (header & 0x000000FF) * 4;
break;
default:
return -1;
}
/* Remap FW debug levels to KERN debug levels domain */
*level = (header & 0x0000FF00) >> 8;
if (*level < ARRAY_SIZE(fw2kern_map)) {
*level = fw2kern_map[*level];
} else {
SCSC_TAG_ERR(MX_FW,
"UNKNOWN MX debug level %d ... marking as MX_DETAIL.\n",
*level);
*level = fw2kern_map[ARRAY_SIZE(fw2kern_map) - 1];
}
return 0;
}
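/*
* Illustrative example only: decoding a hypothetical header 0x96690310
* with the parser above gives
*
*	sync 0x9669 -> MX_LOG_PHASE_5
*	num_bytes = 0x10 * 4 = 64 (phase 5 len counts 4-byte words)
*	level 0x03 (MX_MINOR) -> fw2kern_map[3] = 6 (KERN_INFO)
*/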
void mxlog_init(struct mxlog *mxlog, struct scsc_mx *mx)
{
int ret = 0;
mxlog->mx = mx;
mxlog->index = 0;
mxlog->logstrings = NULL;
/* File is in f/w profile directory */
ret = mx140_file_request_debug_conf(mx,
(const struct firmware **)&mxlog->logstrings,
MX_LOG_LOGSTRINGS_PATH);
if (!ret && mxlog->logstrings)
SCSC_TAG_INFO(MX_FW, "Loaded %zd bytes of log-strings from %s\n",
mxlog->logstrings->size, MX_LOG_LOGSTRINGS_PATH);
else
SCSC_TAG_ERR(MX_FW, "Failed to read %s needed by MXlog Phase 5\n",
MX_LOG_LOGSTRINGS_PATH);
/* Registering a generic channel handler */
mxlog_transport_register_channel_handler(scsc_mx_get_mxlog_transport(mx),
&mxlog_header_parser,
&mxlog_message_handler, mxlog);
}
void mxlog_release(struct mxlog *mxlog)
{
mxlog_transport_register_channel_handler(scsc_mx_get_mxlog_transport(mxlog->mx),
NULL, NULL, NULL);
if (mxlog->logstrings)
mx140_release_file(mxlog->mx, mxlog->logstrings);
mxlog->logstrings = NULL;
}

View file

@ -0,0 +1,61 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef _MXLOG_H
#define _MXLOG_H
#include <linux/firmware.h>
#define MX_LOG_PHASE_4 4
#define MX_LOG_PHASE_5 5
#define SYNC_VALUE_PHASE_4 (0xA55A)
#define SYNC_VALUE_PHASE_5 (0x9669)
#define MXLOG_BUFFER_SIZE 512
#define MINIMUM_MXLOG_MSG_LEN_BYTES (sizeof(u32) * 2)
#define MXLOG_ELEMENT_SIZE (sizeof(u32))
#define MAX_SPARE_FMT 256
#define TSTAMP_LEN 9
#define MAX_MX_LOG_ARGS 8
#define MX_LOG_LOGSTRINGS_PATH "common/log-strings.bin" /* in f/w debug dir */
#define MXLOG_SEXT(x) (((x) & 0x80000000) ? ((x) | 0xffffffff00000000) : (x))
/**
* We must process MXLOG messages 32bit-args coming from FW that have
* a different fmt string interpretation in Kernel:
*
* FW KERN MXLOG_CAST
* ---------------------------------------------------------
* %d s16 s32 (s16)
* %u %x u16 u32 (u16)
* %ld s32 s64 (SIGN_EXT((s64)))
* %lu u32 u64 (u64)
*
*/
#define MXLOG_CAST(x, p, smap, lmap) \
(((smap) & 1 << (p)) ? \
(((lmap) & 1 << (p)) ? MXLOG_SEXT((s64)(x)) : (s16)(x)) : \
(((lmap) & 1 << (p)) ? (u64)(x) : (u16)(x)))
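/*
* Illustrative example only: for x = 0xFFFFFFFF at position p with both
* the smap and lmap bits set (FW used %ld), MXLOG_CAST sign-extends x to
* the 64bit value -1; with only the lmap bit set (%lu) it widens to
* 0x00000000FFFFFFFF instead.
*/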
struct mxlog_event_log_msg {
u32 timestamp;
u32 offset;
} __packed;
struct mxlog;
void mxlog_init(struct mxlog *mxlog, struct scsc_mx *mx);
void mxlog_release(struct mxlog *mxlog);
struct mxlog {
struct scsc_mx *mx;
u8 buffer[MXLOG_BUFFER_SIZE];
u16 index;
struct firmware *logstrings;
};
#endif /* _MXLOG_H */

View file

@ -0,0 +1,280 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/** Uses */
#include <linux/module.h>
#include <linux/slab.h>
#include <scsc/scsc_logring.h>
#include "scsc_mif_abs.h"
#include "mifintrbit.h"
/** Implements */
#include "mxlog_transport.h"
#define MXLOG_TRANSPORT_BUF_LENGTH (4 * 1024)
#define MXLOG_TRANSPORT_PACKET_SIZE (4)
/* Flag that an error has occurred so the I/O thread processing should stop */
void mxlog_transport_set_error(struct mxlog_transport *mxlog_transport)
{
SCSC_TAG_WARNING(MXLOG_TRANS, "I/O thread processing is suspended\n");
mxlog_transport->mxlog_thread.block_thread = 1;
}
static void input_irq_handler(int irq, void *data)
{
struct mxlog_transport *mxlog_transport = (struct mxlog_transport *)data;
struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
struct scsc_mif_abs *mif_abs;
SCSC_TAG_DEBUG(MXLOG_TRANS, "mxlog intr\n");
/* Clear the interrupt first to ensure we can't possibly miss one */
mif_abs = scsc_mx_get_mif_abs(mxlog_transport->mx);
mif_abs->irq_bit_clear(mif_abs, irq);
/* The other side wrote some data to the input stream; wake up the
* thread that deals with this. */
if (th->task == NULL) {
SCSC_TAG_ERR(MXLOG_TRANS, "mxlog_thread is NOT running\n");
return;
}
/*
* If an error has occurred, we silently discard all messages from the stream
* until the error has been processed and the system has been reinitialised.
*/
if (th->block_thread == 1) {
SCSC_TAG_DEBUG(MXLOG_TRANS, "discard message.\n");
/*
* Do not try to acknowledge a pending interrupt here.
* This function is called by a function which in turn can be
* running in an atomic or 'disabled irq' level.
*/
return;
}
th->wakeup_flag = 1;
/* wake up I/O thread */
wake_up_interruptible(&th->wakeup_q);
}
static void thread_wait_until_stopped(struct mxlog_transport *mxlog_transport)
{
struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
/*
* kthread_stop() cannot handle the th exiting while
* kthread_should_stop() is false, so sleep until kthread_stop()
* wakes us up.
*/
SCSC_TAG_DEBUG(MXLOG_TRANS, "%s waiting for the stop signal.\n", th->name);
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop()) {
SCSC_TAG_DEBUG(MXLOG_TRANS, "%s schedule....\n", th->name);
schedule();
}
th->task = NULL;
SCSC_TAG_DEBUG(MXLOG_TRANS, "%s exiting....\n", th->name);
}
/**
* A thread that forwards messages sent across the transport to
* the registered handlers for each channel.
*/
static int mxlog_thread_function(void *arg)
{
struct mxlog_transport *mxlog_transport = (struct mxlog_transport *)arg;
struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
int ret;
u32 header;
char *buf = NULL;
size_t buf_sz = MXLOG_TRANSPORT_BUF_LENGTH;
complete(&th->completion);
th->block_thread = 0;
buf = kmalloc(buf_sz, GFP_KERNEL);
if (!buf) {
SCSC_TAG_ERR(MXLOG_TRANS, "failed to allocate the message buffer\n");
return 0;
}
while (!kthread_should_stop()) {
/* wait until an error occurs, or we need to process something. */
ret = wait_event_interruptible(th->wakeup_q,
(th->wakeup_flag && !th->block_thread) ||
kthread_should_stop());
if (kthread_should_stop()) {
SCSC_TAG_DEBUG(MXLOG_TRANS, "signalled to exit\n");
break;
}
if (ret < 0) {
SCSC_TAG_DEBUG(MXLOG_TRANS,
"wait_event returned %d, thread will exit\n", ret);
thread_wait_until_stopped(mxlog_transport);
break;
}
th->wakeup_flag = 0;
SCSC_TAG_DEBUG(MXLOG_TRANS, "wokeup: r=%d\n", ret);
if (!mxlog_transport->header_handler_fn) {
/* Invalid header handler: log it and stop the I/O thread */
SCSC_TAG_WARNING(MXLOG_TRANS,
"mxlog_transport->header_handler_fn == NULL\n");
kfree(buf);
return 0;
}
while (mif_stream_read(&mxlog_transport->mif_stream,
&header, sizeof(uint32_t))) {
u8 level = 0;
u8 phase = 0;
u32 num_bytes = 0;
mutex_lock(&mxlog_transport->lock);
if (!mxlog_transport->header_handler_fn) {
/* NULL header handler: the channel has been released */
SCSC_TAG_WARNING(MXLOG_TRANS,
"mxlog_transport->header_handler_fn == NULL. Channel has been released\n");
kfree(buf);
mutex_unlock(&mxlog_transport->lock);
return 0;
}
/**
* A generic header processor will properly retrieve
* level and num_bytes as specifically implemented by the phase.
*/
if (mxlog_transport->header_handler_fn(header, &phase,
&level, &num_bytes)) {
SCSC_TAG_ERR(MXLOG_TRANS,
"Bad sync in header: header=0x%08x\n", header);
kfree(buf);
mutex_unlock(&mxlog_transport->lock);
return 0;
}
if (num_bytes > 0 &&
num_bytes < (MXLOG_TRANSPORT_BUF_LENGTH - sizeof(uint32_t))) {
u32 ret_bytes = 0;
/* 2nd read - payload (msg) */
ret_bytes = mif_stream_read(&mxlog_transport->mif_stream,
buf, num_bytes);
mxlog_transport->channel_handler_fn(phase, buf,
ret_bytes,
level,
mxlog_transport->channel_handler_data);
} else {
SCSC_TAG_ERR(MXLOG_TRANS,
"Bad num_bytes(%d) in header: header=0x%08x\n",
num_bytes, header);
}
mutex_unlock(&mxlog_transport->lock);
}
}
SCSC_TAG_INFO(MXLOG_TRANS, "exiting....\n");
kfree(buf);
complete(&th->completion);
return 0;
}
static int mxlog_thread_start(struct mxlog_transport *mxlog_transport)
{
int err;
struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
if (th->task != NULL) {
SCSC_TAG_WARNING(MXLOG_TRANS, "%s thread already started\n", th->name);
return 0;
}
/* Initialise thread structure */
th->block_thread = 1;
init_waitqueue_head(&th->wakeup_q);
init_completion(&th->completion);
th->wakeup_flag = 0;
snprintf(th->name, MXLOG_THREAD_NAME_MAX_LENGTH, "mxlog_thread");
/* Start the kernel thread */
th->task = kthread_run(mxlog_thread_function, mxlog_transport, "%s", th->name);
if (IS_ERR(th->task)) {
int err = (int)PTR_ERR(th->task);
/* Do not leave an error pointer behind for the stop path */
th->task = NULL;
return err;
}
SCSC_TAG_INFO(MXLOG_TRANS, "Started thread %s\n", th->name);
/* wait until thread is started */
#define LOG_THREAD_START_TMO_SEC (3)
err = wait_for_completion_timeout(&th->completion, msecs_to_jiffies(LOG_THREAD_START_TMO_SEC*1000));
if (err == 0) {
SCSC_TAG_ERR(MXLOG_TRANS, "timeout in starting thread\n");
return -ETIMEDOUT;
}
return 0;
}
static void mxlog_thread_stop(struct mxlog_transport *mxlog_transport)
{
struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
if (!th->task) {
SCSC_TAG_WARNING(MXLOG_TRANS, "%s mxlog_thread is already stopped\n", th->name);
return;
}
SCSC_TAG_INFO(MXLOG_TRANS, "Stopping %s mxlog_thread\n", th->name);
kthread_stop(th->task);
/* wait until th stopped */
#define LOG_THREAD_STOP_TMO_SEC (3)
wait_for_completion_timeout(&th->completion, msecs_to_jiffies(LOG_THREAD_STOP_TMO_SEC*1000));
th->task = NULL;
}
void mxlog_transport_release(struct mxlog_transport *mxlog_transport)
{
mxlog_thread_stop(mxlog_transport);
mif_stream_release(&mxlog_transport->mif_stream);
}
void mxlog_transport_config_serialise(struct mxlog_transport *mxlog_transport,
struct mxlogconf *mxlogconf)
{
mif_stream_config_serialise(&mxlog_transport->mif_stream, &mxlogconf->stream_conf);
}
/** Public functions */
int mxlog_transport_init(struct mxlog_transport *mxlog_transport, struct scsc_mx *mx)
{
int r;
uint32_t mem_length = MXLOG_TRANSPORT_BUF_LENGTH;
uint32_t packet_size = MXLOG_TRANSPORT_PACKET_SIZE;
uint32_t num_packets;
/*
 * Initialising a buffer of 1 byte is never legitimate, do not allow it.
 * The memory buffer length must be a multiple of the packet size.
 */
if (mem_length <= 1 || mem_length % packet_size != 0)
return -EIO;
memset(mxlog_transport, 0, sizeof(struct mxlog_transport));
mutex_init(&mxlog_transport->lock);
num_packets = mem_length / packet_size;
mxlog_transport->mx = mx;
r = mif_stream_init(&mxlog_transport->mif_stream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, input_irq_handler, mxlog_transport);
if (r)
return r;
r = mxlog_thread_start(mxlog_transport);
if (r) {
mif_stream_release(&mxlog_transport->mif_stream);
return r;
}
return 0;
}
void mxlog_transport_register_channel_handler(struct mxlog_transport *mxlog_transport,
mxlog_header_handler parser,
mxlog_channel_handler handler,
void *data)
{
mutex_lock(&mxlog_transport->lock);
mxlog_transport->header_handler_fn = parser;
mxlog_transport->channel_handler_fn = handler;
mxlog_transport->channel_handler_data = (void *)data;
mutex_unlock(&mxlog_transport->lock);
}

View file

@ -0,0 +1,77 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* Maxwell mxlog transport (Interface)
*
* Provides communication between the firmware and the host.
*
*/
#ifndef MXLOG_TRANSPORT_H__
#define MXLOG_TRANSPORT_H__
/** Uses */
#include <linux/kthread.h>
#include "mifstream.h"
struct mxlog_transport;
typedef int (*mxlog_header_handler)(u32 header, u8 *phase,
u8 *level, u32 *num_bytes);
/**
* Transport channel callback handler. This will be invoked each time a message on a channel is
* received. Handlers may perform work within their callback implementation, but should not block.
* The detected phase is passed as first parameter.
*
* Note that the message pointer passed is only valid for the duration of the function call.
*/
typedef void (*mxlog_channel_handler)(u8 phase, const void *message,
size_t length, u32 level, void *data);
/**
* Initialises the maxwell management transport and configures the necessary
* interrupt handlers.
*/
int mxlog_transport_init(struct mxlog_transport *mxlog_transport, struct scsc_mx *mx);
void mxlog_transport_release(struct mxlog_transport *mxlog_transport);
/*
* Initialises the configuration area incl. Maxwell Infrastructure Configuration,
* MIF Management Transport Configuration and MIF Management Stream Configuration.
*/
void mxlog_transport_config_serialise(struct mxlog_transport *mxlog_transport, struct mxlogconf *mxlogconf);
void mxlog_transport_register_channel_handler(struct mxlog_transport *mxlog_transport,
mxlog_header_handler parser,
mxlog_channel_handler handler,
void *data);
void mxlog_transport_set_error(struct mxlog_transport *mxlog_transport);
#define MXLOG_THREAD_NAME_MAX_LENGTH 32
struct mxlog_thread {
struct task_struct *task;
char name[MXLOG_THREAD_NAME_MAX_LENGTH];
int prio;
struct completion completion;
wait_queue_head_t wakeup_q;
unsigned int wakeup_flag;
/*
* Use it to block the I/O thread when
* an error occurs.
*/
int block_thread;
};
struct mxlog_transport {
struct scsc_mx *mx;
struct mxlog_thread mxlog_thread;
struct mif_stream mif_stream;
mxlog_header_handler header_handler_fn;
mxlog_channel_handler channel_handler_fn;
void *channel_handler_data;
struct mutex lock;
};
#endif /* MXLOG_TRANSPORT_H__ */
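As an illustration of this interface, a minimal client might look like the
sketch below. It is not part of the driver: the bit layout in the parser is
invented for the example (the real format is defined per firmware phase), and
'transport' is assumed to be an initialised struct mxlog_transport.

/* Hypothetical header parser: returns non-zero on bad sync, in which case
 * the transport thread stops processing the stream. */
static int example_header_handler(u32 header, u8 *phase, u8 *level, u32 *num_bytes)
{
	if ((header >> 24) != 0xA5)	/* assumed sync marker */
		return -EIO;
	*phase = (header >> 16) & 0xff;
	*level = (header >> 8) & 0xff;
	*num_bytes = header & 0xff;
	return 0;
}

/* Hypothetical channel handler: the message pointer is only valid for the
 * duration of the call, so consume or copy the payload here. */
static void example_channel_handler(u8 phase, const void *message,
				    size_t length, u32 level, void *data)
{
	/* e.g. copy the payload into a log ring */
}

mxlog_transport_register_channel_handler(&transport, example_header_handler,
					 example_channel_handler, NULL);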

File diff suppressed because it is too large

View file

@ -0,0 +1,74 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef _MAXWELL_MANAGER_H
#define _MAXWELL_MANAGER_H
#include <linux/workqueue.h>
#include "fwhdr.h"
#include "mxmgmt_transport.h"
#include "mxproc.h"
#include "scsc_mx.h"
#include <linux/wakelock.h>
struct mxman;
void mxman_init(struct mxman *mxman, struct scsc_mx *mx);
void mxman_deinit(struct mxman *mxman);
int mxman_open(struct mxman *mxman);
void mxman_close(struct mxman *mxman);
void mxman_fail(struct mxman *mxman, u16 scsc_panic_code);
void mxman_freeze(struct mxman *mxman);
int mxman_force_panic(struct mxman *mxman);
int mxman_suspend(struct mxman *mxman);
void mxman_resume(struct mxman *mxman);
enum mxman_state {
MXMAN_STATE_STOPPED,
MXMAN_STATE_STARTED,
MXMAN_STATE_FAILED,
MXMAN_STATE_FREEZED,
};
struct mxman {
struct scsc_mx *mx;
int users;
void *start_dram;
struct workqueue_struct *fw_crc_wq;
struct delayed_work fw_crc_work;
struct workqueue_struct *failure_wq;
struct work_struct failure_work;
char *fw;
u32 fw_image_size;
struct completion mm_msg_start_ind_completion;
struct fwhdr fwhdr;
struct mxconf *mxconf;
enum mxman_state mxman_state;
enum mxman_state mxman_next_state;
struct mutex mxman_mutex;
struct mxproc mxproc;
int suspended;
atomic_t suspend_count;
bool check_crc;
char fw_build_id[64];
struct completion recovery_completion;
struct wake_lock recovery_wake_lock;
u32 rf_hw_ver;
u16 scsc_panic_code;
};
void mxman_register_gdb_channel(struct scsc_mx *mx, mxmgmt_channel_handler handler, void *data);
void mxman_send_gdb_channel(struct scsc_mx *mx, void *data, size_t length);
#ifdef CONFIG_SCSC_CHV_SUPPORT
#define SCSC_CHV_ARGV_ADDR_OFFSET 0x200008
extern int chv_run;
#endif
#endif
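For orientation, the call sequence implied by the declarations above is
sketched here; it is illustrative only, with 'mx' assumed to be a valid
struct scsc_mx pointer and all error handling elided.

struct mxman mxman;

mxman_init(&mxman, mx);			/* once, when the subsystem is created */
if (mxman_open(&mxman) == 0) {		/* boots the firmware, counts a user */
	/* services run while open; mxman_fail()/mxman_freeze() handle errors */
	mxman_close(&mxman);
}
mxman_deinit(&mxman);			/* once, when the subsystem is destroyed */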

View file

@ -0,0 +1,291 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* Maxwell management transport (implementation)
*/
/** Implements */
#include "mxmgmt_transport.h"
/** Uses */
#include <scsc/scsc_logring.h>
#include <linux/module.h>
#include "mxmgmt_transport_format.h"
#include "mifintrbit.h"
/* Flag that an error has occurred so the I/O thread processing should stop */
void mxmgmt_transport_set_error(struct mxmgmt_transport *mxmgmt_transport)
{
SCSC_TAG_WARNING(MXMGT_TRANS, "I/O thread processing is suspended\n");
mxmgmt_transport->mxmgmt_thread.block_thread = 1;
}
/** MIF Interrupt handler for writes made to the AP */
static void input_irq_handler(int irq, void *data)
{
struct mxmgmt_transport *mxmgmt_transport = (struct mxmgmt_transport *)data;
struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
struct scsc_mif_abs *mif_abs;
SCSC_TAG_DEBUG(MXMGT_TRANS, "IN\n");
/* Clear the interrupt first to ensure we can't possibly miss one */
mif_abs = scsc_mx_get_mif_abs(mxmgmt_transport->mx);
mif_abs->irq_bit_clear(mif_abs, irq);
/* The other side wrote some data to the input stream; wake up the thread
 * that deals with it. */
if (th->task == NULL) {
SCSC_TAG_ERR(MXMGT_TRANS, "th is NOT running\n");
return;
}
/*
 * If an error has occurred, we silently discard all messages from the stream
 * until the error has been processed and the system has been reinitialised.
 */
if (th->block_thread == 1) {
SCSC_TAG_DEBUG(MXMGT_TRANS, "discard message.\n");
/*
* Do not try to acknowledge a pending interrupt here.
* This function is called by a function which in turn can be
* running in an atomic or 'disabled irq' level.
*/
return;
}
th->wakeup_flag = 1;
/* wake up I/O thread */
wake_up_interruptible(&th->wakeup_q);
}
/** MIF Interrupt handler for acknowledging writes made by the AP */
static void output_irq_handler(int irq, void *data)
{
struct scsc_mif_abs *mif_abs;
struct mxmgmt_transport *mxmgmt_transport = (struct mxmgmt_transport *)data;
SCSC_TAG_INFO(MXMGT_TRANS, "OUT\n");
/* Clear the interrupt first to ensure we can't possibly miss one */
/* The FW read some data from the output stream.
* Currently we do not care, so just clear the interrupt. */
mif_abs = scsc_mx_get_mif_abs(mxmgmt_transport->mx);
mif_abs->irq_bit_clear(mif_abs, irq);
/* The driver doesn't use the ack IRQ, so mask it from now on,
* otherwise we may get spurious host-wakes.
*/
mif_abs->irq_bit_mask(mif_abs, irq);
}
static void thread_wait_until_stopped(struct mxmgmt_transport *mxmgmt_transport)
{
struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
/*
* kthread_stop() cannot handle the thread exiting while
* kthread_should_stop() is false, so sleep until kthread_stop()
* wakes us up.
*/
SCSC_TAG_DEBUG(MXMGT_TRANS, "%s waiting for the stop signal.\n", th->name);
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop()) {
SCSC_TAG_DEBUG(MXMGT_TRANS, "%s schedule....\n", th->name);
schedule();
}
th->task = NULL;
SCSC_TAG_DEBUG(MXMGT_TRANS, "%s exiting....\n", th->name);
}
/**
* A thread that forwards messages sent across the transport to
* the registered handlers for each channel.
*/
static int mxmgmt_thread_function(void *arg)
{
struct mxmgmt_transport *mxmgmt_transport = (struct mxmgmt_transport *)arg;
struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
const struct mxmgr_message *current_message;
int ret;
complete(&th->completion);
th->block_thread = 0;
while (!kthread_should_stop()) {
/* wait until an error occurs, or we need to process something. */
ret = wait_event_interruptible(th->wakeup_q,
(th->wakeup_flag && !th->block_thread) ||
kthread_should_stop());
if (kthread_should_stop()) {
SCSC_TAG_DEBUG(MXMGT_TRANS, "signalled to exit\n");
break;
}
if (ret < 0) {
SCSC_TAG_DEBUG(MXMGT_TRANS, "wait_event returned %d, thread will exit\n", ret);
thread_wait_until_stopped(mxmgmt_transport);
break;
}
th->wakeup_flag = 0;
SCSC_TAG_DEBUG(MXMGT_TRANS, "wokeup: r=%d\n", ret);
/* Forward each pending message to the applicable channel handler */
current_message = mif_stream_peek(&mxmgmt_transport->mif_istream, NULL);
while (current_message != NULL) {
mutex_lock(&mxmgmt_transport->channel_handler_mutex);
if (current_message->channel_id < MMTRANS_NUM_CHANNELS &&
mxmgmt_transport->channel_handler_fns[current_message->channel_id]) {
SCSC_TAG_DEBUG(MXMGT_TRANS, "Calling handler for channel_id: %d\n", current_message->channel_id);
(*mxmgmt_transport->channel_handler_fns[current_message->channel_id])(current_message->payload,
mxmgmt_transport->channel_handler_data[current_message->channel_id]);
} else
/* HERE: Invalid channel or no handler, raise fault or log message */
SCSC_TAG_WARNING(MXMGT_TRANS, "Invalid channel or no handler channel_id: %d\n", current_message->channel_id);
mutex_unlock(&mxmgmt_transport->channel_handler_mutex);
/* Remove the current message from the buffer before processing the next
* one in case it generated another message, otherwise it's possible we
* could run out of space in the stream before we get through all the messages. */
mif_stream_peek_complete(&mxmgmt_transport->mif_istream, current_message);
current_message = mif_stream_peek(&mxmgmt_transport->mif_istream, NULL);
}
}
SCSC_TAG_DEBUG(MXMGT_TRANS, "exiting....\n");
complete(&th->completion);
return 0;
}
static int mxmgmt_thread_start(struct mxmgmt_transport *mxmgmt_transport)
{
int err;
struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
if (th->task != NULL) {
SCSC_TAG_WARNING(MXMGT_TRANS, "%s thread already started\n", th->name);
return 0;
}
/* Initialise thread structure */
th->block_thread = 1;
init_waitqueue_head(&th->wakeup_q);
init_completion(&th->completion);
th->wakeup_flag = 0;
snprintf(th->name, MXMGMT_THREAD_NAME_MAX_LENGTH, "mxmgmt_thread");
/* Start the kernel thread */
th->task = kthread_run(mxmgmt_thread_function, mxmgmt_transport, "%s", th->name);
if (IS_ERR(th->task)) {
int err = (int)PTR_ERR(th->task);
/* Do not leave an error pointer behind for the stop path */
th->task = NULL;
return err;
}
SCSC_TAG_DEBUG(MXMGT_TRANS, "Started thread %s\n", th->name);
/* wait until thread is started */
#define MGMT_THREAD_START_TMO_SEC (3)
err = wait_for_completion_timeout(&th->completion, msecs_to_jiffies(MGMT_THREAD_START_TMO_SEC*1000));
if (err == 0) {
SCSC_TAG_ERR(MXMGT_TRANS, "timeout in starting thread\n");
return -ETIMEDOUT;
}
return 0;
}
static void mgmt_thread_stop(struct mxmgmt_transport *mxmgmt_transport)
{
struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
if (!th->task) {
SCSC_TAG_WARNING(MXMGT_TRANS, "%s mgmt_thread is already stopped\n", th->name);
return;
}
SCSC_TAG_DEBUG(MXMGT_TRANS, "Stopping %s mgmt_thread\n", th->name);
kthread_stop(th->task);
/* wait until th stopped */
#define MGMT_THREAD_STOP_TMO_SEC (3)
wait_for_completion_timeout(&th->completion, msecs_to_jiffies(MGMT_THREAD_STOP_TMO_SEC*1000));
th->task = NULL;
}
void mxmgmt_transport_release(struct mxmgmt_transport *mxmgmt_transport)
{
mgmt_thread_stop(mxmgmt_transport);
mif_stream_release(&mxmgmt_transport->mif_istream);
mif_stream_release(&mxmgmt_transport->mif_ostream);
}
void mxmgmt_transport_config_serialise(struct mxmgmt_transport *mxmgmt_transport,
struct mxtransconf *trans_conf)
{
mif_stream_config_serialise(&mxmgmt_transport->mif_istream, &trans_conf->to_ap_stream_conf);
mif_stream_config_serialise(&mxmgmt_transport->mif_ostream, &trans_conf->from_ap_stream_conf);
}
/** Public functions */
int mxmgmt_transport_init(struct mxmgmt_transport *mxmgmt_transport, struct scsc_mx *mx)
{
#define MEM_LENGTH 512
int r;
uint32_t mem_length = MEM_LENGTH;
uint32_t packet_size = sizeof(struct mxmgr_message);
uint32_t num_packets;
/*
* Initialising a buffer of 1 byte is never legitimate, do not allow it.
* The memory buffer length must be a multiple of the packet size.
*/
if (mem_length <= 1 || mem_length % packet_size != 0)
return -EIO;
memset(mxmgmt_transport, 0, sizeof(struct mxmgmt_transport));
num_packets = mem_length / packet_size;
mutex_init(&mxmgmt_transport->channel_handler_mutex);
mxmgmt_transport->mx = mx;
r = mif_stream_init(&mxmgmt_transport->mif_istream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, input_irq_handler, mxmgmt_transport);
if (r)
return r;
r = mif_stream_init(&mxmgmt_transport->mif_ostream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_OUT, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, output_irq_handler, mxmgmt_transport);
if (r) {
mif_stream_release(&mxmgmt_transport->mif_istream);
return r;
}
r = mxmgmt_thread_start(mxmgmt_transport);
if (r) {
mif_stream_release(&mxmgmt_transport->mif_istream);
mif_stream_release(&mxmgmt_transport->mif_ostream);
return r;
}
return 0;
}
void mxmgmt_transport_register_channel_handler(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
mxmgmt_channel_handler handler, void *data)
{
if (channel_id >= MMTRANS_NUM_CHANNELS) {
SCSC_TAG_ERR(MXMGT_TRANS, "Invalid channel id: %d\n", channel_id);
return;
}
mutex_lock(&mxmgmt_transport->channel_handler_mutex);
mxmgmt_transport->channel_handler_fns[channel_id] = handler;
mxmgmt_transport->channel_handler_data[channel_id] = data;
mutex_unlock(&mxmgmt_transport->channel_handler_mutex);
}
void mxmgmt_transport_send(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
void *message, uint32_t message_length)
{
struct mxmgr_message transport_msg = { .channel_id = channel_id };
const void *bufs[2] = { &transport_msg.channel_id, message };
uint32_t buf_lengths[2] = { sizeof(transport_msg.channel_id), message_length };
mif_stream_write_gather(&mxmgmt_transport->mif_ostream, bufs, buf_lengths, 2);
}

View file

@ -0,0 +1,104 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/**
* mx140 management transport (Interface)
*
* Provides bi-directional communication between the firmware and the
* host. Messages sent across the transport are divided into a
* number of channels, with each channel having its own dedicated handler.
*
* This interface also provides a utility method for sending messages across
* the stream.
*/
#ifndef MXMANAGEMENT_TRANSPORT_H__
#define MXMANAGEMENT_TRANSPORT_H__
/** Uses */
#include <linux/kthread.h>
#include "mifstream.h"
struct mxmgmt_transport;
/**
* The various channels that can send messages
* across the transport.
*
* Channel IDs are limited to one byte.
*/
enum mxmgr_channels {
MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT = 0,
MMTRANS_CHAN_ID_SERVICE_MANAGEMENT = 1,
MMTRANS_CHAN_ID_MAXWELL_LOGGING = 2,
MMTRANS_NUM_CHANNELS = 3
};
/**
* Transport channel callback handler. This will be invoked each time a message on a channel is
* received in the context of the transport stream's thread. Handlers may perform work within
* their callback implementation, but should not block.
*
* Note that the message pointer passed is only valid for the duration of the function call.
*/
typedef void (*mxmgmt_channel_handler)(const void *message, void *data);
/**
* Registers the callback function that will be invoked to handle data coming in from the AP
* for the given channel.
*/
void mxmgmt_transport_register_channel_handler(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
mxmgmt_channel_handler handler, void *data);
/**
* Sends a message to the AP across the given channel.
*
* This function is safe to call from any RTOS thread.
*/
void mxmgmt_transport_send(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
void *message, uint32_t message_length);
/**
* Initialises the maxwell management transport and configures the necessary
* interrupt handlers. Called once during boot.
*/
int mxmgmt_transport_init(struct mxmgmt_transport *mxmgmt_transport, struct scsc_mx *mx);
void mxmgmt_transport_release(struct mxmgmt_transport *mxmgmt_transport);
/*
* Initialises the configuration area incl. Maxwell Infrastructure Configuration,
* MIF Management Transport Configuration and MIF Management Stream Configuration.
*/
void mxmgmt_transport_config_serialise(struct mxmgmt_transport *mxmgmt_transport, struct mxtransconf *trans_conf);
void mxmgmt_transport_set_error(struct mxmgmt_transport *mxmgmt_transport);
#define MXMGMT_THREAD_NAME_MAX_LENGTH 32
struct mxmgmt_thread {
struct task_struct *task;
char name[MXMGMT_THREAD_NAME_MAX_LENGTH];
int prio;
struct completion completion;
wait_queue_head_t wakeup_q;
unsigned int wakeup_flag;
/*
* Use it to block the I/O thread when
* an error occurs.
*/
int block_thread;
};
struct mxmgmt_transport {
struct scsc_mx *mx;
struct mif_stream mif_istream;
struct mif_stream mif_ostream;
struct mxmgmt_thread mxmgmt_thread;
/** Registered channel handlers for messages coming from the AP for each channel */
mxmgmt_channel_handler channel_handler_fns[MMTRANS_NUM_CHANNELS];
void *channel_handler_data[MMTRANS_NUM_CHANNELS];
struct mutex channel_handler_mutex;
};
#endif /* MXMANAGEMENT_TRANSPORT_H__ */
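A hypothetical user of this interface, registering a receive handler on one
channel and posting a message on it ('transport', 'msg' and 'msg_len' are
assumed to exist):

/* Invoked from the transport thread; must not block, and 'message' is only
 * valid for the duration of the call. */
static void mgmt_rx(const void *message, void *data)
{
	/* decode and dispatch the management message */
}

mxmgmt_transport_register_channel_handler(&transport,
					  MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
					  mgmt_rx, NULL);
mxmgmt_transport_send(&transport, MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
		      msg, msg_len);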

View file

@ -0,0 +1,21 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef MXMGR_TRANSPORT_FORMAT_H__
#define MXMGR_TRANSPORT_FORMAT_H__
/**
* Layout of messages across the manager transport streams.
*
* HERE: This is a dummy definition and will be replaced
* once more of the service management infrastructure is completed.
*/
struct mxmgr_message {
uint8_t channel_id; /* Channel ID from mxmgr_channels */
uint8_t payload[7]; /* Message content to store in the transport stream - user defined format */
} __packed;
#endif /* MXMGR_TRANSPORT_FORMAT_H__ */
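Since mxmgmt_transport_init() sizes its stream packets as
sizeof(struct mxmgr_message), the 8-octet layout above is load-bearing. A
compile-time guard along these lines (not in the original source) would catch
accidental padding if the structure is ever edited:

static inline void mxmgr_message_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct mxmgr_message) != 8);
}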

View file

@ -0,0 +1,24 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef MXMGR_TRANSPORT_STREAMS_H__
#define MXMGR_TRANSPORT_STREAMS_H__
/**
* MIF input/output streams to/from the AP.
* These are separated out to allow their use directly from within unit tests.
*/
typedef struct {
/** from AP */
struct mif_stream *istream;
/** to AP */
struct mif_stream *ostream;
} mxmgr_stream_container;
extern mxmgr_stream_container mxmgr_streams;
#endif /* MXMGR_TRANSPORT_STREAMS_H__ */

View file

@ -0,0 +1,409 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/proc_fs.h>
#include <linux/version.h>
#include <linux/seq_file.h>
#include <scsc/scsc_release.h>
#include <scsc/scsc_logring.h>
#include "mxman.h"
#include "mxproc.h"
#ifndef AID_MXPROC
#define AID_MXPROC 0
#endif
#define MX_PROCFS_RW_FILE_OPS(name) \
static ssize_t mx_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
static const struct file_operations mx_procfs_ ## name ## _fops = { \
.read = mx_procfs_ ## name ## _read, \
.write = mx_procfs_ ## name ## _write, \
.open = mx_procfs_generic_open, \
.llseek = generic_file_llseek \
}
#define MX_PROCFS_RO_FILE_OPS(name) \
static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
static const struct file_operations mx_procfs_ ## name ## _fops = { \
.read = mx_procfs_ ## name ## _read, \
.open = mx_procfs_generic_open, \
.llseek = generic_file_llseek \
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MX_PDE_DATA(inode) PDE_DATA(inode)
#else
#define MX_PDE_DATA(inode) (PDE(inode)->data)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MX_PROCFS_SET_UID_GID(_entry) \
do { \
kuid_t proc_kuid = KUIDT_INIT(AID_MXPROC); \
kgid_t proc_kgid = KGIDT_INIT(AID_MXPROC); \
proc_set_user(_entry, proc_kuid, proc_kgid); \
} while (0)
#else
#define MX_PROCFS_SET_UID_GID(entry) \
do { \
(entry)->uid = AID_MXPROC; \
(entry)->gid = AID_MXPROC; \
} while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define MX_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &mx_procfs_ ## name ## _fops, _sdev); \
MX_PROCFS_SET_UID_GID(entry); \
} while (0)
#else
#define MX_PROCFS_ADD_FILE(_data, name, parent, mode) \
do { \
struct proc_dir_entry *entry; \
entry = create_proc_entry(# name, mode, parent); \
if (entry) { \
entry->proc_fops = &mx_procfs_ ## name ## _fops; \
entry->data = _data; \
MX_PROCFS_SET_UID_GID(entry); \
} \
} while (0)
#endif
#define MX_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
#define OS_UNUSED_PARAMETER(x) ((void)(x))
#define MX_DIRLEN 128
static const char *procdir_ctrl = "driver/mxman_ctrl";
static const char *procdir_info = "driver/mxman_info";
static int mx_procfs_generic_open(struct inode *inode, struct file *file)
{
file->private_data = MX_PDE_DATA(inode);
return 0;
}
static ssize_t mx_procfs_mx_fail_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
OS_UNUSED_PARAMETER(mxproc);
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(user_buf);
OS_UNUSED_PARAMETER(count);
OS_UNUSED_PARAMETER(ppos);
SCSC_TAG_DEBUG(MX_PROC, "OK\n");
return 0;
}
static ssize_t mx_procfs_mx_fail_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(user_buf);
OS_UNUSED_PARAMETER(count);
OS_UNUSED_PARAMETER(ppos);
if (mxproc)
mxman_fail(mxproc->mxman, SCSC_PANIC_CODE_HOST << 15);
SCSC_TAG_DEBUG(MX_PROC, "OK\n");
return count;
}
static ssize_t mx_procfs_mx_freeze_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
OS_UNUSED_PARAMETER(mxproc);
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(user_buf);
OS_UNUSED_PARAMETER(count);
OS_UNUSED_PARAMETER(ppos);
SCSC_TAG_DEBUG(MX_PROC, "OK\n");
return 0;
}
static ssize_t mx_procfs_mx_freeze_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(user_buf);
OS_UNUSED_PARAMETER(count);
OS_UNUSED_PARAMETER(ppos);
if (mxproc)
mxman_freeze(mxproc->mxman);
SCSC_TAG_INFO(MX_PROC, "OK\n");
return count;
}
static ssize_t mx_procfs_mx_panic_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
OS_UNUSED_PARAMETER(mxproc);
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(user_buf);
OS_UNUSED_PARAMETER(count);
OS_UNUSED_PARAMETER(ppos);
SCSC_TAG_INFO(MX_PROC, "OK\n");
return 0;
}
static ssize_t mx_procfs_mx_panic_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(user_buf);
OS_UNUSED_PARAMETER(count);
OS_UNUSED_PARAMETER(ppos);
if (mxproc)
mxman_force_panic(mxproc->mxman);
SCSC_TAG_INFO(MX_PROC, "OK\n");
return count;
}
static ssize_t mx_procfs_mx_suspend_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
char buf[3];
OS_UNUSED_PARAMETER(file);
buf[0] = mxproc->mxman->suspended ? 'Y' : 'N';
buf[1] = '\n';
buf[2] = '\0';
SCSC_TAG_INFO(MX_PROC, "suspended: %c\n", buf[0]);
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
}
static ssize_t mx_procfs_mx_suspend_count_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
int pos = 0;
char buf[32];
const size_t bufsz = sizeof(buf);
u32 suspend_count;
OS_UNUSED_PARAMETER(file);
if (!mxproc || !mxproc->mxman)
return 0;
suspend_count = atomic_read(&mxproc->mxman->suspend_count);
SCSC_TAG_INFO(MX_PROC, "suspend_count: %u\n", suspend_count);
pos += scnprintf(buf + pos, bufsz - pos, "%u\n", suspend_count);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t mx_procfs_mx_suspend_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
int r;
OS_UNUSED_PARAMETER(file);
OS_UNUSED_PARAMETER(ppos);
if (count && mxproc) {
switch (user_buf[0]) {
case 'Y':
SCSC_TAG_INFO(MX_PROC, "force suspend\n");
r = mxman_suspend(mxproc->mxman);
if (r) {
SCSC_TAG_INFO(MX_PROC, "mx_suspend failed %d\n", r);
return r;
}
break;
case 'N':
SCSC_TAG_INFO(MX_PROC, "force resume\n");
mxman_resume(mxproc->mxman);
break;
default:
SCSC_TAG_INFO(MX_PROC, "invalid value %c\n", user_buf[0]);
return -EINVAL;
}
}
return count;
}
static ssize_t mx_procfs_mx_status_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
struct mxproc *mxproc = file->private_data;
char buf[32];
int pos = 0;
const size_t bufsz = sizeof(buf);
if (!mxproc || !mxproc->mxman)
return 0;
switch (mxproc->mxman->mxman_state) {
case MXMAN_STATE_STOPPED:
pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "MXMAN_STATE_STOPPED");
break;
case MXMAN_STATE_STARTED:
pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "MXMAN_STATE_STARTED");
break;
case MXMAN_STATE_FAILED:
pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "MXMAN_STATE_FAILED");
break;
case MXMAN_STATE_FREEZED:
pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "MXMAN_STATE_FREEZED");
break;
default:
return 0;
}
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
MX_PROCFS_RW_FILE_OPS(mx_fail);
MX_PROCFS_RW_FILE_OPS(mx_freeze);
MX_PROCFS_RW_FILE_OPS(mx_panic);
MX_PROCFS_RW_FILE_OPS(mx_suspend);
MX_PROCFS_RO_FILE_OPS(mx_suspend_count);
MX_PROCFS_RO_FILE_OPS(mx_status);
static u32 proc_count;
int mxproc_create_ctrl_proc_dir(struct mxproc *mxproc, struct mxman *mxman)
{
char dir[MX_DIRLEN];
struct proc_dir_entry *parent;
(void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, proc_count);
parent = proc_mkdir(dir, NULL);
if (!parent) {
SCSC_TAG_ERR(MX_PROC, "failed to create proc dir %s\n", procdir_ctrl);
return -EINVAL;
}
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
parent->data = mxproc;
#endif
mxproc->mxman = mxman;
mxproc->procfs_ctrl_dir = parent;
mxproc->procfs_ctrl_dir_num = proc_count;
MX_PROCFS_ADD_FILE(mxproc, mx_fail, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MX_PROCFS_ADD_FILE(mxproc, mx_freeze, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MX_PROCFS_ADD_FILE(mxproc, mx_panic, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MX_PROCFS_ADD_FILE(mxproc, mx_suspend, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
MX_PROCFS_ADD_FILE(mxproc, mx_suspend_count, parent, S_IRUSR | S_IRGRP);
MX_PROCFS_ADD_FILE(mxproc, mx_status, parent, S_IRUSR | S_IRGRP);
SCSC_TAG_DEBUG(MX_PROC, "created %s proc dir\n", dir);
proc_count++;
return 0;
}
void mxproc_remove_ctrl_proc_dir(struct mxproc *mxproc)
{
if (mxproc->procfs_ctrl_dir) {
char dir[MX_DIRLEN];
MX_PROCFS_REMOVE_FILE(mx_fail, mxproc->procfs_ctrl_dir);
MX_PROCFS_REMOVE_FILE(mx_freeze, mxproc->procfs_ctrl_dir);
MX_PROCFS_REMOVE_FILE(mx_panic, mxproc->procfs_ctrl_dir);
MX_PROCFS_REMOVE_FILE(mx_suspend, mxproc->procfs_ctrl_dir);
MX_PROCFS_REMOVE_FILE(mx_suspend_count, mxproc->procfs_ctrl_dir);
MX_PROCFS_REMOVE_FILE(mx_status, mxproc->procfs_ctrl_dir);
(void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, mxproc->procfs_ctrl_dir_num);
remove_proc_entry(dir, NULL);
mxproc->procfs_ctrl_dir = NULL;
proc_count--;
SCSC_TAG_DEBUG(MX_PROC, "removed %s proc dir\n", dir);
}
}
static ssize_t mx_procfs_mx_rf_hw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[80];
int bytes;
struct mxproc *mxproc = file->private_data;
if (!mxproc || !mxproc->mxman)
return -EINVAL;
memset(buf, '\0', sizeof(buf));
bytes = snprintf(buf, sizeof(buf), "RF version: 0x%04x\n", (mxproc->mxman->rf_hw_ver));
return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
}
MX_PROCFS_RO_FILE_OPS(mx_rf_hw_ver);
static ssize_t mx_procfs_mx_release_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[80];
int bytes;
struct mxproc *mxproc = file->private_data;
char *build_id = NULL;
OS_UNUSED_PARAMETER(file);
if (mxproc && mxproc->mxman)
build_id = mxproc->mxman->fw_build_id;
memset(buf, '\0', sizeof(buf));
bytes = snprintf(buf, sizeof(buf), "Release: %d.%d.%d (f/w: %s)\n",
SCSC_RELEASE_PRODUCT, SCSC_RELEASE_ITERATION, SCSC_RELEASE_CANDIDATE,
build_id ? build_id : "unknown");
if (bytes > sizeof(buf))
bytes = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
}
MX_PROCFS_RO_FILE_OPS(mx_release);
int mxproc_create_info_proc_dir(struct mxproc *mxproc, struct mxman *mxman)
{
char dir[MX_DIRLEN];
struct proc_dir_entry *parent;
(void)snprintf(dir, sizeof(dir), "%s", procdir_info);
parent = proc_mkdir(dir, NULL);
if (!parent) {
SCSC_TAG_ERR(MX_PROC, "failed to create /proc dir\n");
return -EINVAL;
}
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
parent->data = mxproc;
#endif
mxproc->mxman = mxman;
mxproc->procfs_info_dir = parent;
MX_PROCFS_ADD_FILE(mxproc, mx_release, parent, S_IRUSR | S_IRGRP);
MX_PROCFS_ADD_FILE(mxproc, mx_rf_hw_ver, parent, S_IRUSR | S_IRGRP);
SCSC_TAG_DEBUG(MX_PROC, "created %s proc dir\n", dir);
return 0;
}
void mxproc_remove_info_proc_dir(struct mxproc *mxproc)
{
if (mxproc->procfs_info_dir) {
char dir[MX_DIRLEN];
MX_PROCFS_REMOVE_FILE(mx_release, mxproc->procfs_info_dir);
MX_PROCFS_REMOVE_FILE(mx_rf_hw_ver, mxproc->procfs_info_dir);
(void)snprintf(dir, sizeof(dir), "%s", procdir_info);
remove_proc_entry(dir, NULL);
mxproc->procfs_info_dir = NULL;
SCSC_TAG_DEBUG(MX_PROC, "removed %s proc dir\n", dir);
}
}
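For reference, the control nodes above are driven by simple writes: writing
'Y' to mx_suspend under /proc/driver/mxman_ctrl<n>/ forces a suspend via
mxman_suspend() and 'N' forces a resume (anything else returns -EINVAL), while
any write to mx_fail, mx_freeze and mx_panic triggers mxman_fail(),
mxman_freeze() and mxman_force_panic() respectively; mx_status reads back the
current mxman_state as text.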

View file

@ -0,0 +1,28 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/*
* mx140 proc interface
*/
#ifndef MXPROC_H
#define MXPROC_H
struct mxproc;
int mxproc_create_ctrl_proc_dir(struct mxproc *mxproc, struct mxman *mxman);
void mxproc_remove_ctrl_proc_dir(struct mxproc *mxproc);
int mxproc_create_info_proc_dir(struct mxproc *mxproc, struct mxman *mxman);
void mxproc_remove_info_proc_dir(struct mxproc *mxproc);
struct mxproc {
struct mxman *mxman;
struct proc_dir_entry *procfs_ctrl_dir;
u32 procfs_ctrl_dir_num;
struct proc_dir_entry *procfs_info_dir;
};
#endif /* MXPROC_H */

View file

@ -0,0 +1,138 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef PANIC_RECORD_R4_DEFS_H__
#define PANIC_RECORD_R4_DEFS_H__
/*
* R4 Panic Record Definitions.
*
* This record is used to pass summary information about the context of
* Maxwell R4 firmware panics to the host.
*
* The record location, relative to shared DRAM memory, is defined by the
* R4_PANIC_RECORD_OFFSET field in the firmware header [see SC-505846-SW].
*
* Notes:-
* - The host panic handler should _not_ expect the R4 record to be
* written prior to a panic indication from Maxwell, and it may never
* be written at all. The checksum should indicate a valid record.
*
* N.B. Defined in this standalone header for inclusion in .s and .c.
*/
/*
* The current version of the PANIC_RECORD_R4 structure defined below.
* Written to version field by firmware, checked by host.
* This also serves as a rudimentary endianness check.
*/
#define PANIC_RECORD_R4_VERSION_1 1
/*
* Total number of R4 registers saved.
*/
#define PANIC_RECORD_R4_REGISTER_COUNT 18
/*
* Number of panic info arguments.
*/
#define PANIC_RECORD_R4_INFO_COUNT 4
/*
* Checksum seed to prevent false match on all zeros or ones.
*/
#define PANIC_RECORD_R4_CKSUM_SEED 0xa5a5a5a5
/*****************************************************************************
* R4 Panic Record 32bit field indices.
*****************************************************************************/
/*
* Version of this structure.
*/
#define PANIC_RECORD_R4_VERSION_INDEX 0
/*
* Clock counters at time of the R4 panic.
*
* The 1M clock is generally the most useful, but there is a period
* after IP wake-up when it is not monotonic. The 32K count
* is included in case of a panic during wake-up.
*/
#define PANIC_RECORD_R4_TIMESTAMP_1M_INDEX (PANIC_RECORD_R4_VERSION_INDEX + 1)
#define PANIC_RECORD_R4_TIMESTAMP_32K_INDEX (PANIC_RECORD_R4_TIMESTAMP_1M_INDEX + 1)
/*
* Snapshot of main r4 CPU registers.
*/
#define PANIC_RECORD_R4_REGISTERS_INDEX (PANIC_RECORD_R4_TIMESTAMP_32K_INDEX + 1)
/*
* Panic info.
*
* 1st field is key/index of panic_string.
*/
#define PANIC_RECORD_R4_INFO_INDEX (PANIC_RECORD_R4_REGISTERS_INDEX + PANIC_RECORD_R4_REGISTER_COUNT)
/*
* 32bit XOR of all the fields above + PANIC_RECORD_R4_CKSUM_SEED
*
* Written by firmware on panic, checked by host.
*/
#define PANIC_RECORD_R4_CKSUM_INDEX (PANIC_RECORD_R4_INFO_INDEX + PANIC_RECORD_R4_INFO_COUNT)
/*
* Length of the r4 panic record (uint32s).
*/
#define PANIC_RECORD_R4_LEN (PANIC_RECORD_R4_CKSUM_INDEX + 1)
/*****************************************************************************
* R4 uint32 Register indices relative to PANIC_RECORD_R4_REGISTERS_INDEX
*****************************************************************************/
#define PANIC_RECORD_R4_REGISTER_R0 0
#define PANIC_RECORD_R4_REGISTER_R1 1
#define PANIC_RECORD_R4_REGISTER_R2 2
#define PANIC_RECORD_R4_REGISTER_R3 3
#define PANIC_RECORD_R4_REGISTER_R4 4
#define PANIC_RECORD_R4_REGISTER_R5 5
#define PANIC_RECORD_R4_REGISTER_R6 6
#define PANIC_RECORD_R4_REGISTER_R7 7
#define PANIC_RECORD_R4_REGISTER_R8 8
#define PANIC_RECORD_R4_REGISTER_R9 9
#define PANIC_RECORD_R4_REGISTER_R10 10
#define PANIC_RECORD_R4_REGISTER_R11 11
#define PANIC_RECORD_R4_REGISTER_R12 12
#define PANIC_RECORD_R4_REGISTER_SP 13
#define PANIC_RECORD_R4_REGISTER_LR 14
#define PANIC_RECORD_R4_REGISTER_SPSR 15
#define PANIC_RECORD_R4_REGISTER_PC 16
#define PANIC_RECORD_R4_REGISTER_CPSR 17
/*****************************************************************************
* R4 Register octet offsets relative to PANIC_RECORD_R4_REGISTERS_INDEX
*****************************************************************************/
#define PANIC_RECORD_R4_REGISTER_OFFSET_R0 (PANIC_RECORD_R4_REGISTER_R0 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R1 (PANIC_RECORD_R4_REGISTER_R1 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R2 (PANIC_RECORD_R4_REGISTER_R2 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R3 (PANIC_RECORD_R4_REGISTER_R3 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R4 (PANIC_RECORD_R4_REGISTER_R4 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R5 (PANIC_RECORD_R4_REGISTER_R5 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R6 (PANIC_RECORD_R4_REGISTER_R6 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R7 (PANIC_RECORD_R4_REGISTER_R7 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R8 (PANIC_RECORD_R4_REGISTER_R8 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R9 (PANIC_RECORD_R4_REGISTER_R9 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R10 (PANIC_RECORD_R4_REGISTER_R10 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R11 (PANIC_RECORD_R4_REGISTER_R11 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_R12 (PANIC_RECORD_R4_REGISTER_R12 * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_SP (PANIC_RECORD_R4_REGISTER_SP * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_LR (PANIC_RECORD_R4_REGISTER_LR * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_SPSR (PANIC_RECORD_R4_REGISTER_SPSR * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_PC (PANIC_RECORD_R4_REGISTER_PC * 4)
#define PANIC_RECORD_R4_REGISTER_OFFSET_CPSR (PANIC_RECORD_R4_REGISTER_CPSR * 4)
#endif /* PANIC_RECORD_R4_DEFS_H__ */
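The checksum rule above does not pin down the exact combination of seed and
fields, so the host-side validity check below is a sketch under one natural
reading: the stored value is the XOR of every preceding 32-bit word, seeded
with PANIC_RECORD_R4_CKSUM_SEED.

static int r4_panic_record_looks_valid(const u32 *record)
{
	u32 cksum = PANIC_RECORD_R4_CKSUM_SEED;
	int i;

	if (record[PANIC_RECORD_R4_VERSION_INDEX] != PANIC_RECORD_R4_VERSION_1)
		return 0;	/* wrong version, or an endianness mismatch */
	for (i = 0; i < PANIC_RECORD_R4_CKSUM_INDEX; i++)
		cksum ^= record[i];
	return cksum == record[PANIC_RECORD_R4_CKSUM_INDEX];
}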

View file

@ -0,0 +1,40 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include <scsc/scsc_logring.h>
#include "panicmon.h"
#include "scsc_mif_abs.h"
#include "mxman.h"
static void panicmon_isr(int irq, void *data)
{
struct panicmon *panicmon = (struct panicmon *)data;
SCSC_TAG_DEBUG(PANIC_MON, "panicmon=%p panicmon->mx=%p mxman=%p\n", panicmon, panicmon->mx, scsc_mx_get_mxman(panicmon->mx));
/* Avoid unused parameter error */
(void)irq;
mxman_fail(scsc_mx_get_mxman(panicmon->mx), SCSC_PANIC_CODE_FW << 15);
}
void panicmon_init(struct panicmon *panicmon, struct scsc_mx *mx)
{
struct scsc_mif_abs *mif;
panicmon->mx = mx;
mif = scsc_mx_get_mif_abs(mx);
/* register isr with mif abstraction */
mif->irq_reg_reset_request_handler(mif, panicmon_isr, (void *)panicmon);
}
void panicmon_deinit(struct panicmon *panicmon)
{
struct scsc_mif_abs *mif;
mif = scsc_mx_get_mif_abs(panicmon->mx);
mif->irq_unreg_reset_request_handler(mif);
}

View file

@ -0,0 +1,20 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#ifndef _PANICMON_H
#define _PANICMON_H
#include "mxman.h"
struct panicmon;
void panicmon_init(struct panicmon *panicmon, struct scsc_mx *mx);
void panicmon_deinit(struct panicmon *panicmon);
struct panicmon {
struct scsc_mx *mx;
};
#endif /* _PANICMON_H */

View file

@ -0,0 +1,623 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/* Implements */
#include "pcie_mif.h"
/* Uses */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <scsc/scsc_logring.h>
#include "pcie_mif_module.h"
#include "peterson_mutex.h"
#include "pcie_proc.h"
static bool enable_pcie_mif_arm_reset = true;
module_param(enable_pcie_mif_arm_reset, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_pcie_mif_arm_reset, "Enables ARM cores reset");
struct pcie_mif {
struct scsc_mif_abs interface;
struct scsc_mbox_s *mbox;
struct peterson_mutex *p_mutex_r4; /* AP will READ - CR4 will WRITE */
struct peterson_mutex *p_mutex_ap; /* AP will WRITE - CR4 will READ */
struct pci_dev *pdev;
int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
__iomem void *registers;
struct device *dev;
void *mem;
size_t mem_allocated;
dma_addr_t dma_addr;
/* Callback function and dev pointer mif_intr manager handler */
void (*r4_handler)(int irq, void *data);
void *irq_dev;
#ifdef SUPPORTED_M4
void (*m4_handler)(int irq, void *data);
#endif
};
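/*
 * Aside: the two peterson_mutex fields above implement Peterson's two-process
 * mutual exclusion over shared memory, one instance per direction. The real
 * layout lives in peterson_mutex.h (not shown here); the sketch below is an
 * illustrative two-party implementation, not the driver's own code.
 *
 *	struct peterson_mutex {
 *		u32 flag[2];	// flag[i]: process i wants the lock
 *		u32 turn;	// which process yields when both want it
 *	};
 *
 *	static void peterson_lock_sketch(struct peterson_mutex *m, int me)
 *	{
 *		int other = 1 - me;
 *
 *		m->flag[me] = 1;
 *		m->turn = other;
 *		smp_mb();	// publish flag/turn before testing the peer
 *		while (m->flag[other] && m->turn == other)
 *			cpu_relax();
 *	}
 *
 *	static void peterson_unlock_sketch(struct peterson_mutex *m, int me)
 *	{
 *		smp_mb();	// order the critical section before release
 *		m->flag[me] = 0;
 *	}
 */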
#define pcie_mif_from_mif_abs(MIF_ABS_PTR) container_of(MIF_ABS_PTR, struct pcie_mif, interface)
static void pcie_mif_irq_default_handler(int irq, void *data)
{
/* Avoid unused parameter error */
(void)irq;
(void)data;
}
irqreturn_t pcie_mif_isr(int irq, void *data)
{
struct pcie_mif *pcie = (struct pcie_mif *)data;
#ifdef SUPPORTED_M4
/* TODO */
#endif
if (pcie->r4_handler != pcie_mif_irq_default_handler)
pcie->r4_handler(irq, pcie->irq_dev);
else
SCSC_TAG_INFO(PCIE_MIF, "Any handler registered\n");
return IRQ_HANDLED;
}
static void pcie_mif_destroy(struct scsc_mif_abs *interface)
{
/* Avoid unused parameter error */
(void)interface;
}
static char *pcie_mif_get_uid(struct scsc_mif_abs *interface)
{
/* Avoid unused parameter error */
(void)interface;
/* TODO */
/* return "0" for the time being */
return "0";
}
static int pcie_mif_reset(struct scsc_mif_abs *interface, bool reset)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
int ret;
if (enable_pcie_mif_arm_reset || !reset) {
/* Sanity check */
iowrite32(0xdeadbeef, pcie->registers + SCSC_PCIE_SIGNATURE);
mmiowb();
ret = ioread32(pcie->registers + SCSC_PCIE_SIGNATURE);
if (ret != 0xdeadbeef) {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Can't acces BAR0 magic number. Readed: 0x%x Expected: 0x%x\n",
ret, 0xdeadbeef);
return -ENODEV;
}
iowrite32(reset ? 1 : 0,
pcie->registers + SCSC_PCIE_GRST_OFFSET);
mmiowb();
} else
SCSC_TAG_INFO(PCIE_MIF, "Not resetting ARM Cores enable_pcie_mif_arm_reset: %d\n",
enable_pcie_mif_arm_reset);
return 0;
}
static void *pcie_mif_map(struct scsc_mif_abs *interface, size_t *allocated)
{
int ret;
size_t map_len = PCIE_MIF_ALLOC_MEM;
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (allocated)
*allocated = 0;
if (map_len > (PCIE_MIF_PREALLOC_MEM - 1)) {
SCSC_TAG_ERR(PCIE_MIF, "Error allocating DMA memory, requested %zu, maximum %d, consider different size\n", map_len, PCIE_MIF_PREALLOC_MEM);
return NULL;
}
/* should return PAGE_ALIGN Memory */
pcie->mem = dma_alloc_coherent(pcie->dev,
PCIE_MIF_PREALLOC_MEM, &pcie->dma_addr, GFP_KERNEL);
if (pcie->mem == NULL) {
SCSC_TAG_ERR(PCIE_MIF, "Error allocating %d DMA memory\n", PCIE_MIF_PREALLOC_MEM);
return NULL;
}
pcie->mem_allocated = map_len;
SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "Allocated dma coherent mem: %p addr %p\n", pcie->mem, (void *)pcie->dma_addr);
iowrite32((unsigned int)pcie->dma_addr,
pcie->registers + SCSC_PCIE_OFFSET);
mmiowb();
ret = ioread32(pcie->registers + SCSC_PCIE_OFFSET);
SCSC_TAG_INFO(PCIE_MIF, "Read SHARED_BA 0x%0x\n", ret);
if (ret != (unsigned int)pcie->dma_addr) {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Can't acces BAR0 Shared BA. Readed: 0x%x Expected: 0x%x\n", ret, (unsigned int)pcie->dma_addr);
return NULL;
}
#ifdef OLD_REG
/* Allocate mbox struct at the end of the PCIE_MIF_PREALLOC_MEM */
pcie->mbox = (void *)pcie->mem + PCIE_MIF_PREALLOC_MEM - sizeof(struct scsc_mbox_s);
/* Allocate Peterson algorithm shared variables before the mbox */
pcie->p_mutex_r4 = (void *)pcie->mem + PCIE_MIF_PREALLOC_MEM - sizeof(struct scsc_mbox_s) - sizeof(struct peterson_mutex);
pcie->p_mutex_ap = (void *)pcie->mem + PCIE_MIF_PREALLOC_MEM - sizeof(struct scsc_mbox_s) - 2 * (sizeof(struct peterson_mutex));
#else
/* Allocate mbox struct at the end of the PCIE_MIF_PREALLOC_MEM */
pcie->mbox = (void *)pcie->mem + MBOX_OFFSET;
memset(pcie->mbox, 0, sizeof(struct scsc_mbox_s));
/* Allocate Peterson algorithm shared variables before the mbox */
pcie->p_mutex_r4 = (void *)pcie->mem + P_OFFSET_R4;
pcie->p_mutex_ap = (void *)pcie->mem + P_OFFSET_AP;
#endif
SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "pcie->mbox is pointing at %p pcie->mem %p map_len %zu sizeof %zu\n",
pcie->mbox,
pcie->mem,
map_len,
sizeof(struct scsc_mbox_s));
#ifdef SUPPORTED_M4
/* TODO */
#endif
peterson_mutex_init(pcie->p_mutex_ap);
/* Return the max allocatable memory on this abs. implementation */
if (allocated)
*allocated = map_len;
return pcie->mem;
}
/* HERE: Not sure why mem is passed in - it's stored in pcie - as it should be */
static void pcie_mif_unmap(struct scsc_mif_abs *interface, void *mem)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
/* Avoid unused parameter error */
(void)mem;
dma_free_coherent(pcie->dev, PCIE_MIF_PREALLOC_MEM, pcie->mem, pcie->dma_addr);
SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "Freed dma coherent mem: %p addr %p\n", pcie->mem, (void *)pcie->dma_addr);
}
#ifdef MAILBOX_SETGET
static void pcie_mif_mailbox_set(struct scsc_mif_abs *interface, u32 mbox_num, u32 value)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (mbox_num >= NUM_MBOX) {
SCSC_TAG_ERR(PCIE_MIF, "MBOX not mapped\n");
return;
}
pcie->mbox->issr[mbox_num] = value;
}
static u32 pcie_mif_mailbox_get(struct scsc_mif_abs *interface, u32 mbox_num)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
u32 val;
if (mbox_num >= NUM_MBOX) {
SCSC_TAG_ERR(PCIE_MIF, "MBOX not mapped\n");
return -1;
}
val = pcie->mbox->issr[mbox_num];
return val;
}
#endif
static u32 pcie_mif_irq_bit_mask_status_get(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
u32 val;
val = (pcie->mbox->intmr0) >> 16;
return val;
}
static u32 pcie_mif_irq_get(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
u32 val;
val = pcie->mbox->intsr1 >> 16;
return val;
}
static void pcie_mif_irq_bit_set(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target)
{
volatile u32 *set_reg;
volatile u32 *mask_reg;
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (bit_num >= 16) {
SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
return;
}
peterson_mutex_lock(pcie->p_mutex_r4, AP_PROCESS);
/* Set Status Register */
if (target == SCSC_MIF_ABS_TARGET_R4) {
set_reg = &pcie->mbox->intsr0;
mask_reg = &pcie->mbox->intmr0;
} else if (target == SCSC_MIF_ABS_TARGET_M4) {
set_reg = &pcie->mbox->intsr2;
mask_reg = &pcie->mbox->intmr2;
} else {
SCSC_TAG_ERR(PCIE_MIF, "Incorrect Target %d\n", target);
return;
}
*set_reg |= (1 << bit_num) << 16;
/* Check whether int is masked */
if (*mask_reg & ((1 << bit_num) << 16)) {
SCSC_TAG_ERR(PCIE_MIF, "Interrupt is masked - do not generate interrupt\n");
peterson_mutex_unlock(pcie->p_mutex_r4, AP_PROCESS);
return;
}
iowrite32(0xffffff, pcie->registers + SCSC_PCIE_NEWMSG);
mmiowb();
peterson_mutex_unlock(pcie->p_mutex_r4, AP_PROCESS);
}
static void pcie_mif_irq_bit_clear(struct scsc_mif_abs *interface, int bit_num)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (bit_num >= 16) {
SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
return;
}
peterson_mutex_lock(pcie->p_mutex_ap, AP_PROCESS);
pcie->mbox->intsr1 &= ~((1 << bit_num) << 16);
peterson_mutex_unlock(pcie->p_mutex_ap, AP_PROCESS);
}
static void pcie_mif_irq_bit_mask(struct scsc_mif_abs *interface, int bit_num)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (bit_num >= 16) {
SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
return;
}
peterson_mutex_lock(pcie->p_mutex_ap, AP_PROCESS);
pcie->mbox->intmr1 |= ((1 << bit_num) << 16);
peterson_mutex_unlock(pcie->p_mutex_ap, AP_PROCESS);
}
static void pcie_mif_irq_bit_unmask(struct scsc_mif_abs *interface, int bit_num)
{
int irq_unmasked;
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (bit_num >= 16) {
SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
return;
}
peterson_mutex_lock(pcie->p_mutex_ap, AP_PROCESS);
pcie->mbox->intmr1 &= ~((1 << bit_num) << 16);
irq_unmasked = pcie_mif_irq_get(interface) & (1 << bit_num);
peterson_mutex_unlock(pcie->p_mutex_ap, AP_PROCESS);
/* Check whether the interrupt has been triggered */
if (irq_unmasked)
if (pcie->r4_handler != pcie_mif_irq_default_handler)
pcie->r4_handler(bit_num, pcie->irq_dev);
}
static void pcie_mif_irq_reg_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
pcie->r4_handler = handler;
pcie->irq_dev = dev;
}
static void pcie_mif_irq_unreg_handler(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
pcie->r4_handler = pcie_mif_irq_default_handler;
pcie->irq_dev = NULL;
}
static void pcie_mif_irq_reg_reset_request_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
{
(void)interface;
(void)handler;
(void)dev;
}
static void pcie_mif_irq_unreg_reset_request_handler(struct scsc_mif_abs *interface)
{
(void)interface;
}
static u32 *pcie_mif_get_mbox_ptr(struct scsc_mif_abs *interface, u32 mbox_index)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
u32 *addr;
addr = (u32 *)(&pcie->mbox->issr[mbox_index]);
return addr;
}
static int pcie_mif_get_mifram_ref(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
if (ptr > (pcie->mem + 4 * 1024 * 1024)) {
SCSC_TAG_ERR(PCIE_MIF, "ooops limits reached\n");
return -ENOMEM;
}
*ref = (scsc_mifram_ref)((uintptr_t)ptr - (uintptr_t)pcie->mem);
return 0;
}
static void *pcie_mif_get_mifram_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
return (void *)((uintptr_t)pcie->mem + (uintptr_t)ref);
}
static uintptr_t pcie_mif_get_mif_pfn(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
return virt_to_phys(pcie->mem) >> PAGE_SHIFT;
}
static struct device *pcie_mif_get_mif_device(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
return pcie->dev;
}
static void pcie_mif_irq_clear(void)
{
}
static void pcie_mif_dump_register(struct scsc_mif_abs *interface)
{
}
struct scsc_mif_abs *pcie_mif_create(struct pci_dev *pdev, const struct pci_device_id *id)
{
int rc = 0;
struct scsc_mif_abs *pcie_if;
struct pcie_mif *pcie = (struct pcie_mif *)devm_kzalloc(&pdev->dev, sizeof(struct pcie_mif), GFP_KERNEL);
u16 cmd;
/* Avoid unused parameter error */
(void)id;
if (!pcie)
return NULL;
pcie_if = &pcie->interface;
/* initialise interface structure */
pcie_if->destroy = pcie_mif_destroy;
pcie_if->get_uid = pcie_mif_get_uid;
pcie_if->reset = pcie_mif_reset;
pcie_if->map = pcie_mif_map;
pcie_if->unmap = pcie_mif_unmap;
#ifdef MAILBOX_SETGET
pcie_if->mailbox_set = pcie_mif_mailbox_set;
pcie_if->mailbox_get = pcie_mif_mailbox_get;
#endif
pcie_if->irq_bit_set = pcie_mif_irq_bit_set;
pcie_if->irq_get = pcie_mif_irq_get;
pcie_if->irq_bit_mask_status_get = pcie_mif_irq_bit_mask_status_get;
pcie_if->irq_bit_clear = pcie_mif_irq_bit_clear;
pcie_if->irq_bit_mask = pcie_mif_irq_bit_mask;
pcie_if->irq_bit_unmask = pcie_mif_irq_bit_unmask;
pcie_if->irq_reg_handler = pcie_mif_irq_reg_handler;
pcie_if->irq_unreg_handler = pcie_mif_irq_unreg_handler;
pcie_if->irq_reg_reset_request_handler = pcie_mif_irq_reg_reset_request_handler;
pcie_if->irq_unreg_reset_request_handler = pcie_mif_irq_unreg_reset_request_handler;
pcie_if->get_mbox_ptr = pcie_mif_get_mbox_ptr;
pcie_if->get_mifram_ptr = pcie_mif_get_mifram_ptr;
pcie_if->get_mifram_ref = pcie_mif_get_mifram_ref;
pcie_if->get_mifram_pfn = pcie_mif_get_mif_pfn;
pcie_if->get_mif_device = pcie_mif_get_mif_device;
pcie_if->irq_clear = pcie_mif_irq_clear;
pcie_if->mif_dump_registers = pcie_mif_dump_register;
/* Suspend/resume not supported in PCIe MIF */
pcie_if->suspend_reg_handler = NULL;
pcie_if->suspend_unreg_handler = NULL;
/* Update state */
pcie->pdev = pdev;
pcie->dev = &pdev->dev;
pcie->r4_handler = pcie_mif_irq_default_handler;
pcie->irq_dev = NULL;
/* Do just what is necessary to complete the PCI probe:
* - BAR0 setup
* - interrupt setup (will it be able to handle interrupts?)
*/
pci_set_drvdata(pdev, pcie);
rc = pcim_enable_device(pdev);
if (rc) {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
"Error enabling device.\n");
return NULL;
}
/* pci_resource_flags() returns the flags associated with this resource.
* Resource flags define features of the individual resource; for PCI
* resources associated with PCI I/O regions, the information is extracted
* from the base address registers.
* IORESOURCE_MEM: if the associated I/O region exists, one and only one of
* these flags is set. */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
SCSC_TAG_ERR(PCIE_MIF, "Incorrect BAR configuration\n");
return NULL;
}
/* old --- rc = pci_request_regions(pdev, "foo"); */
/* Request and iomap regions specified by @mask (0x01 ---> BAR0)*/
rc = pcim_iomap_regions(pdev, BIT(0), DRV_NAME);
if (rc) {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
"pcim_iomap_regions() failed. Aborting.\n");
return NULL;
}
pci_set_master(pdev);
/* Access iomap allocation table */
/* return __iomem * const * */
pcie->registers = pcim_iomap_table(pdev)[0];
/* Set up a single MSI interrupt */
if (pci_enable_msi(pdev)) {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
"Failed to enable MSI interrupts. Aborting.\n");
return NULL;
}
rc = devm_request_irq(&pdev->dev, pdev->irq, pcie_mif_isr, 0,
DRV_NAME, pcie);
if (rc) {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
"Failed to register MSI handler. Aborting.\n");
return NULL;
}
/*
* 64-bit DMA is currently disabled; the equivalent setup would be:
* if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
*         SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "DMA mask 64bits.\n");
*         pcie->dma_using_dac = 1;
* }
*/
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "DMA mask 32bits.\n");
pcie->dma_using_dac = 0;
} else {
SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Failed to set DMA mask. Aborting.\n");
return NULL;
}
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
/* Make sure Mx is in the reset state */
pcie_mif_reset(pcie_if, true);
/* Create debug proc entry */
pcie_create_proc_dir(pcie);
return pcie_if;
}
void pcie_mif_destroy_pcie(struct pci_dev *pdev, struct scsc_mif_abs *interface)
{
/* Remove debug proc entry */
pcie_remove_proc_dir();
pci_disable_device(pdev);
}
struct pci_dev *pcie_mif_get_pci_dev(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
BUG_ON(!interface || !pcie);
return pcie->pdev;
}
struct device *pcie_mif_get_dev(struct scsc_mif_abs *interface)
{
struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
BUG_ON(!interface || !pcie);
return pcie->dev;
}
/* Functions for proc entry */
int pcie_mif_set_bar0_register(struct pcie_mif *pcie, unsigned int value, unsigned int offset)
{
iowrite32(value, pcie->registers + offset);
mmiowb();
return 0;
}
void pcie_mif_get_bar0(struct pcie_mif *pcie, struct scsc_bar0_reg *bar0)
{
bar0->NEWMSG = ioread32(pcie->registers + SCSC_PCIE_NEWMSG);
bar0->SIGNATURE = ioread32(pcie->registers + SCSC_PCIE_SIGNATURE);
bar0->OFFSET = ioread32(pcie->registers + SCSC_PCIE_OFFSET);
bar0->RUNEN = ioread32(pcie->registers + SCSC_PCIE_RUNEN);
bar0->DEBUG = ioread32(pcie->registers + SCSC_PCIE_DEBUG);
bar0->AXIWCNT = ioread32(pcie->registers + SCSC_PCIE_AXIWCNT);
bar0->AXIRCNT = ioread32(pcie->registers + SCSC_PCIE_AXIRCNT);
bar0->AXIWADDR = ioread32(pcie->registers + SCSC_PCIE_AXIWADDR);
bar0->AXIRADDR = ioread32(pcie->registers + SCSC_PCIE_AXIRADDR);
bar0->TBD = ioread32(pcie->registers + SCSC_PCIE_TBD);
bar0->AXICTRL = ioread32(pcie->registers + SCSC_PCIE_AXICTRL);
bar0->AXIDATA = ioread32(pcie->registers + SCSC_PCIE_AXIDATA);
bar0->AXIRDBP = ioread32(pcie->registers + SCSC_PCIE_AXIRDBP);
bar0->IFAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_IFAXIWCNT);
bar0->IFAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_IFAXIRCNT);
bar0->IFAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_IFAXIWADDR);
bar0->IFAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_IFAXIRADDR);
bar0->IFAXICTRL = ioread32(pcie->registers + SCSC_PCIE_IFAXICTRL);
bar0->GRST = ioread32(pcie->registers + SCSC_PCIE_GRST);
bar0->AMBA2TRANSAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIWCNT);
bar0->AMBA2TRANSAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIRCNT);
bar0->AMBA2TRANSAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIWADDR);
bar0->AMBA2TRANSAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIRADDR);
bar0->AMBA2TRANSAXICTR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXICTR);
bar0->TRANS2PCIEREADALIGNAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIWCNT);
bar0->TRANS2PCIEREADALIGNAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIRCNT);
bar0->TRANS2PCIEREADALIGNAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIWADDR);
bar0->TRANS2PCIEREADALIGNAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIRADDR);
bar0->TRANS2PCIEREADALIGNAXICTRL = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXICTRL);
bar0->READROUNDTRIPMIN = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPMIN);
bar0->READROUNDTRIPMAX = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPMAX);
bar0->READROUNDTRIPLAST = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPLAST);
bar0->CPTAW0 = ioread32(pcie->registers + SCSC_PCIE_CPTAW0);
bar0->CPTAW1 = ioread32(pcie->registers + SCSC_PCIE_CPTAW1);
bar0->CPTAR0 = ioread32(pcie->registers + SCSC_PCIE_CPTAR0);
bar0->CPTAR1 = ioread32(pcie->registers + SCSC_PCIE_CPTAR1);
bar0->CPTB0 = ioread32(pcie->registers + SCSC_PCIE_CPTB0);
bar0->CPTW0 = ioread32(pcie->registers + SCSC_PCIE_CPTW0);
bar0->CPTW1 = ioread32(pcie->registers + SCSC_PCIE_CPTW1);
bar0->CPTW2 = ioread32(pcie->registers + SCSC_PCIE_CPTW2);
bar0->CPTR0 = ioread32(pcie->registers + SCSC_PCIE_CPTR0);
bar0->CPTR1 = ioread32(pcie->registers + SCSC_PCIE_CPTR1);
bar0->CPTR2 = ioread32(pcie->registers + SCSC_PCIE_CPTR2);
bar0->CPTRES = ioread32(pcie->registers + SCSC_PCIE_CPTRES);
bar0->CPTAWDELAY = ioread32(pcie->registers + SCSC_PCIE_CPTAWDELAY);
bar0->CPTARDELAY = ioread32(pcie->registers + SCSC_PCIE_CPTARDELAY);
bar0->CPTSRTADDR = ioread32(pcie->registers + SCSC_PCIE_CPTSRTADDR);
bar0->CPTENDADDR = ioread32(pcie->registers + SCSC_PCIE_CPTENDADDR);
bar0->CPTSZLTHID = ioread32(pcie->registers + SCSC_PCIE_CPTSZLTHID);
bar0->CPTPHSEL = ioread32(pcie->registers + SCSC_PCIE_CPTPHSEL);
bar0->CPTRUN = ioread32(pcie->registers + SCSC_PCIE_CPTRUN);
bar0->FPGAVER = ioread32(pcie->registers + SCSC_PCIE_FPGAVER);
}

View file

@ -0,0 +1,141 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __PCIE_MIF_H
#define __PCIE_MIF_H
#include <linux/pci.h>
#include "scsc_mif_abs.h"
#ifdef CONDOR
#define FPGA_OFFSET 0xb8000000
#else
#define FPGA_OFFSET 0x80000000
#endif
#define SCSC_PCIE_MAGIC_VAL 0xdeadbeef
#define SCSC_PCIE_GRST_OFFSET 0x48
/* BAR0 Registers */
#define SCSC_PCIE_NEWMSG 0x0
#define SCSC_PCIE_SIGNATURE 0x4
#define SCSC_PCIE_OFFSET 0x8
#define SCSC_PCIE_RUNEN 0xC
#define SCSC_PCIE_DEBUG 0x10
#define SCSC_PCIE_AXIWCNT 0x14
#define SCSC_PCIE_AXIRCNT 0x18
#define SCSC_PCIE_AXIWADDR 0x1C
#define SCSC_PCIE_AXIRADDR 0x20
#define SCSC_PCIE_TBD 0x24
#define SCSC_PCIE_AXICTRL 0x28
#define SCSC_PCIE_AXIDATA 0x2C
#define SCSC_PCIE_AXIRDBP 0x30
#define SCSC_PCIE_IFAXIWCNT 0x34
#define SCSC_PCIE_IFAXIRCNT 0x38
#define SCSC_PCIE_IFAXIWADDR 0x3C
#define SCSC_PCIE_IFAXIRADDR 0x40
#define SCSC_PCIE_IFAXICTRL 0x44
#define SCSC_PCIE_GRST 0x48
#define SCSC_PCIE_AMBA2TRANSAXIWCNT 0x4C
#define SCSC_PCIE_AMBA2TRANSAXIRCNT 0x50
#define SCSC_PCIE_AMBA2TRANSAXIWADDR 0x54
#define SCSC_PCIE_AMBA2TRANSAXIRADDR 0x58
#define SCSC_PCIE_AMBA2TRANSAXICTR 0x5C
#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIWCNT 0x60
#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIRCNT 0x64
#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIWADDR 0x68
#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIRADDR 0x6C
#define SCSC_PCIE_TRANS2PCIEREADALIGNAXICTRL 0x70
#define SCSC_PCIE_READROUNDTRIPMIN 0x74
#define SCSC_PCIE_READROUNDTRIPMAX 0x78
#define SCSC_PCIE_READROUNDTRIPLAST 0x7C
#define SCSC_PCIE_CPTAW0 0x80
#define SCSC_PCIE_CPTAW1 0x84
#define SCSC_PCIE_CPTAR0 0x88
#define SCSC_PCIE_CPTAR1 0x8C
#define SCSC_PCIE_CPTB0 0x90
#define SCSC_PCIE_CPTW0 0x94
#define SCSC_PCIE_CPTW1 0x98
#define SCSC_PCIE_CPTW2 0x9C
#define SCSC_PCIE_CPTR0 0xA0
#define SCSC_PCIE_CPTR1 0xA4
#define SCSC_PCIE_CPTR2 0xA8
#define SCSC_PCIE_CPTRES 0xAC
#define SCSC_PCIE_CPTAWDELAY 0xB0
#define SCSC_PCIE_CPTARDELAY 0xB4
#define SCSC_PCIE_CPTSRTADDR 0xB8
#define SCSC_PCIE_CPTENDADDR 0xBC
#define SCSC_PCIE_CPTSZLTHID 0xC0
#define SCSC_PCIE_CPTPHSEL 0xC4
#define SCSC_PCIE_CPTRUN 0xC8
#define SCSC_PCIE_FPGAVER 0xCC
struct scsc_bar0_reg {
u32 NEWMSG;
u32 SIGNATURE;
u32 OFFSET;
u32 RUNEN;
u32 DEBUG;
u32 AXIWCNT;
u32 AXIRCNT;
u32 AXIWADDR;
u32 AXIRADDR;
u32 TBD;
u32 AXICTRL;
u32 AXIDATA;
u32 AXIRDBP;
u32 IFAXIWCNT;
u32 IFAXIRCNT;
u32 IFAXIWADDR;
u32 IFAXIRADDR;
u32 IFAXICTRL;
u32 GRST;
u32 AMBA2TRANSAXIWCNT;
u32 AMBA2TRANSAXIRCNT;
u32 AMBA2TRANSAXIWADDR;
u32 AMBA2TRANSAXIRADDR;
u32 AMBA2TRANSAXICTR;
u32 TRANS2PCIEREADALIGNAXIWCNT;
u32 TRANS2PCIEREADALIGNAXIRCNT;
u32 TRANS2PCIEREADALIGNAXIWADDR;
u32 TRANS2PCIEREADALIGNAXIRADDR;
u32 TRANS2PCIEREADALIGNAXICTRL;
u32 READROUNDTRIPMIN;
u32 READROUNDTRIPMAX;
u32 READROUNDTRIPLAST;
u32 CPTAW0;
u32 CPTAW1;
u32 CPTAR0;
u32 CPTAR1;
u32 CPTB0;
u32 CPTW0;
u32 CPTW1;
u32 CPTW2;
u32 CPTR0;
u32 CPTR1;
u32 CPTR2;
u32 CPTRES;
u32 CPTAWDELAY;
u32 CPTARDELAY;
u32 CPTSRTADDR;
u32 CPTENDADDR;
u32 CPTSZLTHID;
u32 CPTPHSEL;
u32 CPTRUN;
u32 FPGAVER;
};
struct scsc_mif_abs *pcie_mif_create(struct pci_dev *pdev, const struct pci_device_id *id);
void pcie_mif_destroy_pcie(struct pci_dev *pdev, struct scsc_mif_abs *interface);
struct pci_dev *pcie_mif_get_pci_dev(struct scsc_mif_abs *interface);
struct device *pcie_mif_get_dev(struct scsc_mif_abs *interface);
struct pcie_mif;
void pcie_mif_get_bar0(struct pcie_mif *pcie, struct scsc_bar0_reg *bar0);
int pcie_mif_set_bar0_register(struct pcie_mif *pcie, unsigned int value, unsigned int offset);
#endif

View file

@ -0,0 +1,190 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <scsc/scsc_logring.h>
#include "pcie_mif_module.h"
#include "pcie_mif.h"
/* Implements */
#include "scsc_mif_abs.h"
struct mif_abs_node {
struct list_head list;
struct scsc_mif_abs *mif_abs;
};
struct mif_driver_node {
struct list_head list;
struct scsc_mif_abs_driver *driver; /* list of drivers (in practice just the core_module) */
};
struct mif_mmap_node {
struct list_head list;
struct scsc_mif_mmap_driver *driver; /* list of drivers (in practice just the core_module) */
};
static struct pcie_mif_module {
struct list_head mif_abs_list;
struct list_head mif_driver_list;
struct list_head mif_mmap_list;
} mif_module = {
.mif_abs_list = LIST_HEAD_INIT(mif_module.mif_abs_list),
.mif_driver_list = LIST_HEAD_INIT(mif_module.mif_driver_list),
.mif_mmap_list = LIST_HEAD_INIT(mif_module.mif_mmap_list),
};
static const struct pci_device_id pcie_mif_module_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_SAMSUNG_SCSC) },
{ /*End: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pcie_mif_module_tbl);
static void pcie_mif_module_probe_registered_clients(struct scsc_mif_abs *mif_abs)
{
struct mif_driver_node *mif_driver_node, *next;
struct device *dev;
bool driver_registered = false;
/* Traverse Linked List for each mif_driver node */
list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
SCSC_TAG_INFO(PCIE_MIF, "node %p\n", mif_driver_node);
dev = pcie_mif_get_dev(mif_abs);
mif_driver_node->driver->probe(mif_driver_node->driver, mif_abs);
driver_registered = true;
}
if (!driver_registered)
SCSC_TAG_INFO(PCIE_MIF, "No mif drivers registered\n");
}
static int pcie_mif_module_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mif_abs_node *mif_node;
struct scsc_mif_abs *mif_abs;
mif_node = kzalloc(sizeof(*mif_node), GFP_ATOMIC);
if (!mif_node)
return -ENODEV;
mif_abs = pcie_mif_create(pdev, id);
if (!mif_abs) {
SCSC_TAG_INFO(PCIE_MIF, "Error creating PCIe interface\n");
kfree(mif_node);
return -ENODEV;
}
/* Add node */
mif_node->mif_abs = mif_abs;
SCSC_TAG_INFO(PCIE_MIF, "mif_node A %p\n", mif_node);
list_add_tail(&mif_node->list, &mif_module.mif_abs_list);
pcie_mif_module_probe_registered_clients(mif_abs);
return 0;
}
static void pcie_mif_module_remove(struct pci_dev *pdev)
{
struct mif_abs_node *mif_node, *next;
bool match = false;
/* Remove node */
list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
if (pcie_mif_get_pci_dev(mif_node->mif_abs) == pdev) {
match = true;
SCSC_TAG_INFO(PCIE_MIF, "Match, destroy pcie_mif\n");
pcie_mif_destroy_pcie(pdev, mif_node->mif_abs);
list_del(&mif_node->list);
kfree(mif_node);
}
}
if (!match)
SCSC_TAG_INFO(PCIE_MIF, "FATAL, no match for given scsc_mif_abs\n");
}
static struct pci_driver scsc_pcie = {
.name = DRV_NAME,
.id_table = pcie_mif_module_tbl,
.probe = pcie_mif_module_probe,
.remove = pcie_mif_module_remove,
};
void scsc_mif_abs_register(struct scsc_mif_abs_driver *driver)
{
struct mif_driver_node *mif_driver_node;
struct mif_abs_node *mif_node;
struct device *dev;
/* Add node in driver linked list */
mif_driver_node = kzalloc(sizeof(*mif_driver_node), GFP_ATOMIC);
if (!mif_driver_node)
return;
mif_driver_node->driver = driver;
list_add_tail(&mif_driver_node->list, &mif_module.mif_driver_list);
/* Traverse Linked List for each mif_abs node */
list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
dev = pcie_mif_get_dev(mif_node->mif_abs);
driver->probe(driver, mif_node->mif_abs);
}
}
EXPORT_SYMBOL(scsc_mif_abs_register);
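/*
 * Registration sketch (illustrative only; "example_probe" is a hypothetical
 * client callback, not part of this module). A transport client fills in a
 * scsc_mif_abs_driver and registers it; probe() fires immediately for every
 * PCIe interface already in mif_abs_list, and again on later probes:
 *
 *	static void example_probe(struct scsc_mif_abs_driver *drv,
 *				  struct scsc_mif_abs *mif)
 *	{
 *		// map memory, register IRQ handlers via the mif ops
 *	}
 *
 *	static struct scsc_mif_abs_driver example_drv = {
 *		.probe = example_probe,
 *	};
 *
 *	scsc_mif_abs_register(&example_drv);
 */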
void scsc_mif_abs_unregister(struct scsc_mif_abs_driver *driver)
{
struct mif_driver_node *mif_driver_node, *next;
/* Traverse Linked List for each mif_driver node */
list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
if (mif_driver_node->driver == driver) {
list_del(&mif_driver_node->list);
kfree(mif_driver_node);
}
}
}
EXPORT_SYMBOL(scsc_mif_abs_unregister);
/* Register a mmap - debug driver - for this specific transport*/
void scsc_mif_mmap_register(struct scsc_mif_mmap_driver *mmap_driver)
{
struct mif_mmap_node *mif_mmap_node;
struct mif_abs_node *mif_node;
/* Add node in driver linked list */
mif_mmap_node = kzalloc(sizeof(*mif_mmap_node), GFP_ATOMIC);
if (!mif_mmap_node)
return;
mif_mmap_node->driver = mmap_driver;
list_add_tail(&mif_mmap_node->list, &mif_module.mif_mmap_list);
/* Traverse Linked List for each mif_abs node */
list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
mmap_driver->probe(mmap_driver, mif_node->mif_abs);
}
}
EXPORT_SYMBOL(scsc_mif_mmap_register);
/* Unregister a mmap - debug driver - for this specific transport*/
void scsc_mif_mmap_unregister(struct scsc_mif_mmap_driver *mmap_driver)
{
struct mif_mmap_node *mif_mmap_node, *next;
/* Traverse Linked List for each mif_mmap_driver node */
list_for_each_entry_safe(mif_mmap_node, next, &mif_module.mif_mmap_list, list) {
if (mif_mmap_node->driver == mmap_driver) {
list_del(&mif_mmap_node->list);
kfree(mif_mmap_node);
}
}
}
EXPORT_SYMBOL(scsc_mif_mmap_unregister);
module_pci_driver(scsc_pcie);
MODULE_DESCRIPTION("SLSI PCIe mx140 MIF abstraction");
MODULE_AUTHOR("SLSI");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,27 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#define PCI_DEVICE_ID_SAMSUNG_SCSC 0x7011
#define DRV_NAME "scscPCIe"
/* Max amount of memory allocated by dma_alloc_coherent */
#define PCIE_MIF_PREALLOC_MEM (4 * 1024 * 1024)
/* Allocatable memory for upper layers */
/* This value must take into account that PCIE_MIF_PREALLOC_MEM also holds
* the mbox/register emulation and the peterson mutex: */
/* -------------------- <- PCIE_MIF_PREALLOC_MEM
* |   scsc_mbox_s    |
* |------------------|
* |   peterson_m     |
* |------------------|
* |  ////////////    |
* |------------------| <- PCIE_MIF_ALLOC_MEM
* |   alloc memory   |
* |                  |
* |                  |
* --------------------
*/
#define PCIE_MIF_ALLOC_MEM (3 * 1024 * 1024)
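/*
 * Illustrative sanity check (not in the original header): the emulation
 * structures must fit in the gap between the two sizes, so any .c file
 * including this header could guard the configuration at compile time:
 *
 *	BUILD_BUG_ON(PCIE_MIF_ALLOC_MEM > PCIE_MIF_PREALLOC_MEM);
 *
 * With the values above the gap is 1 MiB (4 MiB - 3 MiB) for scsc_mbox_s,
 * the peterson mutex and padding.
 */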

View file

@ -0,0 +1,278 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/uaccess.h>
#include <scsc/scsc_logring.h>
#include "pcie_proc.h"
#include "pcie_mif.h"
static struct proc_dir_entry *procfs_dir;
static bool pcie_val;
/* singleton */
struct pcie_mif *pcie_global;
static int pcie_procfs_open_file_generic(struct inode *inode, struct file *file)
{
file->private_data = PCIE_PDE_DATA(inode);
return 0;
}
PCIE_PROCFS_RW_FILE_OPS(pcie_trg);
PCIE_PROCFS_SEQ_FILE_OPS(pcie_dbg);
static ssize_t pcie_procfs_pcie_trg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[128];
int pos = 0;
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "%d\n", (pcie_val ? 1 : 0));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
#define ROW 52
#define COL 2
static char *lookup_regs[ROW][COL] = {
{ "NEWMSG", "0" },
{ "SIGNATURE", "4" },
{ "OFFSET", "8" },
{ "RUNEN", "12" },
{ "DEBUG", "16" },
{ "AXIWCNT", "20" },
{ "AXIRCNT", "24" },
{ "AXIWADDR", "28" },
{ "AXIRADDR", "32" },
{ "TBD", "36" },
{ "AXICTRL", "40" },
{ "AXIDATA", "44" },
{ "AXIRDBP", "48" },
{ "IFAXIWCNT", "52" },
{ "IFAXIRCNT", "56" },
{ "IFAXIWADDR", "60" },
{ "IFAXIRADDR", "64" },
{ "IFAXICTRL", "68" },
{ "GRST", "72" },
{ "AMBA2TRANSAXIWCNT", "76" },
{ "AMBA2TRANSAXIRCNT", "80" },
{ "AMBA2TRANSAXIWADDR", "84" },
{ "AMBA2TRANSAXIRADDR", "88" },
{ "AMBA2TRANSAXICTR", "92" },
{ "TRANS2PCIEREADALIGNAXIWCNT", "96" },
{ "TRANS2PCIEREADALIGNAXIRCNT", "100" },
{ "TRANS2PCIEREADALIGNAXIWADDR", "104" },
{ "TRANS2PCIEREADALIGNAXIRADDR", "108" },
{ "TRANS2PCIEREADALIGNAXICTRL", "112" },
{ "READROUNDTRIPMIN", "116" },
{ "READROUNDTRIPMAX", "120" },
{ "READROUNDTRIPLAST", "124" },
{ "CPTAW0", "128" },
{ "CPTAW1", "132" },
{ "CPTAR0", "136" },
{ "CPTAR1", "140" },
{ "CPTB0", "144" },
{ "CPTW0", "148" },
{ "CPTW1", "152" },
{ "CPTW2", "156" },
{ "CPTR0", "160" },
{ "CPTR1", "164" },
{ "CPTR2", "168" },
{ "CPTRES", "172" },
{ "CPTAWDELAY", "176" },
{ "CPTARDELAY", "180" },
{ "CPTSRTADDR", "184" },
{ "CPTENDADDR", "188" },
{ "CPTSZLTHID", "192" },
{ "CPTPHSEL", "196" },
{ "CPTRUN", "200" },
{ "FPGAVER", "204" },
};
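/*
 * Usage sketch (illustrative): the second column holds the BAR0 byte
 * offsets as decimal strings (e.g. DEBUG -> "16" -> 0x10, matching
 * SCSC_PCIE_DEBUG in pcie_mif.h). From userspace:
 *
 *	echo "DEBUG 0xcafe" > /proc/driver/pcie_ctrl/pcie_trg
 *	cat /proc/driver/pcie_ctrl/pcie_dbg
 */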
/* Parse "<register> <value>" from userspace and write the value to the named BAR0 register */
static ssize_t pcie_procfs_pcie_trg_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[128];
char *sptr, *token;
unsigned int len = 0, pass = 0;
u32 value = 0, offset = 0; /* kstrtou32() below requires a u32 * */
int i = 0;
int rc;
int match = 0;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
sptr = buf;
while ((token = strsep(&sptr, " ")) != NULL) {
switch (pass) {
/* register */
case 0:
SCSC_TAG_INFO(PCIE_MIF, "str %s\n", lookup_regs[0][0]);
SCSC_TAG_INFO(PCIE_MIF, "token %s\n", token);
SCSC_TAG_INFO(PCIE_MIF, "len %d\n", len);
for (i = 0; i < ROW; i++)
if (!strncmp(lookup_regs[i][0], token, len)) {
rc = kstrtou32(lookup_regs[i][1], 0, &offset);
if (rc)
match = 0;
else
match = 1;
break;
}
if (!match) {
SCSC_TAG_INFO(PCIE_MIF, "Register %s not Found!!\n", token);
SCSC_TAG_INFO(PCIE_MIF, "Type 'cat /proc/driver/pcie_ctrl/pcie_dbg' to get register names\n");
}
break;
/* value */
case 1:
if ((token[0] == '0') && (token[1] == 'x')) {
if (kstrtou32(token, 16, &value)) {
SCSC_TAG_INFO(PCIE_MIF, "Incorrect format,,,address should start by 0x\n");
SCSC_TAG_INFO(PCIE_MIF, "Example: \"0xaaaabbbb 256 8\"\n");
goto error;
}
} else {
SCSC_TAG_INFO(PCIE_MIF, "Incorrect format,,,address should start by 0x\n");
SCSC_TAG_INFO(PCIE_MIF, "Example: \"0xaaaabbbb 256 8\"\n");
goto error;
}
break;
}
pass++;
}
if (pass != 2 || !match) { /* '&&' would let an unmatched register fall through to an out-of-bounds write */
SCSC_TAG_INFO(PCIE_MIF, "Wrong format: <register> <value (hex)>\n");
SCSC_TAG_INFO(PCIE_MIF, "Example: \"DEBUGADDR 0xaaaabbbb\"\n");
goto error;
}
SCSC_TAG_INFO(PCIE_MIF, "Setting value 0x%x to register %s offset %d\n", value, lookup_regs[i][0], offset);
pcie_mif_set_bar0_register(pcie_global, value, offset);
error:
return count;
}
static int pcie_procfs_pcie_dbg_show(struct seq_file *m, void *v)
{
struct scsc_bar0_reg bar0;
if (!pcie_global) {
seq_puts(m, "endpoint not registered");
return 0;
}
pcie_mif_get_bar0(pcie_global, &bar0);
seq_puts(m, "\n---------BAR0---------\n");
seq_printf(m, "NEWMSG 0x%08X\n", bar0.NEWMSG);
seq_printf(m, "SIGNATURE 0x%08X\n", bar0.SIGNATURE);
seq_printf(m, "OFFSET 0x%08X\n", bar0.OFFSET);
seq_printf(m, "RUNEN 0x%08X\n", bar0.RUNEN);
seq_printf(m, "DEBUG 0x%08X\n", bar0.DEBUG);
seq_printf(m, "AXIWCNT 0x%08X\n", bar0.AXIWCNT);
seq_printf(m, "AXIRCNT 0x%08X\n", bar0.AXIRCNT);
seq_printf(m, "AXIWADDR 0x%08X\n", bar0.AXIWADDR);
seq_printf(m, "AXIRADDR 0x%08X\n", bar0.AXIRADDR);
seq_printf(m, "TBD 0x%08X\n", bar0.TBD);
seq_printf(m, "AXICTRL 0x%08X\n", bar0.AXICTRL);
seq_printf(m, "AXIDATA 0x%08X\n", bar0.AXIDATA);
seq_printf(m, "AXIRDBP 0x%08X\n", bar0.AXIRDBP);
seq_printf(m, "IFAXIWCNT 0x%08X\n", bar0.IFAXIWCNT);
seq_printf(m, "IFAXIRCNT 0x%08X\n", bar0.IFAXIRCNT);
seq_printf(m, "IFAXIWADDR 0x%08X\n", bar0.IFAXIWADDR);
seq_printf(m, "IFAXIRADDR 0x%08X\n", bar0.IFAXIRADDR);
seq_printf(m, "IFAXICTRL 0x%08X\n", bar0.IFAXICTRL);
seq_printf(m, "GRST 0x%08X\n", bar0.GRST);
seq_printf(m, "AMBA2TRANSAXIWCNT 0x%08X\n", bar0.AMBA2TRANSAXIWCNT);
seq_printf(m, "AMBA2TRANSAXIRCNT 0x%08X\n", bar0.AMBA2TRANSAXIRCNT);
seq_printf(m, "AMBA2TRANSAXIWADDR 0x%08X\n", bar0.AMBA2TRANSAXIWADDR);
seq_printf(m, "AMBA2TRANSAXIRADDR 0x%08X\n", bar0.AMBA2TRANSAXIRADDR);
seq_printf(m, "AMBA2TRANSAXICTR 0x%08X\n", bar0.AMBA2TRANSAXICTR);
seq_printf(m, "TRANS2PCIEREADALIGNAXIWCNT 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIWCNT);
seq_printf(m, "TRANS2PCIEREADALIGNAXIRCNT 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIRCNT);
seq_printf(m, "TRANS2PCIEREADALIGNAXIWADDR 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIWADDR);
seq_printf(m, "TRANS2PCIEREADALIGNAXIRADDR 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIRADDR);
seq_printf(m, "TRANS2PCIEREADALIGNAXICTRL 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXICTRL);
seq_printf(m, "READROUNDTRIPMIN 0x%08X\n", bar0.READROUNDTRIPMIN);
seq_printf(m, "READROUNDTRIPMAX 0x%08X\n", bar0.READROUNDTRIPMAX);
seq_printf(m, "READROUNDTRIPLAST 0x%08X\n", bar0.READROUNDTRIPLAST);
seq_printf(m, "CPTAW0 0x%08X\n", bar0.CPTAW0);
seq_printf(m, "CPTAW1 0x%08X\n", bar0.CPTAW1);
seq_printf(m, "CPTAR0 0x%08X\n", bar0.CPTAR0);
seq_printf(m, "CPTAR1 0x%08X\n", bar0.CPTAR1);
seq_printf(m, "CPTB0 0x%08X\n", bar0.CPTB0);
seq_printf(m, "CPTW0 0x%08X\n", bar0.CPTW0);
seq_printf(m, "CPTW1 0x%08X\n", bar0.CPTW1);
seq_printf(m, "CPTW2 0x%08X\n", bar0.CPTW2);
seq_printf(m, "CPTR0 0x%08X\n", bar0.CPTR0);
seq_printf(m, "CPTR1 0x%08X\n", bar0.CPTR1);
seq_printf(m, "CPTR2 0x%08X\n", bar0.CPTR2);
seq_printf(m, "CPTRES 0x%08X\n", bar0.CPTRES);
seq_printf(m, "CPTAWDELAY 0x%08X\n", bar0.CPTAWDELAY);
seq_printf(m, "CPTARDELAY 0x%08X\n", bar0.CPTARDELAY);
seq_printf(m, "CPTSRTADDR 0x%08X\n", bar0.CPTSRTADDR);
seq_printf(m, "CPTENDADDR 0x%08X\n", bar0.CPTENDADDR);
seq_printf(m, "CPTSZLTHID 0x%08X\n", bar0.CPTSZLTHID);
seq_printf(m, "CPTPHSEL 0x%08X\n", bar0.CPTPHSEL);
seq_printf(m, "CPTRUN 0x%08X\n", bar0.CPTRUN);
seq_printf(m, "FPGAVER 0x%08X\n", bar0.FPGAVER);
return 0;
}
static const char *procdir = "driver/pcie_ctrl";
#define PCIE_DIRLEN 128
int pcie_create_proc_dir(struct pcie_mif *pcie)
{
char dir[PCIE_DIRLEN];
struct proc_dir_entry *parent;
(void)snprintf(dir, sizeof(dir), "%s", procdir);
parent = proc_mkdir(dir, NULL);
if (parent) {
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
parent->data = NULL;
#endif
procfs_dir = parent;
PCIE_PROCFS_ADD_FILE(NULL, pcie_trg, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
PCIE_PROCFS_SEQ_ADD_FILE(NULL, pcie_dbg, parent, S_IRUSR | S_IRGRP | S_IROTH);
} else {
SCSC_TAG_INFO(PCIE_MIF, "failed to create /proc dir\n");
return -EINVAL;
}
pcie_global = pcie;
return 0;
err:
return -EINVAL;
}
void pcie_remove_proc_dir(void)
{
if (procfs_dir) {
char dir[PCIE_DIRLEN];
PCIE_PROCFS_REMOVE_FILE(pcie_trg, procfs_dir);
PCIE_PROCFS_REMOVE_FILE(pcie_dbg, procfs_dir);
(void)snprintf(dir, sizeof(dir), "%s", procdir);
remove_proc_entry(dir, NULL);
procfs_dir = NULL;
}
pcie_global = NULL;
}

View file

@ -0,0 +1,102 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
/*
* Chip Manager /proc interface
*/
#include <linux/proc_fs.h>
#include <linux/version.h>
#include <linux/seq_file.h>
#include "pcie_mif.h"
#ifndef SCSC_PCIE_PROC_H
#define SCSC_PCIE_PROC_H
#ifndef AID_WIFI
#define AID_WIFI 0444
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define PCIE_PDE_DATA(inode) PDE_DATA(inode)
#else
#define PCIE_PDE_DATA(inode) (PDE(inode)->data)
#endif
#define PCIE_PROCFS_SEQ_FILE_OPS(name) \
static int pcie_procfs_ ## name ## _show(struct seq_file *m, void *v); \
static int pcie_procfs_ ## name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, pcie_procfs_ ## name ## _show, PCIE_PDE_DATA(inode)); \
} \
static const struct file_operations pcie_procfs_ ## name ## _fops = { \
.open = pcie_procfs_ ## name ## _open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
#define PCIE_PROCFS_SEQ_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry; \
entry = proc_create_data(# name, mode, parent, &pcie_procfs_ ## name ## _fops, _sdev); \
if (!entry) { \
goto err; \
} \
PCIE_PROCFS_SET_UID_GID(entry); \
} while (0)
#define PCIE_PROCFS_RW_FILE_OPS(name) \
static ssize_t pcie_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
static ssize_t pcie_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
static const struct file_operations pcie_procfs_ ## name ## _fops = { \
.read = pcie_procfs_ ## name ## _read, \
.write = pcie_procfs_ ## name ## _write, \
.open = pcie_procfs_open_file_generic, \
.llseek = generic_file_llseek \
}
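/*
 * Expansion sketch (illustrative): PCIE_PROCFS_RW_FILE_OPS(pcie_trg)
 * forward-declares pcie_procfs_pcie_trg_read()/_write() and emits a
 * pcie_procfs_pcie_trg_fops struct wired to them, so the .c file only has
 * to define the two handlers and call PCIE_PROCFS_ADD_FILE(..., pcie_trg, ...).
 * PCIE_PROCFS_SEQ_FILE_OPS works the same way for seq_file-based reads.
 */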
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define PCIE_PROCFS_SET_UID_GID(_entry) \
do { \
kuid_t proc_kuid = KUIDT_INIT(AID_WIFI); \
kgid_t proc_kgid = KGIDT_INIT(AID_WIFI); \
proc_set_user(_entry, proc_kuid, proc_kgid); \
} while (0)
#else
#define PCIE_PROCFS_SET_UID_GID(entry) \
do { \
(entry)->uid = AID_WIFI; \
(entry)->gid = AID_WIFI; \
} while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define PCIE_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &pcie_procfs_ ## name ## _fops, _sdev); \
PCIE_PROCFS_SET_UID_GID(entry); \
} while (0)
#else
#define PCIE_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
do { \
struct proc_dir_entry *entry; \
entry = create_proc_entry(# name, mode, parent); \
if (entry) { \
entry->proc_fops = &pcie_procfs_ ## name ## _fops; \
entry->data = _sdev; \
PCIE_PROCFS_SET_UID_GID(entry); \
} \
} while (0)
#endif
#define PCIE_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
int pcie_create_proc_dir(struct pcie_mif *pcie);
void pcie_remove_proc_dir(void);
#endif /* SCSC_PCIE_PROC_H */

View file

@ -0,0 +1,50 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __SCSC_PETERSON_H
#define __SCSC_PETERSON_H
#include <linux/delay.h>
#include "mif_reg.h"
#define R4_PROCESS 0
#define AP_PROCESS 1
#define DELAY_NS 100 /* delay in ns */
static inline void peterson_mutex_init(struct peterson_mutex *p_mutex)
{
if (!p_mutex) {
pr_info("Mutex not declared\n");
return;
}
p_mutex->flag[0] = false;
p_mutex->flag[1] = false;
p_mutex->turn = 0;
}
static inline void peterson_mutex_lock(struct peterson_mutex *p_mutex, unsigned int process)
{
unsigned int other = 1 - process;
p_mutex->flag[process] = true;
/* write barrier */
smp_wmb();
p_mutex->turn = other;
/* write barrier */
smp_wmb();
while ((p_mutex->flag[other]) && (p_mutex->turn == other))
ndelay(DELAY_NS);
}
static inline void peterson_mutex_unlock(struct peterson_mutex *p_mutex, unsigned int process)
{
p_mutex->flag[process] = false;
/* write barrier */
smp_wmb();
}
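/*
 * Usage sketch (illustrative; "shared_word" is hypothetical): the AP side
 * brackets accesses to memory shared with R4 like this, while the firmware
 * uses R4_PROCESS on its side of the same peterson_mutex instance:
 *
 *	peterson_mutex_lock(p_mutex, AP_PROCESS);
 *	*shared_word = value;		// critical section
 *	peterson_mutex_unlock(p_mutex, AP_PROCESS);
 */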
#endif

File diff suppressed because it is too large

View file

@ -0,0 +1,24 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __PLATFORM_MIF_H
#define __PLATFORM_MIF_H
#include "scsc_mif_abs.h"
#define PLATFORM_MIF_MBOX 0
#define PLATFORM_MIF_ALIVE 1
#define PLATFORM_MIF_WDOG 2
struct platform_device;
struct scsc_mif_abs *platform_mif_create(struct platform_device *pdev);
void platform_mif_destroy_platform(struct platform_device *pdev, struct scsc_mif_abs *interface);
struct platform_device *platform_mif_get_platform_dev(struct scsc_mif_abs *interface);
struct device *platform_mif_get_dev(struct scsc_mif_abs *interface);
int platform_mif_suspend(struct scsc_mif_abs *interface);
void platform_mif_resume(struct scsc_mif_abs *interface);
#endif

View file

@ -0,0 +1,252 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <scsc/scsc_logring.h>
#include "platform_mif_module.h"
#include "platform_mif.h"
/* Implements */
#include "scsc_mif_abs.h"
/* Variables */
struct mif_abs_node {
struct list_head list;
struct scsc_mif_abs *mif_abs;
};
struct mif_driver_node {
struct list_head list;
struct scsc_mif_abs_driver *driver;
};
struct mif_mmap_node {
struct list_head list;
struct scsc_mif_mmap_driver *driver;
};
static struct platform_mif_module {
struct list_head mif_abs_list;
struct list_head mif_driver_list;
struct list_head mif_mmap_list;
} mif_module = {
.mif_abs_list = LIST_HEAD_INIT(mif_module.mif_abs_list),
.mif_driver_list = LIST_HEAD_INIT(mif_module.mif_driver_list),
.mif_mmap_list = LIST_HEAD_INIT(mif_module.mif_mmap_list),
};
/* Private Functions */
static void platform_mif_module_probe_registered_clients(struct scsc_mif_abs *mif_abs)
{
struct mif_driver_node *mif_driver_node, *next;
bool driver_registered = false;
/* Traverse Linked List for each mif_driver node */
list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
mif_driver_node->driver->probe(mif_driver_node->driver, mif_abs);
driver_registered = true;
}
}
static int platform_mif_module_probe(struct platform_device *pdev)
{
struct mif_abs_node *mif_node;
struct scsc_mif_abs *mif_abs;
/* TODO: ADD EARLY BOARD INITIALIZATIONS IF REQUIRED */
/* platform_mif_init(); */
mif_node = kzalloc(sizeof(*mif_node), GFP_ATOMIC);
if (!mif_node)
return -ENODEV;
mif_abs = platform_mif_create(pdev);
if (!mif_abs) {
SCSC_TAG_ERR(PLAT_MIF, "Error creating platform interface\n");
kfree(mif_node);
return -ENODEV;
}
/* Add node */
mif_node->mif_abs = mif_abs;
list_add_tail(&mif_node->list, &mif_module.mif_abs_list);
platform_mif_module_probe_registered_clients(mif_abs);
return 0;
}
static int platform_mif_module_remove(struct platform_device *pdev)
{
struct mif_abs_node *mif_node, *next;
bool match = false;
/* Remove node */
list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
if (platform_mif_get_platform_dev(mif_node->mif_abs) == pdev) {
match = true;
platform_mif_destroy_platform(pdev, mif_node->mif_abs);
list_del(&mif_node->list);
kfree(mif_node);
}
}
if (!match)
SCSC_TAG_ERR(PLAT_MIF, "No match for given scsc_mif_abs\n");
return 0;
}
static int platform_mif_module_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mif_abs_node *mif_node, *next;
int r;
SCSC_TAG_INFO(PLAT_MIF, "\n");
/* Traverse mif_abs list for this platform_device to suspend */
list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
if (platform_mif_get_platform_dev(mif_node->mif_abs) == pdev) {
/* Signal suspend, client can refuse */
r = platform_mif_suspend(mif_node->mif_abs);
if (r) {
SCSC_TAG_INFO(PLAT_MIF, "%d\n", r);
return r;
}
}
}
return 0;
}
static int platform_mif_module_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mif_abs_node *mif_node, *next;
SCSC_TAG_INFO(PLAT_MIF, "\n");
/* Traverse mif_abs list for this platform_device to resume */
list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
if (platform_mif_get_platform_dev(mif_node->mif_abs) == pdev) {
/* Signal resume */
platform_mif_resume(mif_node->mif_abs);
}
}
return 0;
}
static const struct dev_pm_ops platform_mif_pm_ops = {
.suspend = platform_mif_module_suspend,
.resume = platform_mif_module_resume,
};
static const struct of_device_id scsc_wifibt[] = {
{ .compatible = "samsung,scsc_wifibt" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, scsc_wifibt);
static struct platform_driver platform_mif_driver = {
.probe = platform_mif_module_probe,
.remove = platform_mif_module_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.pm = &platform_mif_pm_ops,
.of_match_table = of_match_ptr(scsc_wifibt),
},
};
/* Choose when the driver should be probed */
#if 1
module_platform_driver(platform_mif_driver);
#else
static int platform_mif_init(void)
{
SCSC_TAG_INFO(PLAT_MIF, "register platform driver\n");
return platform_driver_register(&platform_mif_driver);
}
core_initcall(platform_mif_init);
#endif
/* Public Functions */
void scsc_mif_abs_register(struct scsc_mif_abs_driver *driver)
{
struct mif_driver_node *mif_driver_node;
struct mif_abs_node *mif_node;
/* Add node in driver linked list */
mif_driver_node = kzalloc(sizeof(*mif_driver_node), GFP_ATOMIC);
if (!mif_driver_node)
return;
mif_driver_node->driver = driver;
list_add_tail(&mif_driver_node->list, &mif_module.mif_driver_list);
/* Traverse Linked List for each mif_abs node */
list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
driver->probe(driver, mif_node->mif_abs);
}
}
EXPORT_SYMBOL(scsc_mif_abs_register);
void scsc_mif_abs_unregister(struct scsc_mif_abs_driver *driver)
{
struct mif_driver_node *mif_driver_node, *next;
/* Traverse Linked List for each mif_driver node */
list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
if (mif_driver_node->driver == driver) {
list_del(&mif_driver_node->list);
kfree(mif_driver_node);
}
}
}
EXPORT_SYMBOL(scsc_mif_abs_unregister);
/* Register a mmap - debug driver - for this specific transport*/
void scsc_mif_mmap_register(struct scsc_mif_mmap_driver *mmap_driver)
{
struct mif_mmap_node *mif_mmap_node;
struct mif_abs_node *mif_node;
/* Add node in driver linked list */
mif_mmap_node = kzalloc(sizeof(*mif_mmap_node), GFP_ATOMIC);
if (!mif_mmap_node)
return;
mif_mmap_node->driver = mmap_driver;
list_add_tail(&mif_mmap_node->list, &mif_module.mif_mmap_list);
/* Traverse Linked List for each mif_abs node */
list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
mmap_driver->probe(mmap_driver, mif_node->mif_abs);
}
}
EXPORT_SYMBOL(scsc_mif_mmap_register);
/* Unregister a mmap - debug driver - for this specific transport*/
void scsc_mif_mmap_unregister(struct scsc_mif_mmap_driver *mmap_driver)
{
struct mif_mmap_node *mif_mmap_node, *next;
/* Traverse Linked List for each mif_driver node */
list_for_each_entry_safe(mif_mmap_node, next, &mif_module.mif_mmap_list, list) {
if (mif_mmap_node->driver == mmap_driver) {
list_del(&mif_mmap_node->list);
kfree(mif_mmap_node);
}
}
}
EXPORT_SYMBOL(scsc_mif_mmap_unregister);
MODULE_DESCRIPTION("SCSC Platform device Maxwell MIF abstraction");
MODULE_AUTHOR("SCSC");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,12 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __PLATFORM_MIF_MODULE_H
#define __PLATFORM_MIF_MODULE_H
#define DRV_NAME "scsc_wlbt"
#endif /* __PLATFORM_MIF_MODULE_H */

View file

@ -0,0 +1,49 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __SCSC_APP_MSG_H__
#define __SCSC_APP_MSG_H__
#define BCSP_CSTOPB_MASK 0x0001
#define BCSP_PARENB_MASK 0x0002
#define BCSP_PAREVEN_MASK 0x0004
#define BCSP_CRTSCTS_MASK 0x0008
enum {
SCSC_APP_MSG_TYPE_APP_STARTED_REPLY = 0,
SCSC_APP_MSG_TYPE_GET_DB,
SCSC_APP_MSG_TYPE_GET_DB_REPLY,
SCSC_APP_MSG_TYPE_LD_REGISTER_LOW_RATE,
SCSC_APP_MSG_TYPE_LD_REGISTER_HIGH_RATE,
SCSC_APP_MSG_TYPE_LD_REGISTER_REPLY,
SCSC_APP_MSG_TYPE_LD_UNREGISTER,
SCSC_APP_MSG_TYPE_LD_UNREGISTER_BREAK,
SCSC_APP_MSG_TYPE_LD_UNREGISTER_REPLY,
SCSC_APP_MSG_TYPE_APP_EXIT,
SCSC_APP_MSG_TYPE_APP_EXIT_REPLY,
SCSC_APP_MSG_TYPE_SET_FAST_RATE,
SCSC_APP_MSG_TYPE_SET_FAST_RATE_REPLY,
};
enum {
SCSC_APP_MSG_STATUS_OK = 0,
SCSC_APP_MSG_STATUS_FAILURE,
};
struct scsc_app_msg_req {
__u16 type;
};
struct scsc_app_msg_resp {
__u16 type;
__u16 status;
__u32 len;
__u8 data[0];
};
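/*
 * Illustrative helper (not part of the original header): total wire size
 * of a response, given the flexible data[0] tail declared above.
 */
static inline __u32 scsc_app_msg_resp_size(const struct scsc_app_msg_resp *r)
{
	return (__u32)sizeof(*r) + r->len;
}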
#endif /* __SCSC_APP_MSG_H__ */

View file

@ -0,0 +1,48 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#ifndef __SCSC_LOGRING_COMMON_H__
#define __SCSC_LOGRING_COMMON_H__
enum {
SCSC_MIN_DBG = 0,
SCSC_EMERG = SCSC_MIN_DBG,
SCSC_ALERT,
SCSC_CRIT,
SCSC_ERR,
SCSC_WARNING,
SCSC_NOTICE,
SCSC_INFO,
SCSC_DEBUG,
SCSC_DBG1 = SCSC_DEBUG, /* 7 */
SCSC_DBG2,
SCSC_DBG3,
SCSC_DBG4, /* 10 */
SCSC_FULL_DEBUG
};
#define SCSC_SOH 0x01
#define DEFAULT_DBGLEVEL SCSC_INFO /* KERN_INFO */
#define DEFAULT_DROPLEVEL SCSC_FULL_DEBUG /* DBG4 + 1 */
#define DEFAULT_ALL_DISABLED -1
#define DEFAULT_DROP_ALL 0
#define DEFAULT_REDIRECT_DROPLVL SCSC_DEBUG
#define DEFAULT_NO_REDIRECT 0
#define DEFAULT_TBUF_SZ 4096
/**
* Nested macros needed to force expansion of 'defval'
* before stringification takes place. Allows for ONE level
* of indirection specifying params.
*/
#define SCSC_MODPARAM_DESC(kparam, descr, eff, defval) \
__SCSC_MODPARAM_DESC(kparam, descr, eff, defval)
#define __SCSC_MODPARAM_DESC(kparam, descr, eff, defval) \
MODULE_PARM_DESC(kparam, " "descr " Effective @"eff " default=" # defval ".")
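/*
 * Expansion sketch (illustrative): the two-level indirection lets a macro
 * argument such as DEFAULT_TBUF_SZ expand *before* '#' stringification, so
 *
 *	SCSC_MODPARAM_DESC(scsc_double_buffer_sz, "Per-reader buffer.",
 *			   "run-time", DEFAULT_TBUF_SZ)
 *
 * becomes
 *
 *	MODULE_PARM_DESC(scsc_double_buffer_sz,
 *			 " Per-reader buffer. Effective @run-time default=4096.")
 */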
#endif /* __SCSC_LOGRING_COMMON_H__ */

View file

@ -0,0 +1,611 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include "scsc_logring_main.h"
#include "scsc_logring_debugfs.h"
static int scsc_max_records_per_read = SCSC_DEFAULT_MAX_RECORDS_PER_READ;
module_param(scsc_max_records_per_read, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_max_records_per_read,
"Number of records a reader can try to get in a shot. 0 is infinite",
"run-time", SCSC_DEFAULT_MAX_RECORDS_PER_READ);
static int scsc_double_buffer_sz = DEFAULT_TBUF_SZ;
module_param(scsc_double_buffer_sz, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_double_buffer_sz,
"Determines the size of the per-reader allocted double buffer.",
"run-time", DEFAULT_TBUF_SZ);
/**
 * BIG NOTE on DOUBLE BUFFERING.
 *
 * In order to extract data from the ring buffer, protected by spinlocks,
 * to user space we use a double buffer: data is finally copied to
 * userspace from a temporary double buffer, after ALL the desired content
 * has been copied into it and all the spinlocks have been released.
 * In order to avoid an additional mutex protecting such a temporary
 * buffer from multiple readers, we use a oneshot throwaway buffer
 * dedicated to each reader and allocated at opening time.
 * The most straightforward way to do this would have been to simply
 * allocate such a buffer inside the read method and throw it away on exit:
 * this is what the underlying printk mechanism does via a simple kmalloc.
 * BUT we decided INSTEAD to use this buffer ALSO as a sort of caching
 * area for each reader in order to cope with under-sized user read
 * requests; basically, no matter what size the user asked for, we
 * ALWAYS RETRIEVE multiples of whole records from the ring, one record
 * being the minimum internal ring-read-request this way.
 * So even if the user asks for a few bytes, less than the next record
 * size, we retrieve ONE WHOLE record from the ring into the double buffer:
 * this way, on the next read request we already have a cached copy of the
 * record and can deal with it inside the read callback without the
 * need to access the ring anymore for that record.
 * The main reason for this is that if we had instead accessed the ring and
 * retrieved ONLY a fraction of the record, on the next request we might NOT
 * be able to provide the remaining part of the record: the ring is an
 * overwriting buffer, so it could have wrapped in the meantime and we
 * could simply have lost that data. That condition would have led us to
 * return partially truncated records to the user whenever we hit this
 * wrap-around condition.
 * Following the approach of WHOLE-record retrieval we can instead be sure
 * to always return fully correct records, although we remain vulnerable
 * to losing data (whole records) while reading if fast writers overwrite
 * our data (since we never want to slow down and starve a writer).
 */
static struct dentry *scsc_debugfs_root;
static atomic_t scsc_debugfs_root_refcnt;
static char *global_fmt_string = "%s";
/**
* Generic open/close calls to use with every logring debugfs file.
* Any file in debugfs has an underlying associated ring buffer:
* opening ANY of these with O_TRUNC leads to ring_buffer truncated
* to zero len.
*/
static int debugfile_open(struct inode *ino, struct file *filp)
{
struct scsc_ibox *i = NULL;
if (!filp->private_data) {
i = kzalloc(sizeof(*i), GFP_KERNEL);
if (!i)
return -ENOMEM;
i->rb = ino->i_private;
filp->private_data = i;
} else {
i = filp->private_data;
}
/* tbuf size is now runtime-configurable so we try a few fallback methods */
i->tbuf = kmalloc(scsc_double_buffer_sz, GFP_KERNEL);
/* Make sure we fall back to the safe default size DEFAULT_TBUF_SZ */
if (!i->tbuf) {
i->tbuf = vmalloc(scsc_double_buffer_sz);
pr_err("LogRing: FAILED tbuf allocation of %d bytes...retried vmalloc()...\n",
scsc_double_buffer_sz);
if (!i->tbuf) {
scsc_double_buffer_sz = DEFAULT_TBUF_SZ;
pr_err("LogRing: FAILED tbuf vmalloc...using DEFAULT %d bytes size.\n",
scsc_double_buffer_sz);
i->tbuf = kmalloc(scsc_double_buffer_sz, GFP_KERNEL);
if (!i->tbuf) {
pr_err("LogRing: FAILED DEFINITELY allocation...aborting\n");
kfree(i);
return -ENOMEM;
}
} else {
i->tbuf_vm = true;
}
}
i->tsz = scsc_double_buffer_sz;
pr_info("LogRing: Allocated per-reader tbuf of %d bytes\n",
scsc_double_buffer_sz);
/* Truncate when attempting to write RO files samlog and samsg */
if (filp->f_flags & (O_WRONLY | O_RDWR) &&
filp->f_flags & O_TRUNC) {
unsigned long flags;
raw_spin_lock_irqsave(&i->rb->lock, flags);
scsc_ring_truncate(i->rb);
raw_spin_unlock_irqrestore(&i->rb->lock, flags);
pr_info("LogRing Truncated to zerolen\n");
/* open() fails here, so release() will never run: free per-reader state */
if (i->tbuf_vm)
vfree(i->tbuf);
else
kfree(i->tbuf);
kfree(i);
filp->private_data = NULL;
return -EACCES;
}
return 0;
}
static int debugfile_release(struct inode *ino, struct file *filp)
{
struct scsc_ibox *i = NULL;
if (!filp->private_data)
return -EFAULT;
i = filp->private_data;
if (!i->tbuf_vm)
kfree(i->tbuf);
else
vfree(i->tbuf);
i->tbuf = NULL;
/* Being paranoid... */
filp->private_data = NULL;
kfree(i);
return 0;
}
/**
* Initialize references for subsequent cached reads: in fact if
* data retrieved from the ring was more than the count-bytes required by
* the caller of this read, we can keep such data stored in tbuf and provide
* it to this same reader on its next read-call.
*
* @i: contains references useful to this reader
* @retrieved_bytes: how many bytes have been stored in tbuf
* @count: a pointer to the count bytes required by this reader
* for this call. We'll manipulate this to return an
* appropriate number of bytes.
*/
static inline
size_t init_cached_read(struct scsc_ibox *i,
size_t retrieved_bytes, size_t *count)
{
if (retrieved_bytes <= *count) {
*count = retrieved_bytes;
} else {
i->t_off = *count;
i->t_used = retrieved_bytes - *count;
i->cached_reads += *count;
}
return 0;
}
/**
* Here we'll serve to user space the next available chunk of
* record directly from the tbuf double buffer without
* accessing the ring anymore.
*
* @i: contains references useful to this reader
* @count: a pointer to the count bytes required by this reader
* for this call. We'll manipulate this to return an
* appropriate number of bytes.
*/
static inline
size_t process_cached_read_data(struct scsc_ibox *i, size_t *count)
{
size_t offset = 0;
offset = i->t_off;
if (i->t_used <= *count) {
/* this was the last chunk cached */
*count = i->t_used;
i->t_off = 0;
i->t_used = 0;
} else {
i->t_off += *count;
i->t_used -= *count;
i->cached_reads += *count;
}
return offset;
}
/**
* This file operation read from the ring using common routines, starting its
* read from head: in other words it immediately blocks waiting for some data to
* arrive. As soon as some data arrives and head moves away, the freshly
* available data is returned to userspace up to the required size , and this
* call goes back to sleeping waiting for more data.
*
* NOTE
* ----
* The need to copy_to_user imposes the use of a temp buffer tbuf which is used
* as a double buffer: being allocated to this reader on open() we do NOT need
* any additional form of mutual exclusion.
* Moreover we use such buffer here as an area to cache the retrieved records:
* if the retrieved record size is bigger than the count bytes required by user
* we'll return less data at first and then deal with the following requests
* pumping data directly from the double buffer without accessing the ring.
*/
static ssize_t samsg_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *f_pos)
{
unsigned long flags;
loff_t current_head = 0;
struct scsc_ibox *i = NULL;
size_t off = 0;
size_t retrieved_bytes = 0;
if (!filp->private_data || !access_ok(VERIFY_WRITE, ubuf, count))
return -EFAULT;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
/* open() assures us that this private data is certainly non-NULL */
i = filp->private_data;
if (!i->t_used) {
raw_spin_lock_irqsave(&i->rb->lock, flags);
current_head = *f_pos ? i->f_pos : i->rb->head;
while (current_head == i->rb->head) {
raw_spin_unlock_irqrestore(&i->rb->lock, flags);
if (wait_event_interruptible(i->rb->wq,
current_head != i->rb->head))
return -ERESTARTSYS;
raw_spin_lock_irqsave(&i->rb->lock, flags);
}
retrieved_bytes = read_next_records(i->rb,
scsc_max_records_per_read,
&current_head, i->tbuf, i->tsz);
/* We MUST keep track of the last known READ record
* in order to keep going from the same place on the next
* read call coming from the same userspace process...
* ...this could NOT necessarily be the HEAD at the end of this
* read if we asked for few records.
* So we must annotate the really last read record got back,
* returned in current_head, inside i->f_pos in order to have a
* reference for the next read call by the same reader.
*/
i->f_pos = current_head;
raw_spin_unlock_irqrestore(&i->rb->lock, flags);
/* ANYWAY we could have got back more data from the ring (ONLY
* multiples of whole records) than required by userspace.
*/
off = init_cached_read(i, retrieved_bytes, &count);
} else {
/* Serve this read-request directly from cached data without
* accessing the ring
*/
off = process_cached_read_data(i, &count);
}
if (copy_to_user(ubuf, i->tbuf + off, count))
return -EFAULT;
*f_pos += count;
return count;
}
/**
* This seek op lets userspace believe that it's dealing with a regular
* plain file, so f_pos is modified accordingly (linearly till the maximum
* number SCSC_LOGGED_BYTES is reached); in fact it's up to
* the read/write ops to properly 'cast' this value to a modulus value as
* required by the underlying ring buffer. This operates only on samlog.
*/
loff_t debugfile_llseek(struct file *filp, loff_t off, int whence)
{
loff_t newpos, maxpos;
struct scsc_ibox *i = NULL;
unsigned long flags;
if (!filp->private_data)
return -EFAULT;
i = filp->private_data;
raw_spin_lock_irqsave(&i->rb->lock, flags);
maxpos = SCSC_LOGGED_BYTES(i->rb) >= 1 ?
SCSC_LOGGED_BYTES(i->rb) - 1 : 0;
raw_spin_unlock_irqrestore(&i->rb->lock, flags);
switch (whence) {
case 0: /* SEEK_SET */
newpos = (off <= maxpos) ? off : maxpos;
break;
case 1: /* SEEK_CUR */
newpos = (filp->f_pos + off <= maxpos) ?
filp->f_pos + off : maxpos;
break;
case 2: /* SEEK_END */
newpos = maxpos;
break;
default: /* can't happen */
return -EINVAL;
}
if (newpos < 0)
return -EINVAL;
filp->f_pos = newpos;
return newpos;
}
const struct file_operations samsg_fops = {
.owner = THIS_MODULE,
.open = debugfile_open,
.read = samsg_read,
.release = debugfile_release,
};
/**
* samlog_read - Reads from the ring buffer the required number of bytes
* starting from the start of the ring. It is usually used to dump the whole
* ring buffer taking a snapshot.
*
* This function, as a usual .read fops, returns the number of bytes
* effectively read, and this could:
* - equal the required count bytes
* - be less than the required bytes if less data WAS available
* (since we only GET whole records at a time from the ring).
* Returning fewer bytes usually triggers the user app to reissue the
* syscall to complete the read up to the originally required number of bytes.
* - be ZERO if NO more data is available; this usually causes the reading
* userspace process to stop reading.
*/
static ssize_t samlog_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *f_pos)
{
unsigned long flags;
struct scsc_ibox *i = NULL;
size_t off = 0, retrieved_bytes = 0;
if (!filp->private_data)
return -EFAULT;
i = filp->private_data;
if (!i->t_used) {
/* On first read from userspace f_pos will be ZERO and in this
* case we'll want to trigger a read from the very beginning of
* ring (tail) and set i->f_pos accordingly.
* Internal RING API returns in i->f_pos the next record to read:
* when reading process has wrapped over you'll get back an
* f_pos ZERO as next read.
*/
raw_spin_lock_irqsave(&i->rb->lock, flags);
if (*f_pos == 0)
i->f_pos = i->rb->tail;
retrieved_bytes = read_next_records(i->rb,
scsc_max_records_per_read,
&i->f_pos, i->tbuf, i->tsz);
raw_spin_unlock_irqrestore(&i->rb->lock, flags);
/* ANYWAY we could have got back more data from the ring (ONLY
* multiple of whole records) than required by userspace.
*/
off = init_cached_read(i, retrieved_bytes, &count);
} else {
/* Serve this read-request directly from cached data without
* accessing the ring
*/
off = process_cached_read_data(i, &count);
}
if (copy_to_user(ubuf, i->tbuf + off, count))
return -EFAULT;
*f_pos += count;
return count;
}
const struct file_operations samlog_fops = {
.owner = THIS_MODULE,
.open = debugfile_open,
.read = samlog_read,
.llseek = debugfile_llseek,
.release = debugfile_release,
};
static int statfile_open(struct inode *ino, struct file *filp)
{
if (!filp->private_data)
filp->private_data = ino->i_private;
if (!filp->private_data)
return -EFAULT;
return 0;
}
static int statfile_release(struct inode *ino, struct file *filp)
{
if (!filp->private_data)
filp->private_data = ino->i_private;
if (!filp->private_data)
return -EFAULT;
return 0;
}
/* A simple read to dump some stats about the ring buffer. */
static ssize_t statfile_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *f_pos)
{
unsigned long flags;
size_t bsz = 0;
loff_t head = 0, tail = 0, used = 0, max_chunk = 0, logged = 0,
last = 0;
int slen = 0, records = 0, wraps = 0, oos = 0;
u64 written = 0;
char statstr[STATSTR_SZ] = {};
struct scsc_ring_buffer *rb = filp->private_data;
raw_spin_lock_irqsave(&rb->lock, flags);
bsz = rb->bsz;
head = rb->head;
tail = rb->tail;
last = rb->last;
written = rb->written;
records = rb->records;
wraps = rb->wraps;
oos = rb->oos;
used = SCSC_USED_BYTES(rb);
max_chunk = SCSC_RING_FREE_BYTES(rb);
logged = SCSC_LOGGED_BYTES(rb);
raw_spin_unlock_irqrestore(&rb->lock, flags);
slen = snprintf(statstr, STATSTR_SZ,
"sz:%zd used:%lld free:%lld logged:%lld records:%d\nhead:%lld tail:%lld last:%lld written:%lld wraps:%d oos:%d\n",
bsz, used, max_chunk, logged, records,
head, tail, last, written, wraps, oos);
if (slen >= 0 && *f_pos < slen) {
count = (count <= slen - *f_pos) ? count : (slen - *f_pos);
if (copy_to_user(ubuf, statstr + *f_pos, count))
return -EFAULT;
*f_pos += count;
} else
count = 0;
return count;
}
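/*
 * Sample output sketch (illustrative values only):
 *
 *	sz:4096 used:512 free:3584 logged:512 records:12
 *	head:512 tail:0 last:512 written:512 wraps:0 oos:0
 */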
const struct file_operations stat_fops = {
.owner = THIS_MODULE,
.open = statfile_open,
.read = statfile_read,
.release = statfile_release,
};
/**
* This implement samwrite interface to INJECT log lines into the ring from
* user space. The support, thought as an aid for testing mainly, is
* minimal, so the interface allows only for simple %s format string injection.
*/
static int samwritefile_open(struct inode *ino, struct file *filp)
{
if (!filp->private_data) {
struct write_config *wc =
kzalloc(sizeof(struct write_config), GFP_KERNEL);
if (wc) {
wc->fmt = global_fmt_string;
wc->buf_sz = SAMWRITE_BUFSZ;
}
filp->private_data = wc;
}
if (!filp->private_data)
return -EFAULT;
return 0;
}
static int samwritefile_release(struct inode *ino, struct file *filp)
{
kfree(filp->private_data);
filp->private_data = NULL;
return 0;
}
/**
* User injected string content is pushed to the ring as simple %s fmt string
* content using the TEST_ME tag. Default debuglevel (6 - INFO) will be used.
*/
static ssize_t samwritefile_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *f_pos)
{
ssize_t written_bytes = 0;
struct write_config *wc = filp->private_data;
if (wc) {
count = count < wc->buf_sz - 1 ? count : wc->buf_sz - 1;
if (copy_from_user(wc->buf, ubuf, count))
return -EFAULT;
wc->buf[count] = '\0'; /* ensure NUL termination before using buf as %s */
written_bytes = scsc_printk_tag(NO_ECHO_PRK, TEST_ME,
wc->fmt, wc->buf);
/* Handle the case where the message is filtered out by
* droplevel filters...zero is returned BUT we do NOT want
* the applications to keep trying...it's NOT a transient
* error...at least till someone changes droplevels.
*/
if (!written_bytes) {
pr_info("samwrite wrote 0 bytes...droplevels filtering ?\n");
return -EPERM;
}
/* Returned written bytes should be normalized since
* lower level functions return the number of bytes
* effectively written, including the prepended record
* header... IF, when asked to write n, we returned n+X,
* some applications could behave badly trying to access
* the file at *f_pos=n+X next time, ending up in a regular
* EFAULT error anyway.
*/
if (written_bytes > count)
written_bytes = count;
*f_pos += written_bytes;
}
return written_bytes;
}
const struct file_operations samwrite_fops = {
.owner = THIS_MODULE,
.open = samwritefile_open,
.write = samwritefile_write,
.release = samwritefile_release,
};
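/*
 * Usage sketch (illustrative; <ring> stands for the ring's debugfs
 * directory name, i.e. the root_name passed to samlog_debugfs_init()
 * below): inject a test line and read it back through the snapshot file:
 *
 *	echo "hello logring" > /sys/kernel/debug/scsc/<ring>/samwrite
 *	cat /sys/kernel/debug/scsc/<ring>/samlog
 */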
/**
* Initializes debugfs support build the proper debugfs file dentries:
* - entries in debugfs are created under /sys/kernel/debug/scsc/@name/
* - using the provided rb ring buffer as underlying ring buffer, storing it
* into inode ptr for future retrieval (via open)
* - registers the proper fops
*/
void __init *samlog_debugfs_init(const char *root_name, void *rb)
{
struct scsc_debugfs_info *di = NULL;
if (!rb || !root_name)
return NULL;
di = kmalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return NULL;
if (!scsc_debugfs_root) {
/* I could have multiple rings debugfs entry all rooted at
* the same /sys/kernel/debug/scsc/...so such entry could
* already exist.
*/
scsc_debugfs_root = debugfs_create_dir(SCSC_DEBUGFS_ROOT, NULL);
if (!scsc_debugfs_root)
goto no_root;
}
di->rootdir = scsc_debugfs_root;
di->bufdir = debugfs_create_dir(root_name, di->rootdir);
if (!di->bufdir)
goto no_buf;
atomic_inc(&scsc_debugfs_root_refcnt);
/* Saving ring ref @rb to Inode */
di->samsgfile = debugfs_create_file(SCSC_SAMSG_FNAME, 0444,
di->bufdir, rb, &samsg_fops);
if (!di->samsgfile)
goto no_samsg;
/* Saving ring ref @rb to Inode */
di->samlogfile = debugfs_create_file(SCSC_SAMLOG_FNAME, 0444,
di->bufdir, rb, &samlog_fops);
if (!di->samlogfile)
goto no_samlog;
di->statfile = debugfs_create_file(SCSC_STAT_FNAME, 0444,
di->bufdir, rb, &stat_fops);
if (!di->statfile)
goto no_statfile;
di->samwritefile = debugfs_create_file(SCSC_SAMWRITE_FNAME, 0222,
di->bufdir, NULL,
&samwrite_fops);
if (!di->samwritefile)
goto no_samwrite;
pr_info("Samlog Debugfs Initialized\n");
return di;
no_samwrite:
debugfs_remove(di->statfile);
no_statfile:
debugfs_remove(di->samlogfile);
no_samlog:
debugfs_remove(di->samsgfile);
no_samsg:
debugfs_remove(di->bufdir);
atomic_dec(&scsc_debugfs_root_refcnt);
no_buf:
if (!atomic_read(&scsc_debugfs_root_refcnt)) {
debugfs_remove(scsc_debugfs_root);
scsc_debugfs_root = NULL;
}
no_root:
kfree(di);
return NULL;
}
void __exit samlog_debugfs_exit(void **priv)
{
struct scsc_debugfs_info **di = NULL;
if (!priv)
return;
di = (struct scsc_debugfs_info **)priv;
if (di && *di) {
/* Remove only this ring's subtree; the shared root goes away with the last ref */
debugfs_remove_recursive((*di)->bufdir);
atomic_dec(&scsc_debugfs_root_refcnt);
if (!atomic_read(&scsc_debugfs_root_refcnt)) {
debugfs_remove(scsc_debugfs_root);
scsc_debugfs_root = NULL;
}
kfree(*di);
*di = NULL;
}
pr_info("Debugfs Cleaned Up\n");
}

View file

@ -0,0 +1,60 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#ifndef _SCSC_LOGRING_DEBUGFS_H_
#define _SCSC_LOGRING_DEBUGFS_H_
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <scsc/scsc_logring.h>
#include "scsc_logring_ring.h"
#define STATSTR_SZ 256
#define SCSC_DEBUGFS_ROOT "scsc"
#define SCSC_SAMSG_FNAME "samsg"
#define SCSC_SAMLOG_FNAME "samlog"
#define SCSC_STAT_FNAME "stat"
#define SCSC_SAMWRITE_FNAME "samwrite"
#define SAMWRITE_BUFSZ 2048
#define SCSC_DEFAULT_MAX_RECORDS_PER_READ 1
struct scsc_ibox {
struct scsc_ring_buffer *rb;
char *tbuf;
size_t tsz;
bool tbuf_vm;
size_t t_off;
size_t t_used;
size_t cached_reads;
loff_t f_pos;
};
struct scsc_debugfs_info {
struct dentry *rootdir;
struct dentry *bufdir;
struct dentry *samsgfile;
struct dentry *samlogfile;
struct dentry *statfile;
struct dentry *samwritefile;
};
struct write_config {
char *fmt;
size_t buf_sz;
char buf[SAMWRITE_BUFSZ];
};
void *samlog_debugfs_init(const char *name, void *rb) __init;
void samlog_debugfs_exit(void **priv) __exit;
#endif /* _SCSC_LOGRING_DEBUGFS_H_ */
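To make the read side concrete, here is a small userspace sketch that streams the samsg entry to stdout; the path is an assumption composed from SCSC_DEBUGFS_ROOT, the ring name and SCSC_SAMSG_FNAME, and the program is illustrative only:
#include <fcntl.h>
#include <unistd.h>
int main(void)
{
char buf[4096];
ssize_t n;
int fd = open("/sys/kernel/debug/scsc/ring0/samsg", O_RDONLY);
if (fd < 0)
return 1;
/* reads hand back whole records; the entry is expected to block
* until data is available (the ring has a wait queue for readers) */
while ((n = read(fd, buf, sizeof(buf))) > 0)
write(STDOUT_FILENO, buf, (size_t)n);
close(fd);
return 0;
}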

View file

@ -0,0 +1,579 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include "scsc_logring_main.h"
#include "scsc_logring_ring.h"
#include "scsc_logring_debugfs.h"
/* Global module parameters */
static int enable = DEFAULT_ENABLE_LOGRING;
static bool initialized;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
static int ringsize = DEFAULT_RING_BUFFER_SZ;
#endif
static int prepend_header = DEFAULT_ENABLE_HEADER;
static int default_dbglevel = DEFAULT_DBGLEVEL;
static int scsc_droplevel_wlbt = DEFAULT_DROPLEVEL;
static int scsc_droplevel_all = DEFAULT_ALL_DISABLED;
static int scsc_droplevel_atomic = DEFAULT_DROPLEVEL;
static int scsc_redirect_to_printk_droplvl = DEFAULT_REDIRECT_DROPLVL;
static int scsc_reset_all_droplevels_to;
struct scsc_ring_buffer *the_ringbuf;
/* Check for power of two compliance. */
static inline int is_power_of_two(unsigned int x)
{
return (x != 0) && !(x & (x - 1));
}
/* Module init and ring buffer allocation */
int __init samlog_init(void)
{
struct scsc_ring_buffer *rb = NULL;
pr_debug("Samlog Init\n");
if (!enable) {
pr_info("Samlog: module disabled...NOT starting.\n");
return 0;
}
if (the_ringbuf != NULL) {
pr_info("Samlog: Ring:%s already initialized...skipping.\n",
the_ringbuf->name);
return 0;
}
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
if (!is_power_of_two(ringsize)) {
ringsize = DEFAULT_RING_BUFFER_SZ;
pr_info("Samlog: scsc_logring.ringsize MUST be power-of-two. Using default: %d\n",
ringsize);
}
rb = alloc_ring_buffer(ringsize, BASE_SPARE_SZ, DEBUGFS_RING0_ROOT);
#else
rb = alloc_ring_buffer(CONFIG_SCSC_STATIC_RING_SIZE,
BASE_SPARE_SZ, DEBUGFS_RING0_ROOT);
#endif
if (!rb)
goto tfail;
rb->private = samlog_debugfs_init(rb->name, rb);
if (!rb->private)
pr_info("Samlog: Cannot Initialize DebugFS.\n");
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
pr_info("scsc_logring:: Allocated ring buffer of size %zd bytes.\n",
rb->bsz);
#else
pr_info("scsc_logring:: Allocated STATIC ring buffer of size %zd bytes.\n",
rb->bsz);
#endif
the_ringbuf = rb;
initialized = true;
pr_info("Samlog Loaded.\n");
scsc_printk_tag(FORCE_PRK, NO_TAG, "Samlog Started.\n");
scsc_printk_tag(NO_ECHO_PRK, NO_TAG,
"Allocated ring buffer of size %zd bytes.\n",
rb->bsz);
scsc_printk_tag(NO_ECHO_PRK, NO_TAG,
"Using THROWAWAY DYNAMIC per-reader buffer.\n");
return 0;
tfail:
pr_err("Samlog Initialization Failed. LogRing disabled.\n");
return -ENODEV;
}
void __exit samlog_exit(void)
{
if (!the_ringbuf) {
pr_err("Cannot UNLOAD ringbuf\n");
return;
}
if (the_ringbuf && the_ringbuf->private)
samlog_debugfs_exit(&the_ringbuf->private);
initialized = false;
free_ring_buffer(the_ringbuf);
the_ringbuf = NULL;
pr_info("Samlog Unloaded\n");
}
module_init(samlog_init);
module_exit(samlog_exit);
module_param(enable, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(enable, "Enable/Disable scsc_logring as a whole.",
"load-time", DEFAULT_ENABLE_LOGRING);
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
module_param(ringsize, int, S_IRUGO);
SCSC_MODPARAM_DESC(ringsize,
"Ring buffer size. Available ONLY if ring is NOT statically allocated.",
"run-time", DEFAULT_RING_BUFFER_SZ);
#endif
module_param(prepend_header, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(prepend_header, "Enable/disable header prepending. ",
"run-time", DEFAULT_ENABLE_HEADER);
module_param(default_dbglevel, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(default_dbglevel,
"The default debug level assigned to messages when NOT explicitly specified.",
"run-time", DEFAULT_DBGLEVEL);
module_param(scsc_droplevel_wlbt, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_droplevel_wlbt,
"Droplevels for the 'no_tag/wlbt' tag family.", "run-time",
DEFAULT_DROP_ALL);
module_param(scsc_droplevel_all, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_droplevel_all,
"This droplevel overrides any other, if set to a value >= 0",
"run-time", DEFAULT_ALL_DISABLED);
module_param(scsc_redirect_to_printk_droplvl, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_redirect_to_printk_droplvl,
"Echoing messages up to the specified loglevel also to kernel standard ring buffer.",
"run-time", DEFAULT_REDIRECT_DROPLVL);
module_param(scsc_droplevel_atomic, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_droplevel_atomic,
"This droplevel is applied to logmsg emitted in atomic context.",
"run-time", DEFAULT_KEEP_ALL);
/**
* This macro code has been freely 'inspired' (read copied) from the
* slsi_ original/old debug.c implementaion: it takes care to register
* a new TAG with the debug subsystem including a module param to
* dynamically configure the droplevel for the specified tag at runtime.
*
* ALL debug is ON by default at FULL DEBUG LEVEL.
* There are THREE intended exceptions to this that instead stick to
* level 7: PLAT_MIF MBULK and MXLOG_TRANS tags.
*
* NOTE_CREATING_TAGS: when adding a tag here REMEMBER to add it also
* where required, taking care to maintain the same ordering.
* (Search 4 NOTE_CREATING_TAGS)
*/
/**
* These macros define module parameters used to configure per-subsystem
* filtering, and assign a default DROPLEVEL.
*
* NOTE THAT the default DROPLEVEL indicates the default value FROM which
* the system will start to discard messages, so as an example:
*
* - if set to SCSC_DBG1 (7) every message with a dbglvl >= 7 will be discarded
* - if set to SCSC_FULL_DEBUG (11) every message is logged
*
* Name, Default DROPLEVEL , FilterTag */
ADD_DEBUG_MODULE_PARAM(binary, SCSC_FULL_DEBUG, BINARY);
ADD_DEBUG_MODULE_PARAM(bin_wifi_ctrl_rx, SCSC_FULL_DEBUG, BIN_WIFI_CTRL_RX);
ADD_DEBUG_MODULE_PARAM(bin_wifi_data_rx, SCSC_FULL_DEBUG, BIN_WIFI_DATA_RX);
ADD_DEBUG_MODULE_PARAM(bin_wifi_ctrl_tx, SCSC_FULL_DEBUG, BIN_WIFI_CTRL_TX);
ADD_DEBUG_MODULE_PARAM(bin_wifi_data_tx, SCSC_FULL_DEBUG, BIN_WIFI_DATA_TX);
ADD_DEBUG_MODULE_PARAM(wifi_rx, SCSC_FULL_DEBUG, WIFI_RX);
ADD_DEBUG_MODULE_PARAM(wifi_tx, SCSC_FULL_DEBUG, WIFI_TX);
ADD_DEBUG_MODULE_PARAM(bt_common, SCSC_FULL_DEBUG, BT_COMMON);
ADD_DEBUG_MODULE_PARAM(bt_h4, SCSC_FULL_DEBUG, BT_H4);
ADD_DEBUG_MODULE_PARAM(bt_fw, SCSC_FULL_DEBUG, BT_FW);
ADD_DEBUG_MODULE_PARAM(bt_rx, SCSC_FULL_DEBUG, BT_RX);
ADD_DEBUG_MODULE_PARAM(bt_tx, SCSC_FULL_DEBUG, BT_TX);
ADD_DEBUG_MODULE_PARAM(cpktbuff, SCSC_FULL_DEBUG, CPKTBUFF);
ADD_DEBUG_MODULE_PARAM(fw_load, SCSC_FULL_DEBUG, FW_LOAD);
ADD_DEBUG_MODULE_PARAM(fw_panic, SCSC_FULL_DEBUG, FW_PANIC);
ADD_DEBUG_MODULE_PARAM(gdb_trans, SCSC_FULL_DEBUG, GDB_TRANS);
ADD_DEBUG_MODULE_PARAM(mif, SCSC_FULL_DEBUG, MIF);
ADD_DEBUG_MODULE_PARAM(clk20, SCSC_FULL_DEBUG, CLK20);
ADD_DEBUG_MODULE_PARAM(clk20_test, SCSC_FULL_DEBUG, CLK20_TEST);
ADD_DEBUG_MODULE_PARAM(mx_file, SCSC_FULL_DEBUG, MX_FILE);
ADD_DEBUG_MODULE_PARAM(mx_fw, SCSC_FULL_DEBUG, MX_FW);
ADD_DEBUG_MODULE_PARAM(mx_sampler, SCSC_FULL_DEBUG, MX_SAMPLER);
ADD_DEBUG_MODULE_PARAM(mxlog_trans, SCSC_DBG1, MXLOG_TRANS);
ADD_DEBUG_MODULE_PARAM(mxman, SCSC_FULL_DEBUG, MXMAN);
ADD_DEBUG_MODULE_PARAM(mxman_test, SCSC_FULL_DEBUG, MXMAN_TEST);
ADD_DEBUG_MODULE_PARAM(mxmgt_trans, SCSC_FULL_DEBUG, MXMGT_TRANS);
ADD_DEBUG_MODULE_PARAM(mx_mmap, SCSC_FULL_DEBUG, MX_MMAP);
ADD_DEBUG_MODULE_PARAM(mx_proc, SCSC_FULL_DEBUG, MX_PROC);
ADD_DEBUG_MODULE_PARAM(panic_mon, SCSC_FULL_DEBUG, PANIC_MON);
ADD_DEBUG_MODULE_PARAM(pcie_mif, SCSC_FULL_DEBUG, PCIE_MIF);
ADD_DEBUG_MODULE_PARAM(plat_mif, SCSC_DBG1, PLAT_MIF);
ADD_DEBUG_MODULE_PARAM(kic_common, SCSC_FULL_DEBUG, KIC_COMMON);
#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
ADD_DEBUG_MODULE_PARAM(init_deinit, SCSC_FULL_DEBUG, SLSI_INIT_DEINIT);
ADD_DEBUG_MODULE_PARAM(netdev, SCSC_DBG4, SLSI_NETDEV);
ADD_DEBUG_MODULE_PARAM(cfg80211, SCSC_FULL_DEBUG, SLSI_CFG80211);
ADD_DEBUG_MODULE_PARAM(mlme, SCSC_FULL_DEBUG, SLSI_MLME);
ADD_DEBUG_MODULE_PARAM(summary_frames, SCSC_FULL_DEBUG, SLSI_SUMMARY_FRAMES);
ADD_DEBUG_MODULE_PARAM(hydra, SCSC_FULL_DEBUG, SLSI_HYDRA);
ADD_DEBUG_MODULE_PARAM(tx, SCSC_FULL_DEBUG, SLSI_TX);
ADD_DEBUG_MODULE_PARAM(rx, SCSC_FULL_DEBUG, SLSI_RX);
ADD_DEBUG_MODULE_PARAM(udi, SCSC_FULL_DEBUG, SLSI_UDI);
ADD_DEBUG_MODULE_PARAM(wifi_fcq, SCSC_DBG4, SLSI_WIFI_FCQ);
ADD_DEBUG_MODULE_PARAM(hip, SCSC_FULL_DEBUG, SLSI_HIP);
ADD_DEBUG_MODULE_PARAM(hip_init_deinit, SCSC_FULL_DEBUG, SLSI_HIP_INIT_DEINIT);
ADD_DEBUG_MODULE_PARAM(hip_fw_dl, SCSC_FULL_DEBUG, SLSI_HIP_FW_DL);
ADD_DEBUG_MODULE_PARAM(hip_sdio_op, SCSC_FULL_DEBUG, SLSI_HIP_SDIO_OP);
ADD_DEBUG_MODULE_PARAM(hip_ps, SCSC_FULL_DEBUG, SLSI_HIP_PS);
ADD_DEBUG_MODULE_PARAM(hip_th, SCSC_FULL_DEBUG, SLSI_HIP_TH);
ADD_DEBUG_MODULE_PARAM(hip_fh, SCSC_FULL_DEBUG, SLSI_HIP_FH);
ADD_DEBUG_MODULE_PARAM(hip_sig, SCSC_FULL_DEBUG, SLSI_HIP_SIG);
ADD_DEBUG_MODULE_PARAM(func_trace, SCSC_FULL_DEBUG, SLSI_FUNC_TRACE);
ADD_DEBUG_MODULE_PARAM(test, SCSC_FULL_DEBUG, SLSI_TEST);
ADD_DEBUG_MODULE_PARAM(src_sink, SCSC_FULL_DEBUG, SLSI_SRC_SINK);
ADD_DEBUG_MODULE_PARAM(fw_test, SCSC_FULL_DEBUG, SLSI_FW_TEST);
ADD_DEBUG_MODULE_PARAM(rx_ba, SCSC_FULL_DEBUG, SLSI_RX_BA);
ADD_DEBUG_MODULE_PARAM(tdls, SCSC_FULL_DEBUG, SLSI_TDLS);
ADD_DEBUG_MODULE_PARAM(gscan, SCSC_FULL_DEBUG, SLSI_GSCAN);
ADD_DEBUG_MODULE_PARAM(mbulk, SCSC_DBG1, SLSI_MBULK);
ADD_DEBUG_MODULE_PARAM(flowc, SCSC_FULL_DEBUG, SLSI_FLOWC);
#endif
ADD_DEBUG_MODULE_PARAM(test_me, SCSC_FULL_DEBUG, TEST_ME);
int *scsc_droplevels[] = {
&scsc_droplevel_binary,
&scsc_droplevel_bin_wifi_ctrl_rx,
&scsc_droplevel_bin_wifi_data_rx,
&scsc_droplevel_bin_wifi_ctrl_tx,
&scsc_droplevel_bin_wifi_data_tx,
&scsc_droplevel_wlbt,
&scsc_droplevel_wifi_rx,
&scsc_droplevel_wifi_tx,
&scsc_droplevel_bt_common,
&scsc_droplevel_bt_h4,
&scsc_droplevel_bt_fw,
&scsc_droplevel_bt_rx,
&scsc_droplevel_bt_tx,
&scsc_droplevel_cpktbuff,
&scsc_droplevel_fw_load,
&scsc_droplevel_fw_panic,
&scsc_droplevel_gdb_trans,
&scsc_droplevel_mif,
&scsc_droplevel_clk20,
&scsc_droplevel_clk20_test,
&scsc_droplevel_mx_file,
&scsc_droplevel_mx_fw,
&scsc_droplevel_mx_sampler,
&scsc_droplevel_mxlog_trans,
&scsc_droplevel_mxman,
&scsc_droplevel_mxman_test,
&scsc_droplevel_mxmgt_trans,
&scsc_droplevel_mx_mmap,
&scsc_droplevel_mx_proc,
&scsc_droplevel_panic_mon,
&scsc_droplevel_pcie_mif,
&scsc_droplevel_plat_mif,
&scsc_droplevel_kic_common,
#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
&scsc_droplevel_init_deinit,
&scsc_droplevel_netdev,
&scsc_droplevel_cfg80211,
&scsc_droplevel_mlme,
&scsc_droplevel_summary_frames,
&scsc_droplevel_hydra,
&scsc_droplevel_tx,
&scsc_droplevel_rx,
&scsc_droplevel_udi,
&scsc_droplevel_wifi_fcq,
&scsc_droplevel_hip,
&scsc_droplevel_hip_init_deinit,
&scsc_droplevel_hip_fw_dl,
&scsc_droplevel_hip_sdio_op,
&scsc_droplevel_hip_ps,
&scsc_droplevel_hip_th,
&scsc_droplevel_hip_fh,
&scsc_droplevel_hip_sig,
&scsc_droplevel_func_trace,
&scsc_droplevel_test,
&scsc_droplevel_src_sink,
&scsc_droplevel_fw_test,
&scsc_droplevel_rx_ba,
&scsc_droplevel_tdls,
&scsc_droplevel_gscan,
&scsc_droplevel_mbulk,
&scsc_droplevel_flowc,
#endif
&scsc_droplevel_test_me,
};
static int scsc_reset_all_droplevels_to_set_param_cb(const char *val,
const struct kernel_param *kp)
{
int ret = 0, i = 0;
long rval = 0;
if (!val)
return -EINVAL;
ret = kstrtol(val, 10, &rval);
if (!ret) {
if (rval < 0)
return -EINVAL;
scsc_droplevel_wlbt = (int)rval;
for (i = 0; i < ARRAY_SIZE(scsc_droplevels); i++)
*scsc_droplevels[i] = (int)rval;
scsc_reset_all_droplevels_to = (int)rval;
scsc_printk_tag(FORCE_PRK, NO_TAG,
KERN_INFO"Reset ALL droplevels to %d\n",
scsc_reset_all_droplevels_to);
}
return ret;
}
static struct kernel_param_ops scsc_reset_droplvl_ops = {
.set = scsc_reset_all_droplevels_to_set_param_cb,
.get = NULL,
};
module_param_cb(scsc_reset_all_droplevels_to, &scsc_reset_droplvl_ops,
NULL, 0200);
MODULE_PARM_DESC(scsc_reset_all_droplevels_to,
"Reset ALL droplevels to the requested value. Effective @run-time.");
/* SCSC_PRINTK API and Helpers */
static inline int get_debug_level(const char *fmt)
{
int level;
if (fmt && *fmt == SCSC_SOH && *(fmt + 1))
level = *(fmt + 1) - '0';
else
level = default_dbglevel;
return level;
}
static inline void drop_message_level_macro(const char *fmt, char **msg)
{
if (fmt && *fmt == SCSC_SOH && *(fmt + 1)) {
if (msg)
*msg = (char *)(fmt + 2);
} else if (msg) {
*msg = (char *)fmt;
}
}
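Concretely, printk-style prefixes are a one-byte ASCII SOH followed by a level digit, so the two helpers above behave as in this sketch (assuming SCSC_SOH is the same '\001' marker printk uses):
/*
* fmt = KERN_WARNING "oops %d\n" starts with bytes '\001' '4', so
* get_debug_level(fmt) returns 4 and drop_message_level_macro()
* makes msg point at "oops %d\n".
* For a bare "plain line\n", get_debug_level() falls back to
* default_dbglevel and msg points at the unchanged string.
*/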
/**
* The workhorse function: given a droplevel to enforce and a
* format-string/va_list pair, it decides what to do:
* - drop
* OR
* - insert into ring buffer accounting for wrapping
*
* ... then wakes up any waiting reading process
*/
static inline int _scsc_printk(int level, int tag,
const char *fmt, va_list args)
{
int written = 0;
char *msg_head = NULL;
if (!initialized || !enable || !fmt ||
((scsc_droplevel_all < 0 && level >= *scsc_droplevels[tag]) ||
(scsc_droplevel_all >= 0 && level >= scsc_droplevel_all)))
return written;
drop_message_level_macro(fmt, &msg_head);
written = push_record_string(the_ringbuf, tag, level,
prepend_header, msg_head, args);
return written;
}
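A worked example of the drop decision above, with illustrative values:
/*
* With *scsc_droplevels[tag] == SCSC_DBG1 (7) and scsc_droplevel_all == -1:
* level 6 is kept (6 < 7) and pushed into the ring;
* level 7 or higher is dropped (level >= per-tag droplevel).
* With scsc_droplevel_all == 3 the global setting takes over and every
* message with level >= 3 is dropped, whatever the per-tag value says.
*/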
/**
* Embeds the filtering behaviour towards the standard kernel ring buffer for
* non-binary stuff, and decides what to do based on current user config.
* Note that the printk redirect droplevel filter is now completely disjoint
* from normal LogRing droplevel filters.
*/
static inline
void handle_klogbuf_out_string(int level, struct device *dev, int tag,
const char *fmt, va_list args, int force)
{
if (IS_PRINTK_REDIRECT_ALLOWED(force, level, tag)) {
if (!dev)
vprintk_emit(0, level, NULL, 0, fmt, args);
else
dev_vprintk_emit(level, dev, fmt, args);
}
}
const char *map2kern[] = {
KERN_EMERG,
KERN_ALERT,
KERN_CRIT,
KERN_ERR,
KERN_WARNING,
KERN_NOTICE,
KERN_INFO,
KERN_DEBUG
};
/**
* Embeds the filtering behaviour towards the standard kernel ring buffer for
* BINARY stuff, and decides what to do based on current user config.
* Note that the printk redirect droplevel filter is now completely disjoint
* from normal LogRing droplevel filters.
*/
static inline
void handle_klogbuf_out_binary(int level, int tag, const void *start,
size_t len, int force)
{
if (IS_PRINTK_REDIRECT_ALLOWED(force, level, tag)) {
if (level < SCSC_MIN_DBG || level >= ARRAY_SIZE(map2kern))
level = ARRAY_SIZE(map2kern) - 1;
print_hex_dump(map2kern[level], "SCSC_HEX->|",
DUMP_PREFIX_ADDRESS, 16, 4, start, len, true);
}
}
/*
* scsc_printk - the main API entry to the event logging mechanism. Prints
* the specified format string to the underlying ring_buffer, injecting
* timestamp and context information at the start of the line while
* classifying and filtering the message using the specified TAG identifier.
*
* This function assumes that you'll never write a line longer than
* BASE_SPARE_SZ bytes; if this limit is obeyed any input string is correctly
* wrapped when placed at the end of the buffer. Any longer line will be
* truncated.
* This function recognizes kernel-style debug levels (KERN_*), checking the
* FIRST byte for an ASCII SOH in order to detect whether a printk-style
* kernel debug level has been specified.
* So you can use the KERN_INFO, KERN_ERR, etc. inline macros to specify the
* desired debug level: that will be checked against the configured droplevel.
* If NOT specified, a default debug level is assigned, as configured in the
* module parameter default_dbglevel.
*
* It's usually NOT used directly but through utility macros that
* can be easily compiled out in production builds.
*/
int scsc_printk_tag(int force, int tag, const char *fmt, ...)
{
int ret = 0, level = 0;
va_list args;
/* Cannot use a BINARY tag with string logging */
if (tag < NO_TAG || tag > MAX_TAG)
return ret;
level = get_debug_level(fmt);
if ((in_interrupt() && level >= scsc_droplevel_atomic))
return ret;
va_start(args, fmt);
handle_klogbuf_out_string(level, NULL, tag, fmt, args, force);
va_end(args);
/* restart varargs */
va_start(args, fmt);
ret = _scsc_printk(level, tag, fmt, args);
va_end(args);
return ret;
}
EXPORT_SYMBOL(scsc_printk_tag);
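Typical call sites look like the following sketch; FORCE_PRK, NO_ECHO_PRK and NO_TAG are the flags used elsewhere in this file, while the message contents are illustrative:
/*
* scsc_printk_tag(NO_ECHO_PRK, NO_TAG, KERN_INFO "fw heartbeat %d\n", beat);
* scsc_printk_tag(FORCE_PRK, NO_TAG, "restarting subsystem\n");
*
* The second call is also echoed to the kernel ring buffer regardless
* of the printk redirect droplevel.
*/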
/**
* This is a variation on the main API that allows specifying the loglevel
* by number.
*/
int scsc_printk_tag_lvl(int tag, int level, const char *fmt, ...)
{
int ret = 0;
va_list args;
/* Cannot use a BINARY tag with string logging */
if (tag < NO_TAG || tag > MAX_TAG)
return ret;
if ((in_interrupt() && level >= scsc_droplevel_atomic))
return ret;
va_start(args, fmt);
handle_klogbuf_out_string(level, NULL, tag, fmt, args, NO_FORCE_PRK);
va_end(args);
/* restart varargs */
va_start(args, fmt);
ret = _scsc_printk(level, tag, fmt, args);
va_end(args);
return ret;
}
EXPORT_SYMBOL(scsc_printk_tag_lvl);
/**
* This is a variation on the main API that allows specifying a
* struct device reference.
*/
int scsc_printk_tag_dev(int force, int tag, struct device *dev,
const char *fmt, ...)
{
int ret = 0, level = 0;
va_list args;
/* Cannot use a BINARY tag with string logging */
if (tag < NO_TAG || tag > MAX_TAG)
return ret;
level = get_debug_level(fmt);
if ((in_interrupt() && level >= scsc_droplevel_atomic))
return ret;
va_start(args, fmt);
handle_klogbuf_out_string(level, dev, tag, fmt, args, force);
va_end(args);
/* restart varargs */
va_start(args, fmt);
ret = _scsc_printk(level, tag, fmt, args);
va_end(args);
return ret;
}
EXPORT_SYMBOL(scsc_printk_tag_dev);
/**
* This is a variation on the main API that allows specifying a
* struct device reference and an explicit numerical debug level.
*/
int scsc_printk_tag_dev_lvl(int force, int tag, struct device *dev,
int level, const char *fmt, ...)
{
int ret = 0;
va_list args;
/* Cannot use a BINARY tag with string logging */
if (tag < NO_TAG || tag > MAX_TAG)
return ret;
if ((in_interrupt() && level >= scsc_droplevel_atomic))
return ret;
va_start(args, fmt);
handle_klogbuf_out_string(level, dev, tag, fmt, args, force);
va_end(args);
/* restart varargs */
va_start(args, fmt);
ret = _scsc_printk(level, tag, fmt, args);
va_end(args);
return ret;
}
EXPORT_SYMBOL(scsc_printk_tag_dev_lvl);
/**
* This is a variation on the main API used to push a binary blob into the ring.
*/
int scsc_printk_bin(int force, int tag, int dlev, const void *start, size_t len)
{
int ret = 0;
/* Cannot use a NON-BINARY tag with binary logging,
* or a NULL start / zero len
*/
if (!start || !len || tag < FIRST_BIN_TAG || tag > LAST_BIN_TAG)
return ret;
dlev = (dlev >= 0) ? dlev : default_dbglevel;
if ((in_interrupt() && dlev >= scsc_droplevel_atomic))
return ret;
handle_klogbuf_out_binary(dlev, tag, start, len, force);
/* consider proper tag droplevel */
if (!initialized || !enable || !start ||
(scsc_droplevel_all < 0 && *scsc_droplevels[tag] <= dlev) ||
(scsc_droplevel_all >= 0 && scsc_droplevel_all <= dlev))
return ret;
ret = push_record_blob(the_ringbuf, tag, dlev,
prepend_header, start, len);
return ret;
}
EXPORT_SYMBOL(scsc_printk_bin);
MODULE_DESCRIPTION("SCSC Event Logger");
MODULE_AUTHOR("SLSI");
MODULE_LICENSE("GPL and additional rights");

View file

@ -0,0 +1,33 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#ifndef _SCSC_LOGRING_MAIN_H_
#define _SCSC_LOGRING_MAIN_H_
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include "scsc_logring_common.h"
#define ADD_DEBUG_MODULE_PARAM(tagname, default_level, filter) \
int scsc_droplevel_ ## tagname = default_level; \
module_param(scsc_droplevel_ ## tagname, int, S_IRUGO | S_IWUSR); \
SCSC_MODPARAM_DESC(scsc_droplevel_ ## tagname, \
"Droplevels for the '" # tagname "' family.", \
"run-time", default_level)
#define IS_PRINTK_REDIRECT_ALLOWED(ff, level, tag) \
((ff) == FORCE_PRK || \
((ff) != NO_ECHO_PRK && (level) < scsc_redirect_to_printk_droplvl))
#endif /* _SCSC_LOGRING_MAIN_H_ */
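For reference, a single invocation such as ADD_DEBUG_MODULE_PARAM(mif, SCSC_FULL_DEBUG, MIF) expands to roughly the following (note the filter argument is not referenced by the macro body):
int scsc_droplevel_mif = SCSC_FULL_DEBUG;
module_param(scsc_droplevel_mif, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_droplevel_mif,
"Droplevels for the 'mif' family.",
"run-time", SCSC_FULL_DEBUG);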

View file

@ -0,0 +1,738 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include "scsc_logring_ring.h"
#ifdef CONFIG_SCSC_STATIC_RING_SIZE
static char a_ring[CONFIG_SCSC_STATIC_RING_SIZE + BASE_SPARE_SZ] __aligned(4);
#endif
static int scsc_decode_binary_len = DEFAULT_BIN_DECODE_LEN;
module_param(scsc_decode_binary_len, int, S_IRUGO | S_IWUSR);
SCSC_MODPARAM_DESC(scsc_decode_binary_len,
"When reading a binary record dump these bytes-len in ASCII human readable form when reading",
"run-time", DEFAULT_BIN_DECODE_LEN);
/*
* NOTE_CREATING_TAGS: when adding a tag string here REMEMBER to add
* it also where required, taking care to maintain the same ordering.
* (Search 4 NOTE_CREATING_TAGS)
*/
const char *tagstr[] = {
"binary",
"bin_wifi_ctrl_rx",
"bin_wifi_data_rx",
"bin_wifi_ctrl_tx",
"bin_wifi_data_tx",
"wlbt", /* this is the generic one...NO_TAG */
"wifi_rx",
"wifi_tx",
"bt_common",
"bt_h4",
"bt_fw",
"bt_rx",
"bt_tx",
"cpktbuff",
"fw_load",
"fw_panic",
"gdb_trans",
"mif",
"clk20",
"clk20_test",
"mx_file",
"mx_fw",
"mx_sampler",
"mxlog_trans",
"mxman",
"mxman_test",
"mxmgt_trans",
"mx_mmap",
"mx_proc",
"panic_mon",
"pcie_mif",
"plat_mif",
"kic_common",
#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
"init_deinit",
"netdev",
"cfg80211",
"mlme",
"summary_frames",
"hydra",
"tx",
"rx",
"udi",
"wifi_fcq",
"hip",
"hip_init_deinit",
"hip_fw_dl",
"hip_sdio_op",
"hip_ps",
"hip_th",
"hip_fh",
"hip_sig",
"func_trace",
"test",
"src_sink",
"fw_test",
"rx_ba",
"tdls",
"gscan",
"mbulk",
"flowc",
#endif
"test_me"
};
/**
* Calculates and returns the CRC32 for the provided record and record pos.
* Before calculating the CRC32, the crc field is temporarily substituted
* with the 32 LSBs of the record's relative starting position.
* Assumes the rec ptr area-validity has been checked upstream in the
* caller chain.
* We SKIP the fixed blob of the SYNC field that is placed ahead of the
* CRC field.
* Assumes the related ring buffer is currently atomically accessed by the
* caller. MUST NOT SLEEP.
*/
static inline uint32_t get_calculated_crc(struct scsc_ring_record *rec,
loff_t pos)
{
uint32_t calculated_crc = 0;
uint32_t saved_crc = 0;
saved_crc = rec->crc;
rec->crc = (uint32_t)pos;
/* we skip the fixed sync calculating crc */
calculated_crc =
crc32_le(~0, (unsigned char const *)&rec->crc,
SCSC_CRC_RINGREC_SZ);
rec->crc = saved_crc;
return calculated_crc;
}
/**
* Checks for record CRC sanity.
* Assumes the related ring buffer is currently atomically accessed by
* caller. MUST NOT SLEEP.
*/
static inline bool is_record_crc_valid(struct scsc_ring_record *rec,
loff_t pos)
{
uint32_t calculated_crc = 0;
calculated_crc = get_calculated_crc(rec, pos);
return calculated_crc == rec->crc;
}
/**
* Calculate the proper CRC and set it into the crc field
* Assumes the related ring buffer is currently atomically accessed by
* caller. MUST NOT SLEEP.
*/
static inline void finalize_record_crc(struct scsc_ring_record *rec,
loff_t pos)
{
uint32_t calculated_crc = 0;
if (!rec)
return;
rec->crc = (uint32_t)pos;
calculated_crc =
crc32_le(~0, (unsigned char const *)&rec->crc,
SCSC_CRC_RINGREC_SZ);
rec->crc = calculated_crc;
}
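The two CRC helpers above are designed as a pair; a quick sketch of the intended invariant, with pos being the record's ring-relative start:
/*
* finalize_record_crc(rec, pos);
* is_record_crc_valid(rec, pos) is now true, while
* is_record_crc_valid(rec, pos + 4) is false (with overwhelming
* probability), since the record's own position is folded into the CRC.
*/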
/**
* This function analyzes the pos provided relative to the provided
* ring, just to understand if it can be safely dereferenced.
* Assumes RING is already locked.
*/
static inline bool is_ring_pos_safe(struct scsc_ring_buffer *rb,
loff_t pos)
{
if (!rb || pos > rb->bsz || pos < 0)
return false;
/* NOT Wrapped */
if (rb->head > rb->tail && pos > rb->head)
return false;
/* Wrapped... */
if (rb->head < rb->tail &&
(pos > rb->head && pos < rb->tail))
return false;
return true;
}
/**
* This sanitizes the record header before using it.
* The position must be in the valid area between head and tail, and
* the CRC must match the header.
*/
static inline bool is_ring_read_pos_valid(struct scsc_ring_buffer *rb,
loff_t pos)
{
if (!is_ring_pos_safe(rb, pos))
goto oos;
/* We do not check for SYNC before CRC since most of the time
* you are NOT OutOfSync and so you MUST check CRC anyway.
* It will be useful only for resync.
* At last...Check CRC ... doing this check LAST avoids the risk of
* dereferencing an already dangling pos pointer.
*/
if (!is_record_crc_valid(SCSC_GET_REC(rb, pos), pos))
goto oos;
return true;
oos:
if (rb)
rb->oos++;
return false;
}
/**
* Build a header into the provided buffer,
* and append the optional trail string
*/
static inline
int build_header(char *buf, int blen, struct scsc_ring_record *r,
const char *trail)
{
int written = 0;
struct timeval tval = {};
tval = ns_to_timeval(r->nsec);
written = scnprintf(buf, blen,
"<%d>[%6lu.%06ld] [c%d] [%c] [%s] :: %s",
r->lev, tval.tv_sec, tval.tv_usec,
r->core, (char)r->ctx, tagstr[r->tag],
(trail) ? : "");
return written;
}
/**
* We're going to overwrite something writing from the head toward the tail
* so we must search for the next tail far enough from head in order not to be
* overwritten: that will be our new tail after the wrap over.
*/
static inline
loff_t find_next_tail_far_enough_from_start(struct scsc_ring_buffer *rb,
loff_t start, int len)
{
loff_t new_tail = rb->tail;
while (start + len >= new_tail && new_tail != rb->last) {
new_tail = SCSC_GET_NEXT_REC_ENTRY_POS(rb, new_tail);
rb->records--;
}
if (start + len >= new_tail) {
new_tail = 0;
rb->records--;
}
return new_tail;
}
/**
* This handles the plain append of a record to head, without
* any need of wrapping or overwriting the current tail.
* You can provide two buffers here: the second, hbuf, is optional
* and will be written first. This accounts for the binary case,
* in which the record descriptor is first written into the spare area
* (like we do with var strings), BUT the bulk of binary data is then
* written directly in place into the ring without double copies.
*/
static inline
void scsc_ring_buffer_plain_append(struct scsc_ring_buffer *rb,
const char *srcbuf, int slen,
const char *hbuf, int hlen)
{
/* empty condition is special case */
if (rb->records)
rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
if (hbuf)
memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
else
hlen = 0;
memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
rb->head);
rb->records++;
if (rb->head > rb->last)
rb->last = rb->head;
}
/**
* This handles the case in which appending the current record must account
* for overwriting: this situation can happen at the end of the ring if we do NOT
* have enough space for the current record, or in any place when the buffer
* has wrapped, head is before tail and there's not enough space to write
* between current head and tail.
*/
static inline
void scsc_ring_buffer_overlap_append(struct scsc_ring_buffer *rb,
const char *srcbuf, int slen,
const char *hbuf, int hlen)
{
if (rb->head < rb->tail &&
slen + hlen < rb->bsz - SCSC_GET_NEXT_SLOT_POS(rb, rb->head))
rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
else {
rb->last = rb->head;
rb->head = 0;
rb->tail = 0;
rb->wraps++;
}
rb->tail =
find_next_tail_far_enough_from_start(rb, rb->head, slen + hlen);
if (hbuf)
memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
else
hlen = 0;
memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
rb->head);
rb->records++;
if (rb->head > rb->last)
rb->last = rb->head;
}
/**
* This uses the spare area to prepare the record descriptor and to expand
* the format string into the spare area in order to get the final length of
* the whole record+data. Data is pre-pended with a header representing the
* data held in binary form in the record descriptor.
* This data duplication helps when we later read back a record holding string
* data: we won't have to build the header on the fly during the read.
*/
static inline
int tag_writer_string(char *spare, int tag, int lev,
int prepend_header, const char *msg_head, va_list args)
{
int written;
char bheader[SCSC_HBUF_LEN] = {};
struct scsc_ring_record *rrec;
/* Fill record in place */
rrec = (struct scsc_ring_record *)spare;
SCSC_FILL_RING_RECORD(rrec, tag, lev);
if (prepend_header)
build_header(bheader, SCSC_HBUF_LEN, rrec, NULL);
written = scnprintf(SCSC_GET_REC_BUF(spare),
BASE_SPARE_SZ - SCSC_RINGREC_SZ, "%s", bheader);
/**
* NOTE THAT
* ---------
* vscnprintf retvalue is the number of characters which have been
* written into the @buf NOT including the trailing '\0'.
* If @size is == 0 the function returns 0.
* Here we enforce a line length limit equal to
* BASE_SPARE_SZ - SCSC_RINGREC_SZ.
*/
written += vscnprintf(SCSC_GET_REC_BUF(spare) + written,
BASE_SPARE_SZ - SCSC_RINGREC_SZ - written,
msg_head, args);
/* complete record metadata */
rrec->len = written;
return written;
}
/**
* A ring API function to push variable length format string into the buffer
* After the record has been created and pushed into the ring any process
* waiting on the related waiting queue is awakened.
*/
int push_record_string(struct scsc_ring_buffer *rb, int tag, int lev,
int prepend_header, const char *msg_head, va_list args)
{
int rec_len = 0;
loff_t free_bytes;
unsigned long flags;
/* Prepare ring_record and header if needed */
raw_spin_lock_irqsave(&rb->lock, flags);
rec_len = tag_writer_string(rb->spare, tag, lev, prepend_header,
msg_head, args);
/* Line too long anyway: drop it */
if (rec_len == BASE_SPARE_SZ) {
raw_spin_unlock_irqrestore(&rb->lock, flags);
return 0;
}
free_bytes = SCSC_RING_FREE_BYTES(rb);
/**
* Evaluate if it's a trivial append or if we must account for
* any overwrap. Note that we do NOT truncate record across ring
* boundaries, if a record does NOT fit at the end of buffer,
* we'll write it from start directly.
*/
if (rec_len + SCSC_RINGREC_SZ < free_bytes)
scsc_ring_buffer_plain_append(rb, rb->spare,
SCSC_RINGREC_SZ + rec_len,
NULL, 0);
else
scsc_ring_buffer_overlap_append(rb, rb->spare,
SCSC_RINGREC_SZ + rec_len,
NULL, 0);
rb->written += rec_len;
raw_spin_unlock_irqrestore(&rb->lock, flags);
/* WAKEUP EVERYONE WAITING ON THIS BUFFER */
wake_up_interruptible(&rb->wq);
return rec_len;
}
/* This simply builds up a record descriptor for a binary entry. */
static inline
int tag_writer_binary(char *spare, int tag, int lev, size_t hexlen)
{
struct scsc_ring_record *rrec;
rrec = (struct scsc_ring_record *)spare;
SCSC_FILL_RING_RECORD(rrec, tag, lev);
rrec->len = hexlen;
return hexlen;
}
/**
* A ring API function to push binary data into the ring buffer. Binary data
* is copied from the start/len specified location.
* After the record has been created and pushed into the ring any process
* waiting on the related waiting queue is awakened.
*/
int push_record_blob(struct scsc_ring_buffer *rb, int tag, int lev,
int prepend_header, const void *start, size_t len)
{
loff_t free_bytes;
unsigned long flags;
if (len > SCSC_MAX_BIN_BLOB_SZ)
len = SCSC_MAX_BIN_BLOB_SZ;
/* Prepare ring_record and header if needed */
raw_spin_lock_irqsave(&rb->lock, flags);
memset(rb->spare, 0x00, rb->ssz);
tag_writer_binary(rb->spare, tag, lev, len);
free_bytes = SCSC_RING_FREE_BYTES(rb);
if (len + SCSC_RINGREC_SZ < free_bytes)
scsc_ring_buffer_plain_append(rb, start, len,
rb->spare, SCSC_RINGREC_SZ);
else
scsc_ring_buffer_overlap_append(rb, start, len,
rb->spare, SCSC_RINGREC_SZ);
rb->written += len;
raw_spin_unlock_irqrestore(&rb->lock, flags);
/* WAKEUP EVERYONE WAITING ON THIS BUFFER */
wake_up_interruptible(&rb->wq);
return len;
}
/* A simple reader used to retrieve a string from the record
* It always returns ONE WHOLE RECORD, if it fits the provided tbuf, OR NOTHING.
*/
static inline
size_t tag_reader_string(char *tbuf, struct scsc_ring_buffer *rb,
int start_rec, size_t tsz)
{
size_t max_chunk = SCSC_GET_REC_LEN(SCSC_GET_PTR(rb, start_rec));
if (max_chunk <= tsz)
memcpy(tbuf, SCSC_GET_REC_BUF(rb->buf + start_rec), max_chunk);
else
max_chunk = 0;
return max_chunk;
}
/*
* Helper to dump binary data in ASCII readable form up to
* scsc_decode_binary_len bytes: when such modparam is set to -1
* this will dump all the available data. Data is dumped onto the
* output buffer with an endianness that conforms to the data as
* dumped by the print_hex_dump() kernel standard facility.
*/
static inline
int binary_hexdump(char *tbuf, int tsz, struct scsc_ring_record *rrec,
int start, int dlen)
{
int i, j, b;
unsigned char *blob = SCSC_GET_REC_BUF(rrec);
char *hmap = "0123456789abcdef";
for (j = start, i = 0;
j < tsz && i < rrec->len && i < dlen; i += 4) {
for (b = (rrec->len - i >= 3) ? 3 : rrec->len - i;
b >= 0; b--) {
if (i + (3 - b) >= dlen)
break;
tbuf[j++] = hmap[blob[i + b] >> 4 & 0x0f];
tbuf[j++] = hmap[blob[i + b] & 0x0f];
}
}
return j;
}
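A worked example of the grouping above: bytes are emitted from blob[i + 3] down to blob[i + 0], so each 4-byte group is printed in the same little-endian word order as print_hex_dump() with a group size of 4:
/*
* blob = { 0xde, 0xad, 0xbe, 0xef } is rendered as "efbeadde"
*/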
/**
* A reader used to dump binary records: this function first of all
* builds a proper human-readable header to identify the record with the
* usual debuglevel and timestamps, and then DUMPS some of the binary blob
* in ASCII human-readable form: how much is dumped depends on the module
* param scsc_decode_binary_len (-1 by default, i.e. dump everything).
* ANYWAY ONLY ONE WHOLE RECORD IS DUMPED OR NOTHING IF IT DOES NOT FIT
* THE PROVIDED DESTINATION BUFFER TBUF.
*/
static inline
size_t tag_reader_binary(char *tbuf, struct scsc_ring_buffer *rb,
int start_rec, size_t tsz)
{
size_t written;
int declen = scsc_decode_binary_len;
struct scsc_ring_record *rrec;
char bheader[SCSC_HBUF_LEN] = {};
char binfo[SCSC_BINFO_LEN] = {};
size_t max_chunk;
rrec = (struct scsc_ring_record *)SCSC_GET_PTR(rb, start_rec);
if (declen < 0 || declen > rrec->len)
declen = rrec->len;
if (declen)
snprintf(binfo, SCSC_BINFO_LEN, "HEX[%d/%d]: ",
declen, rrec->len);
written = build_header(bheader, SCSC_HBUF_LEN, rrec,
declen ? binfo : "");
/* Account for byte decoding: two ASCII char for each byte */
max_chunk = written + (declen * 2);
if (max_chunk <= tsz) {
memcpy(tbuf, bheader, written);
if (declen)
written = binary_hexdump(tbuf, tsz - written,
rrec, written, declen);
tbuf[written] = '\n';
written++;
} else {
written = 0;
}
return written;
}
/**
* This is a utility function to read from the specified ring_buffer
* up to 'tsz' amount of data starting from position record 'start_rec'.
* This function reads ONLY UP TO ONE RECORD and returns the effective
* amount of data bytes read; it invokes the proper tag_reader_* helper
* depending on the specific record it is handling.
* Data is copied to a TEMP BUFFER provided by user of this function,
* IF AND ONLY IF a whole record CAN fit into the space available in the
* destination buffer, otherwise record is NOT copied and 0 is returned.
* This function DOES NOT SLEEP.
* Caller IS IN CHARGE to SOLVE any sync issue on provided tbuf and
* underlying ring buffer.
*
* @tbuf: a temp buffer destination for the read data
* @rb: the ring_buffer to use.
* @start_rec: the record from which to start expressed as a record
* starting position.
* @tsz: the available space in tbuf
* @return size_t: returns the bytes effectively read.
*/
static inline size_t
_read_one_whole_record(void *tbuf, struct scsc_ring_buffer *rb,
int start_rec, size_t tsz)
{
if (SCSC_GET_REC_TAG(SCSC_GET_PTR(rb, start_rec)) > LAST_BIN_TAG)
return tag_reader_string(tbuf, rb, start_rec, tsz);
else
return tag_reader_binary(tbuf, rb, start_rec, tsz);
}
/**
* This just injects a string into the buffer to signal we've gone
* OUT OF SYNC due to Ring WRAPPING too FAST, noting how many bytes
* we resynced.
*/
static inline size_t mark_out_of_sync(char *tbuf, size_t tsz,
int resynced_bytes)
{
size_t written = 0;
struct timeval tval = {};
tval = ns_to_timeval(local_clock());
/* We should write something even if truncated ... */
written = scnprintf(tbuf, tsz,
"<7>[%6lu.%06ld] [c%d] [P] [OOS] :: [[[ OUT OF SYNC -- RESYNC'ED BYTES %d ]]]\n",
tval.tv_sec, tval.tv_usec, smp_processor_id(),
resynced_bytes);
return written;
}
/**
* Attempt resync searching for SYNC pattern and verifying CRC.
* ASSUMES that the invalid_pos provided is anyway safe to access, since
* it should be checked by the caller in advance.
* The number of resynced bytes is not necessarily the number of bytes
* effectively lost....it could be much more...imagine the ring had
* wrapped over multiple times before detecting OUT OF SYNC.
*/
static inline loff_t reader_resync(struct scsc_ring_buffer *rb,
loff_t invalid_pos, int *resynced_bytes)
{
int bytes = 0;
loff_t sync_pos = rb->head;
struct scsc_ring_record *candidate = SCSC_GET_REC(rb, invalid_pos);
*resynced_bytes = 0;
/* Walking through the ring in search of the sync
* pattern one byte at a time */
while (invalid_pos != rb->head &&
!SCSC_IS_REC_SYNC_VALID(candidate)) {
invalid_pos = (invalid_pos < rb->last) ?
(invalid_pos + sizeof(u8)) : 0;
bytes += sizeof(u8);
candidate = SCSC_GET_REC(rb, invalid_pos);
}
if (invalid_pos == rb->head ||
(SCSC_IS_REC_SYNC_VALID(candidate) &&
is_record_crc_valid(candidate, invalid_pos))) {
sync_pos = invalid_pos;
*resynced_bytes = bytes;
}
return sync_pos;
}
/**
* An Internal API ring function to retrieve into the provided tbuf
* up to N WHOLE RECORDS starting from *next_rec.
* It STOPS collecting records if:
* - NO MORE RECORDS TO READ: last_read_rec is head
* - NO MORE SPACE: on provided destination tbuf to collect
* one more WHOLE record
* - MAX NUMBER OF REQUIRED RECORDS READ: if max_recs was passed in
* as ZERO it means read as much as you can till head is reached.
*
* If at start it detects an OUT OF SYNC, so that next_rec is
* NO MORE pointing to a valid record, it tries to RE-SYNC on next
* GOOD KNOWN record or to HEAD as last resource and injects into
* the user buffer an OUT OF SYNC marker record.
*
* ASSUMES proper locking and syncing ALREADY in place...does NOT SLEEP.
*/
size_t read_next_records(struct scsc_ring_buffer *rb, int max_recs,
loff_t *last_read_rec, void *tbuf, size_t tsz)
{
size_t bytes_read = 0, last_read = -1;
int resynced_bytes = 0, records = 0;
loff_t next_rec = 0;
/* Nothing to read...simply return 0 causing reader to exit */
if (*last_read_rec == rb->head)
return bytes_read;
if (!is_ring_read_pos_valid(rb, *last_read_rec)) {
if (is_ring_pos_safe(rb, *last_read_rec)) {
/* Try to resync from *last_read_rec INVALID POS */
next_rec = reader_resync(rb, *last_read_rec,
&resynced_bytes);
} else {
/* Skip to head...ONLY safe place known in this case. */
resynced_bytes = 0;
next_rec = rb->head;
}
bytes_read += mark_out_of_sync(tbuf, tsz, resynced_bytes);
} else {
/* next to read....we're surely NOT already at rb->head here */
next_rec = (*last_read_rec != rb->last) ?
SCSC_GET_NEXT_SLOT_POS(rb, *last_read_rec) : 0;
}
do {
/* Account for last read */
last_read = bytes_read;
bytes_read +=
_read_one_whole_record(tbuf + bytes_read, rb,
next_rec, tsz - bytes_read);
/* Did a WHOLE record fit into available tbuf ? */
if (bytes_read != last_read) {
records++;
*last_read_rec = next_rec;
if (*last_read_rec != rb->head)
next_rec = (next_rec != rb->last) ?
SCSC_GET_NEXT_SLOT_POS(rb, next_rec) : 0;
}
} while (*last_read_rec != rb->head &&
last_read != bytes_read &&
(!max_recs || records <= max_recs));
return bytes_read;
}
/* Assumes ring is already spinlocked. */
void scsc_ring_truncate(struct scsc_ring_buffer *rb)
{
rb->head = 0;
rb->tail = 0;
rb->records = 0;
rb->written = 0;
rb->wraps = 0;
rb->last = 0;
memset(rb->buf + rb->head, 0x00, SCSC_RINGREC_SZ);
}
/**
* alloc_ring_buffer - Allocates and initializes a basic ring buffer,
* including a basic spare area used to handle string splitting when the
* buffer wraps. Basic spinlock/waitqueue init takes place here too.
*
* @bsz: the size of the ring buffer to allocate in bytes
* @ssz: the size of the spare area to allocate in bytes
* @name: a name for this ring buffer
*/
struct scsc_ring_buffer __init *alloc_ring_buffer(size_t bsz, size_t ssz,
const char *name)
{
struct scsc_ring_buffer *rb = kmalloc(sizeof(*rb), GFP_KERNEL);
if (!rb)
return NULL;
rb->bsz = bsz;
rb->ssz = ssz;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
rb->buf = kzalloc(rb->bsz + rb->ssz, GFP_KERNEL);
if (!rb->buf) {
kfree(rb);
return NULL;
}
#else
rb->buf = a_ring;
#endif
rb->head = 0;
rb->tail = 0;
rb->last = 0;
rb->written = 0;
rb->records = 0;
rb->wraps = 0;
rb->oos = 0;
rb->spare = rb->buf + rb->bsz;
memset(rb->name, 0x00, RNAME_SZ);
strncpy(rb->name, name, RNAME_SZ - 1);
raw_spin_lock_init(&rb->lock);
init_waitqueue_head(&rb->wq);
return rb;
}
/*
* free_ring_buffer - Free the ring, what else...
* ...does NOT account for the spinlock's existence currently
*
* @rb: a pointer to the ring buffer to free
*/
void free_ring_buffer(struct scsc_ring_buffer *rb)
{
if (!rb)
return;
#ifndef CONFIG_SCSC_STATIC_RING_SIZE
kfree(rb->buf);
#endif
kfree(rb);
}
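Putting the ring API together, the intended lifecycle is sketched below; the tag/level values and the data buffer are illustrative, and the real caller is samlog_init() in scsc_logring_main.c:
/*
* struct scsc_ring_buffer *rb;
*
* rb = alloc_ring_buffer(8192, BASE_SPARE_SZ, "ring0");
* if (rb) {
* push_record_blob(rb, FIRST_BIN_TAG, 7, 1, data, data_len);
* ...
* free_ring_buffer(rb);
* }
*/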

View file

@ -0,0 +1,241 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#ifndef _SCSC_LOGRING_RING_H_
#define _SCSC_LOGRING_RING_H_
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/crc32.h>
#include <scsc/scsc_logring.h>
#include "scsc_logring_common.h"
#define SCSC_BINFO_LEN 32
#define SCSC_HBUF_LEN 128
/* A safe size to enforce on ingressing binary blobs; this accounts
* for possible binary expansion while reading, in order to fit the reader
* DEFAULT_TBUF_SZ in any possible case: this way we avoid getting truncated
* data on read while dumping big binary blobs.
*/
#define SCSC_MAX_BIN_BLOB_SZ 1920
/**
* This spare area is used to prepare a logging entry before pushing it into
* the ring and so it's the maximum length allowed for a log entry.
* When this changes (hugely), you should check the size of the len field
* in struct scsc_ring_record below.
*/
#define BASE_SPARE_SZ 2048
#define RNAME_SZ 16
#define DEFAULT_RING_BUFFER_SZ 8192
#define DEFAULT_ENABLE_HEADER 1
#define DEFAULT_ENABLE_LOGRING 1
/* The default len, in bytes, of the binary blob to decode in ASCII
* Human readable form. -1 means DECODE EVERYTHING !
*/
#define DEFAULT_BIN_DECODE_LEN -1
#define DEBUGFS_ROOT "/sys/kernel/debug"
#define DEBUGFS_RING0_ROOT "ring0"
/**
* Our ring buffer is allocated simply as a bunch of contiguous bytes.
* Data is stored as a contiguous stream of concatenated records, each one
* starting with a record descriptor of type scsc_ring_record: data content
* is then appended to the record descriptor; in this way we can account
* for different types of content, pushing the TAG describing the content
* into the record descriptor itself, being then able to operate differently
* on read depending on the type of content.
* The tail and head references 'point' to the start of the first (oldest)
* and the last (newest) record: any write will happen after the end
* of the current head: these references in fact point to the starting byte
* of the record descriptor modulo the ring size (they're NOT absolute
* pointers). Since our 'metadata' is embedded inside the ring itself (like
* printk does) we never write variable string content in place BUT use
* instead the spare area (allocated contiguously at the end of the ring)
* to expand the provided format string and then memcpy the content to
* the final position after having properly updated the record descriptors
* and eventually moved away the tail when overwriting.
* Moreover we never wrap a record across the ring boundary: if there's NOT
* enough space at the end of the ring, we simply place it at the start.
* Moreover this struct holds a kcache reference to allocate temporary
* buffers to use when double buffering is needed, a raw spinlock for
* protection and a wait_queue_t for blocking I/O.
*
* @buf: the ring-buffer itself starts here
* @spare: start of spare area (buf[bsz])
* @name: a simple named identifier
* @bsz: ring size
* @ssz: size of spare (fixed at BASE_SPARE_SZ)
* @head: newest record written (first byte)...next write after it
* @tail: oldest record written (first byte)...a full dump read will start
* from here
* @last: the last record before the end of the ring.
* @records: the number of records
* @written: a general progressive counter of total bytes written into
* the ring
* @lock: a raw_spinlock_t to protect concurrent access
* @wq: a wait queue where to put sleeping processes waiting for input.
* They're woken up at the end of scsc_printk().
* @refc: a reference counter...currently unused.
* @private: useful to hold some user provided data (used to hold debugfs
* initdata related to this ring)
* @kcache: a reference to a kmem_cache created at initialization time
* to get fresh temporary buffers on the fly when copying to user and in
* need of a double buffer
*/
struct scsc_ring_buffer {
char *buf;
char *spare;
char name[RNAME_SZ];
size_t bsz;
size_t ssz;
loff_t head;
loff_t tail;
loff_t last;
int records;
int wraps;
int oos;
u64 written;
raw_spinlock_t lock;
wait_queue_head_t wq;
atomic_t refc;
void *private;
};
/**
* Our ring buffer is now built concatenating entries prepended by a record
* that describes the content itself. This will allow us to store different
* types of data (NOT only string) and to interpret it.
* Each record is described by this struct that is laid out in front of the
* effective content:
*
* | SYNC | CRC | tag | len | lev | ctx | core | nsec | <buffer len - - |
*
* @SYNC: a fixed pattern to search for when re-syncing after a reader
* has got lost.
* @CRC: CRC32 calculated, using kernel crc32_le, on the whole record header,
* taking care to substitute this field with the 32 LSBs of this record's
* relative starting position (relative to the absolute ring buffer
* start).
* @tag: type of this record...matters especially to identify binary data
* records
* @len: this is the length in bytes of buffer. All string content should
* be NULL terminated. This length will anyway NEVER exceed
* BASE_SPARE_SZ that's currently a few KB.
* @lev: the debuglevel associated to this message.
* @ctx: the execution context of the logged line:
* SoftIRQ / Interrupt / Process
* @core: the CPU core id
* @nsec: the timestamp in nanoseconds
*/
struct scsc_ring_record {
u32 sync;
u32 crc;
u8 tag;
u16 len;
u8 lev;
u8 ctx;
u8 core;
s64 nsec;
} __packed; /* should NOT be needed */
#define SYNC_MAGIC 0xDEADBEEF
/**
* Fill a scsc_ring_record descriptor.
* local_clock() is from the same family of time functions used
* by printk and returns nanoseconds.
*/
#define SCSC_FILL_RING_RECORD(r, tag, lev) \
do { \
(r)->sync = SYNC_MAGIC; \
(r)->crc = 0; \
(r)->nsec = local_clock(); \
(r)->tag = tag; \
(r)->len = 0; \
(r)->lev = lev; \
(r)->ctx = ((in_interrupt()) ? \
((in_softirq()) ? 'S' : 'I') : 'P'); \
(r)->core = smp_processor_id(); \
} while (0)
#define SCSC_RINGREC_SZ (sizeof(struct scsc_ring_record))
#define SCSC_CRC_RINGREC_SZ (SCSC_RINGREC_SZ - sizeof(SYNC_MAGIC))
#define SCSC_IS_RING_IN_USE(ring) \
((atomic_read(&((struct scsc_ring_buffer *)(ring))->refc)) != 0)
#define SCSC_GET_RING_REFC(ring) \
atomic_inc(&((struct scsc_ring_buffer *)(ring))->refc)
#define SCSC_PUT_RING_REFC(ring) \
atomic_dec(&((struct scsc_ring_buffer *)(ring))->refc)
#define SCSC_GET_REC_BUF(p) (((char *)(p)) + SCSC_RINGREC_SZ)
#define SCSC_GET_REC_LEN(recp) (((struct scsc_ring_record *)(recp))->len)
#define SCSC_GET_REC_TAG(recp) (((struct scsc_ring_record *)(recp))->tag)
#define SCSC_GET_REC_CRC(recp) (((struct scsc_ring_record *)(recp))->crc)
#define SCSC_GET_PTR(ring, pos) ((ring)->buf + (pos))
#define SCSC_GET_REC(ring, pos) \
((struct scsc_ring_record *)(SCSC_GET_PTR((ring), (pos))))
#define SCSC_IS_REC_SYNC_VALID(recp) ((recp)->sync == SYNC_MAGIC)
#define SCSC_GET_HEAD_PTR(ring) SCSC_GET_PTR((ring), (ring)->head)
#define SCSC_GET_NEXT_FREE_SLOT_PTR(ring) \
(SCSC_GET_HEAD_PTR((ring)) + SCSC_RINGREC_SZ + \
SCSC_GET_REC_LEN(SCSC_GET_HEAD_PTR(ring)))
#define SCSC_GET_SLOT_LEN(ring, pos) \
(((SCSC_GET_REC_LEN(SCSC_GET_PTR((ring), (pos)))) != 0) ? \
(SCSC_RINGREC_SZ + SCSC_GET_REC_LEN(SCSC_GET_PTR((ring), (pos)))) : 0)
#define SCSC_GET_NEXT_SLOT_POS(ring, pos) \
((pos) + SCSC_GET_SLOT_LEN((ring), (pos)))
#define SCSC_RING_FREE_BYTES(rb) \
(((rb)->head >= (rb)->tail) ? \
((rb)->bsz - SCSC_GET_NEXT_SLOT_POS(rb, rb->head)) : \
((rb)->tail - SCSC_GET_NEXT_SLOT_POS(rb, rb->head)))
#define SCSC_USED_BYTES(rb) ((rb)->bsz - SCSC_RING_FREE_BYTES(rb))
#define SCSC_LOGGED_BYTES(rb) (SCSC_USED_BYTES(rb) - \
((rb)->records * SCSC_RINGREC_SZ))
#define SCSC_GET_NEXT_REC_ENTRY_POS(ring, rpos) \
(rpos + SCSC_RINGREC_SZ + \
SCSC_GET_REC_LEN(SCSC_GET_PTR((ring), (rpos))))
/* Ring buffer API */
struct scsc_ring_buffer *alloc_ring_buffer(size_t bsz, size_t ssz,
const char *name) __init;
void free_ring_buffer(struct scsc_ring_buffer *rb);
void scsc_ring_truncate(struct scsc_ring_buffer *rb);
int push_record_string(struct scsc_ring_buffer *rb, int tag, int lev,
int prepend_header, const char *msg_head, va_list args);
int push_record_blob(struct scsc_ring_buffer *rb, int tag, int lev,
int prepend_header, const void *start, size_t len);
size_t read_next_records(struct scsc_ring_buffer *rb, int max_recs,
loff_t *last_read_rec, void *tbuf, size_t tsz);
#endif /* _SCSC_LOGRING_RING_H_ */

View file

@ -0,0 +1,48 @@
/**
* Loopback Protocol (Implementation)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <hydra/trace.h>
#include "scsc_loopback.h"
/*****************************************************************************/
/**
* Handle data received on port by sending it back.
*/
static void scsc_loopback_port_recv(
struct scsc_mport *port,
const unsigned char *data,
size_t count)
{
os_trace_dbg("%s: @%p, count %zu", __func__, port, count);
scsc_mport_emit(port, data, count);
}
static const struct scsc_mport_ops scsc_loopback_port_ops = {
scsc_loopback_port_recv
};
/*****************************************************************************/
void scsc_loopback_init(struct scsc_loopback *loopback)
{
os_trace_dbg("%s: @%p", __func__, loopback);
scsc_mport_init(&loopback->port, &scsc_loopback_port_ops);
}
void scsc_loopback_deinit(struct scsc_loopback *loopback)
{
}
struct scsc_mport *scsc_loopback_get_port(
struct scsc_loopback *loopback)
{
return &loopback->port;
}
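In use, the transport core is expected to deliver inbound bytes through the registered ops, so feeding the loopback port is equivalent to the following sketch (caller and payload are hypothetical):
/*
* struct scsc_loopback lb;
* struct scsc_mport *p;
*
* scsc_loopback_init(&lb);
* p = scsc_loopback_get_port(&lb);
* Delivering "ping" to p makes the loopback emit "ping" straight
* back through scsc_mport_emit(p, ...).
*/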

View file

@ -0,0 +1,24 @@
#ifndef __HCI_LOOPBACK_H
#define __HCI_LOOPBACK_H
/**
* Loopback Protocol (Interface)
*
* Bounces anything sent straight back.
*/
#include "scsc_mport.h"
/*****************************************************************************/
struct scsc_loopback {
struct scsc_mport port;
};
/*****************************************************************************/
void scsc_loopback_init(struct scsc_loopback *loopback);
void scsc_loopback_deinit(struct scsc_loopback *loopback);
struct scsc_mport *scsc_loopback_get_port(struct scsc_loopback *loopback);
#endif /* __HCI_LOOPBACK_H */

View file

@ -0,0 +1,195 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef __SCSC_MIF_ABS_H
#define __SCSC_MIF_ABS_H
#include <linux/types.h>
#include <scsc/scsc_mifram.h>
struct device;
/* To R4/M4 */
enum scsc_mif_abs_target {
SCSC_MIF_ABS_TARGET_R4 = 0,
SCSC_MIF_ABS_TARGET_M4 = 1
};
/**
* Abstraction of the Maxwell "Memory Interface" aka MIF.
*
* There will be at least two implementations of this
* interface - The native AXI one and a PCIe based emulation.
*
* A reference to an interface will be passed to the
* scsc_mx driver when the system starts up.
*/
struct scsc_mif_abs {
/**
* Destroy this interface.
*
* This should be called when the underlying device is
* removed.
*/
void (*destroy)(struct scsc_mif_abs *interface);
/* Return an unique id for this host, and prefreabrly identifies specific device (example pcie0, pcie1) */
char *(*get_uid)(struct scsc_mif_abs *interface);
/**
* Controls the hardware "reset" state of the Maxwell
* subsystem.
*
* Setting reset=TRUE places the subsystem in its low
* power "reset" state. This function is called
* by the Maxwell Manager near the end of the subsystem
* shutdown process, before "unmapping" the interface.
*
* Setting reset=FALSE releases the subsystem reset state.
* The subsystem will then start its cold boot sequence. This
* function is called
* by the Subsystem Manager near the end of the subsystem
* startup process after installing the maxwell firmware and
* other resources in MIF RAM.
*/
int (*reset)(struct scsc_mif_abs *interface, bool reset);
/**
* This function maps the Maxwell interface hardware (MIF
* DRAM) into kernel memory space.
*
* The amount of memory allocated must be defined and returned
* in (*allocated) by the abstraction layer implementation.
*
* This returns kernel-space pointer to the start of the
* shared MIF DRAM. The Maxwell Manager will load firmware
* to this location and configure the MIF Heap Manager to
* manage any unused memory at the end of the DRAM region.
*
* The scsc_mx driver should call this when the Maxwell
* subsystem is required by any service client.
*
* The mailbox, irq and dram functions are only usable once
* this call has returned. HERE: Should we rename this to
* "open" and return a handle to these conditional methods?
*/
void *(*map)(struct scsc_mif_abs *interface, size_t *allocated);
/**
* The inverse of "map". Should be called once the maxwell
* subsystem is no longer required and has been placed into
* "reset" state (see reset method).
*/
void (*unmap)(struct scsc_mif_abs *interface, void *mem);
#ifdef MAILBOX_SETGET
void (*mailbox_set)(struct scsc_mif_abs *interface, u32 mbox_num, u32 value);
u32 (*mailbox_get)(struct scsc_mif_abs *interface, u32 mbox_num);
#endif
/**
* The Mailbox pointer returned can be used for direct access
* to the hardware register for efficiency.
* The pointer is guaranteed to remain valid between map and unmap calls.
* HERE: If we are not assuming AP vs R4 same-endianness then this
* should be explicitly leu32 or u8[4] (or something equivalent).
*/
u32 *(*get_mbox_ptr)(struct scsc_mif_abs *interface, u32 mbox_index);
/**
* MIF Interrupt Hardware Controls
*/
#ifdef M4_INT
u32 (*irq_bit_mask_status_get)(struct scsc_mif_abs *interface, u32 irq_reg);
u32 (*irq_bit_get)(struct scsc_mif_abs *interface, u32 irq_reg);
void (*irq_bit_set)(struct scsc_mif_abs *interface, u32 irq_reg, int bit_num);
void (*irq_bit_clear)(struct scsc_mif_abs *interface, u32 irq_reg, int bit_num);
void (*irq_bit_mask)(struct scsc_mif_abs *interface, u32 irq_reg, int bit_num);
void (*irq_bit_unmask)(struct scsc_mif_abs *interface, u32 irq_reg, int bit_num);
/**
* Register handler for the interrupt from the
* MIF Interrupt Hardware.
*
* This is used by the MIF Interrupt Manager to
* register a handler that demultiplexes the
* individual interrupt sources (MIF Interrupt Bits)
* to source-specific handlers.
*/
void (*irq_reg_handler)(struct scsc_mif_abs *interface, u32 irq_reg, void (*handler)(int irq, void *data), void *dev);
void (*irq_unreg_handler)(struct scsc_mif_abs *interface, u32 irq_reg);
#else
u32 (*irq_bit_mask_status_get)(struct scsc_mif_abs *interface);
u32 (*irq_get)(struct scsc_mif_abs *interface);
void (*irq_bit_set)(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target);
void (*irq_bit_clear)(struct scsc_mif_abs *interface, int bit_num);
void (*irq_bit_mask)(struct scsc_mif_abs *interface, int bit_num);
void (*irq_bit_unmask)(struct scsc_mif_abs *interface, int bit_num);
/**
* Register handler for the interrupt from the
* MIF Interrupt Hardware.
*
* This is used by the MIF Interrupt Manager to
* register a handler that demultiplexes the
* individual interrupt sources (MIF Interrupt Bits)
* to source-specific handlers.
*/
void (*irq_reg_handler)(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev);
void (*irq_unreg_handler)(struct scsc_mif_abs *interface);
#endif
/* Clear HW interrupt line */
void (*irq_clear)(void);
void (*irq_reg_reset_request_handler)(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev);
void (*irq_unreg_reset_request_handler)(struct scsc_mif_abs *interface);
/**
* Install suspend/resume handlers for the MIF abstraction driver
*/
void (*suspend_reg_handler)(struct scsc_mif_abs *abs,
int (*suspend)(struct scsc_mif_abs *abs, void *data),
void (*resume)(struct scsc_mif_abs *abs, void *data),
void *data);
void (*suspend_unreg_handler)(struct scsc_mif_abs *abs);
/**
* Return kernel-space pointer to MIF ram.
* The pointer is guaranteed to remain valid between map and unmap calls.
*/
void *(*get_mifram_ptr)(struct scsc_mif_abs *interface, scsc_mifram_ref ref);
/* Maps kernel-space pointer to MIF RAM to portable reference */
int (*get_mifram_ref)(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref);
/* Return the physical page frame number corresponding to the physical address
 * to which the virtual address is mapped. Needed in mmap file operations. */
uintptr_t (*get_mifram_pfn)(struct scsc_mif_abs *interface);
/** Return a kernel device associated 1:1 with the Maxwell instance.
* This is published only for the purpose of associating service drivers with a Maxwell instance
* for logging purposes. Clients should not make any assumptions about the device type.
* In some configurations this may be the associated host-interface device (AXI/PCIe),
* but this may change in future.
*/
struct device *(*get_mif_device)(struct scsc_mif_abs *interface);
void (*mif_dump_registers)(struct scsc_mif_abs *interface);
void (*mif_cleanup)(struct scsc_mif_abs *interface);
void (*mif_restart)(struct scsc_mif_abs *interface);
};
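/*
 * A minimal usage sketch of the abstraction above (illustrative only, not
 * built): "mif" is any concrete struct scsc_mif_abs implementation and
 * error handling is elided.
 *
 *	size_t ram_size;
 *	void *ram = mif->map(mif, &ram_size);	// power up; DRAM/mbox/irq usable
 *	...					// run mailbox/irq/mifram traffic
 *	mif->unmap(mif, ram);			// after the subsystem is in reset
 */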
struct device;
struct scsc_mif_abs_driver {
char *name;
void (*probe)(struct scsc_mif_abs_driver *abs_driver, struct scsc_mif_abs *abs);
void (*remove)(struct scsc_mif_abs *abs);
};
extern void scsc_mif_abs_register(struct scsc_mif_abs_driver *driver);
extern void scsc_mif_abs_unregister(struct scsc_mif_abs_driver *driver);
/* mmap-debug driver */
struct scsc_mif_mmap_driver {
char *name;
void (*probe)(struct scsc_mif_mmap_driver *mmap_driver, struct scsc_mif_abs *abs);
void (*remove)(struct scsc_mif_abs *abs);
};
extern void scsc_mif_mmap_register(struct scsc_mif_mmap_driver *mmap_driver);
extern void scsc_mif_mmap_unregister(struct scsc_mif_mmap_driver *mmap_driver);
#endif

View file

@ -0,0 +1,150 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/module.h>
#include <linux/slab.h>
#include <scsc/scsc_logring.h>
#include "scsc_mx_impl.h"
#include "mifintrbit.h"
#include "miframman.h"
#include "mifmboxman.h"
#include "mifproc.h"
#include "mxman.h"
#include "mxproc.h"
#include "srvman.h"
#include "mxmgmt_transport.h"
#include "gdb_transport.h"
#include "mxlog.h"
#include "panicmon.h"
#include "mxlog_transport.h"
#include "suspendmon.h"
struct scsc_mx {
struct scsc_mif_abs *mif_abs;
struct mifintrbit intr;
struct miframman ram;
struct mifmboxman mbox;
struct mifproc proc;
struct mxman mxman;
struct srvman srvman;
struct mxmgmt_transport mxmgmt_transport;
struct gdb_transport gdb_transport_r4;
struct gdb_transport gdb_transport_m4;
int users;
struct mxlog mxlog;
struct panicmon panicmon;
struct mxlog_transport mxlog_transport;
struct suspendmon suspendmon;
};
struct scsc_mx *scsc_mx_create(struct scsc_mif_abs *mif)
{
struct scsc_mx *mx;
mx = kzalloc(sizeof(*mx), GFP_ATOMIC);
if (!mx)
return NULL;
mx->mif_abs = mif;
mifintrbit_init(&mx->intr, mif);
mifmboxman_init(&mx->mbox);
panicmon_init(&mx->panicmon, mx);
suspendmon_init(&mx->suspendmon, mx);
mxman_init(&mx->mxman, mx);
srvman_init(&mx->srvman, mx);
mifproc_create_proc_dir(mx->mif_abs);
SCSC_TAG_DEBUG(MXMAN, "Hurray Maxwell is here with %p\n", mx);
return mx;
}
void scsc_mx_destroy(struct scsc_mx *mx)
{
SCSC_TAG_DEBUG(MXMAN, "\n");
BUG_ON(mx == NULL);
mifintrbit_deinit(&mx->intr);
mifmboxman_deinit(scsc_mx_get_mboxman(mx));
suspendmon_deinit(scsc_mx_get_suspendmon(mx));
panicmon_deinit(scsc_mx_get_panicmon(mx));
mifproc_remove_proc_dir();
srvman_deinit(&mx->srvman);
mxman_deinit(&mx->mxman);
kfree(mx);
SCSC_TAG_DEBUG(MXMAN, "OK\n");
}
struct scsc_mif_abs *scsc_mx_get_mif_abs(struct scsc_mx *mx)
{
return mx->mif_abs;
}
struct mifintrbit *scsc_mx_get_intrbit(struct scsc_mx *mx)
{
return &mx->intr;
}
struct miframman *scsc_mx_get_ramman(struct scsc_mx *mx)
{
return &mx->ram;
}
struct mifmboxman *scsc_mx_get_mboxman(struct scsc_mx *mx)
{
return &mx->mbox;
}
struct device *scsc_mx_get_device(struct scsc_mx *mx)
{
return mx->mif_abs->get_mif_device(mx->mif_abs);
}
EXPORT_SYMBOL_GPL(scsc_mx_get_device); /* TODO: export a top-level API for this */
struct mxman *scsc_mx_get_mxman(struct scsc_mx *mx)
{
return &mx->mxman;
}
struct srvman *scsc_mx_get_srvman(struct scsc_mx *mx)
{
return &mx->srvman;
}
struct mxmgmt_transport *scsc_mx_get_mxmgmt_transport(struct scsc_mx *mx)
{
return &mx->mxmgmt_transport;
}
struct gdb_transport *scsc_mx_get_gdb_transport_r4(struct scsc_mx *mx)
{
return &mx->gdb_transport_r4;
}
struct gdb_transport *scsc_mx_get_gdb_transport_m4(struct scsc_mx *mx)
{
return &mx->gdb_transport_m4;
}
struct mxlog *scsc_mx_get_mxlog(struct scsc_mx *mx)
{
return &mx->mxlog;
}
struct panicmon *scsc_mx_get_panicmon(struct scsc_mx *mx)
{
return &mx->panicmon;
}
struct mxlog_transport *scsc_mx_get_mxlog_transport(struct scsc_mx *mx)
{
return &mx->mxlog_transport;
}
struct suspendmon *scsc_mx_get_suspendmon(struct scsc_mx *mx)
{
return &mx->suspendmon;
}

View file

@ -0,0 +1,47 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef _CORE_H_
#define _CORE_H_
#include <linux/firmware.h>
#include "scsc_mif_abs.h"
struct device;
struct scsc_mx;
struct mifintrbit;
struct miframman;
struct mifmboxman;
struct mxman;
struct srvman;
struct mxmgmt_transport;
struct mxproc;
struct scsc_mx *scsc_mx_create(struct scsc_mif_abs *mif);
void scsc_mx_destroy(struct scsc_mx *mx);
struct scsc_mif_abs *scsc_mx_get_mif_abs(struct scsc_mx *mx);
struct mifintrbit *scsc_mx_get_intrbit(struct scsc_mx *mx);
struct miframman *scsc_mx_get_ramman(struct scsc_mx *mx);
struct mifmboxman *scsc_mx_get_mboxman(struct scsc_mx *mx);
struct device *scsc_mx_get_device(struct scsc_mx *mx);
struct mxman *scsc_mx_get_mxman(struct scsc_mx *mx);
struct srvman *scsc_mx_get_srvman(struct scsc_mx *mx);
struct mxproc *scsc_mx_get_mxproc(struct scsc_mx *mx);
struct mxmgmt_transport *scsc_mx_get_mxmgmt_transport(struct scsc_mx *mx);
struct gdb_transport *scsc_mx_get_gdb_transport_r4(struct scsc_mx *mx);
struct gdb_transport *scsc_mx_get_gdb_transport_m4(struct scsc_mx *mx);
struct mxlog *scsc_mx_get_mxlog(struct scsc_mx *mx);
struct mxlog_transport *scsc_mx_get_mxlog_transport(struct scsc_mx *mx);
struct panicmon *scsc_mx_get_panicmon(struct scsc_mx *mx);
struct suspendmon *scsc_mx_get_suspendmon(struct scsc_mx *mx);
int mx140_file_download_fw(struct scsc_mx *mx, void *dest, size_t dest_size, u32 *fw_image_size);
int mx140_request_file(struct scsc_mx *mx, char *path, const struct firmware **firmp);
int mx140_release_file(struct scsc_mx *mx, const struct firmware *firmp);
void mx140_basedir_file(struct scsc_mx *mx);
int mx140_file_select_fw(struct scsc_mx *mx, u32 suffix);
bool mx140_file_supported_hw(struct scsc_mx *mx, u32 hw_ver);
#endif

View file

@ -0,0 +1,196 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/slab.h>
#include <linux/module.h>
#include <scsc/scsc_mx.h>
#include <scsc/scsc_release.h>
#include <scsc/scsc_logring.h>
#include "scsc_mif_abs.h"
#include "scsc_mx_impl.h"
#define SCSC_MX_CORE_MODDESC "mx140 Core Driver"
struct clients_node {
struct list_head list;
struct scsc_mx_module_client *module_client;
};
struct mx_node {
struct list_head list;
struct scsc_mx *mx;
};
static struct mx_module {
struct list_head clients_list;
struct list_head mx_list;
} mx_module = {
.clients_list = LIST_HEAD_INIT(mx_module.clients_list),
.mx_list = LIST_HEAD_INIT(mx_module.mx_list)
};
static void scsc_mx_module_probe_registered_clients(struct scsc_mx *new_mx)
{
bool client_registered = false;
struct clients_node *client_node, *client_next;
/* Traverse Linked List for each mif_driver node */
list_for_each_entry_safe(client_node, client_next, &mx_module.clients_list, list) {
client_node->module_client->probe(client_node->module_client, new_mx, SCSC_MODULE_CLIENT_REASON_HW_PROBE);
client_registered = true;
}
if (client_registered == false)
SCSC_TAG_INFO(MXMAN, "No clients registered\n");
}
static void scsc_mx_module_probe(struct scsc_mif_abs_driver *abs_driver, struct scsc_mif_abs *mif_abs)
{
struct scsc_mx *new_mx;
struct mx_node *mx_node;
	/* Avoid unused parameter warning */
(void)abs_driver;
mx_node = kzalloc(sizeof(*mx_node), GFP_ATOMIC);
if (!mx_node)
return;
/* Create new mx instance */
new_mx = scsc_mx_create(mif_abs);
if (!new_mx) {
kfree(mx_node);
SCSC_TAG_ERR(MXMAN, "Error allocating new_mx\n");
return;
}
/* Add instance in mx_node linked list */
mx_node->mx = new_mx;
list_add_tail(&mx_node->list, &mx_module.mx_list);
scsc_mx_module_probe_registered_clients(new_mx);
}
static void scsc_mx_module_remove(struct scsc_mif_abs *abs)
{
bool match = false;
struct mx_node *mx_node, *next;
/* Traverse Linked List for each mx node */
list_for_each_entry_safe(mx_node, next, &mx_module.mx_list, list) {
/* If there is a match, call destroy */
if (scsc_mx_get_mif_abs(mx_node->mx) == abs) {
match = true;
scsc_mx_destroy(mx_node->mx);
list_del(&mx_node->list);
kfree(mx_node);
}
}
if (match == false)
SCSC_TAG_ERR(MXMAN, "FATAL, no match for given scsc_mif_abs\n");
}
static struct scsc_mif_abs_driver mx_module_mif_if = {
.name = "mx140 driver",
.probe = scsc_mx_module_probe,
.remove = scsc_mx_module_remove,
};
static int __init scsc_mx_module_init(void)
{
SCSC_TAG_INFO(MXMAN, SCSC_MX_CORE_MODDESC " scsc_release %d.%d.%d\n",
SCSC_RELEASE_PRODUCT,
SCSC_RELEASE_ITERATION,
SCSC_RELEASE_CANDIDATE);
scsc_mif_abs_register(&mx_module_mif_if);
return 0;
}
static void __exit scsc_mx_module_exit(void)
{
struct mx_node *mx_node, *next_mx;
/* Traverse Linked List for each mx node */
list_for_each_entry_safe(mx_node, next_mx, &mx_module.mx_list, list) {
scsc_mx_destroy(mx_node->mx);
list_del(&mx_node->list);
kfree(mx_node);
}
scsc_mif_abs_unregister(&mx_module_mif_if);
SCSC_TAG_INFO(MXMAN, SCSC_MX_CORE_MODDESC " unloaded\n");
}
/**
 * Reset all registered service drivers by first calling their remove callback
 * and then their probe callback. This function is used during recovery
 * operations, where the chip has been reset as part of the recovery and the
 * service drivers have to do the same.
 */
int scsc_mx_module_reset(void)
{
struct clients_node *clients_node;
struct mx_node *mx_node, *next_mx;
	/* Traverse the linked list and call the registered remove callbacks */
list_for_each_entry_safe(mx_node, next_mx, &mx_module.mx_list, list)
list_for_each_entry(clients_node, &mx_module.clients_list, list)
clients_node->module_client->remove(clients_node->module_client, mx_node->mx, SCSC_MODULE_CLIENT_REASON_RECOVERY);
	/* Traverse the linked list and call the registered probe callbacks */
list_for_each_entry_safe(mx_node, next_mx, &mx_module.mx_list, list)
list_for_each_entry(clients_node, &mx_module.clients_list, list)
clients_node->module_client->probe(clients_node->module_client, mx_node->mx, SCSC_MODULE_CLIENT_REASON_RECOVERY);
return 0;
}
EXPORT_SYMBOL(scsc_mx_module_reset);
int scsc_mx_module_register_client_module(struct scsc_mx_module_client *module_client)
{
struct clients_node *module_client_node;
struct mx_node *mx_node;
/* Add node in modules linked list */
module_client_node = kzalloc(sizeof(*module_client_node), GFP_ATOMIC);
if (!module_client_node)
return -ENOMEM;
module_client_node->module_client = module_client;
list_add_tail(&module_client_node->list, &mx_module.clients_list);
/* Traverse Linked List for each mx node */
list_for_each_entry(mx_node, &mx_module.mx_list, list) {
module_client->probe(module_client, mx_node->mx, SCSC_MODULE_CLIENT_REASON_HW_PROBE);
}
return 0;
}
EXPORT_SYMBOL(scsc_mx_module_register_client_module);
void scsc_mx_module_unregister_client_module(struct scsc_mx_module_client *module_client)
{
struct clients_node *client_node, *client_next;
struct mx_node *mx_node, *next_mx;
/* Traverse Linked List for each client_list */
list_for_each_entry_safe(client_node, client_next, &mx_module.clients_list, list) {
if (client_node->module_client == module_client) {
list_for_each_entry_safe(mx_node, next_mx, &mx_module.mx_list, list) {
module_client->remove(module_client, mx_node->mx, SCSC_MODULE_CLIENT_REASON_HW_REMOVE);
}
list_del(&client_node->list);
kfree(client_node);
}
}
}
EXPORT_SYMBOL(scsc_mx_module_unregister_client_module);
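/*
 * Minimal client-module sketch (illustrative; the "foo" names are
 * hypothetical and the probe/remove signatures are assumed from the calls
 * made above). A registered client is probed for every existing and future
 * Maxwell instance, and again after recovery:
 *
 *	static void foo_probe(struct scsc_mx_module_client *client,
 *			      struct scsc_mx *mx, enum scsc_module_client_reason reason)
 *	{
 *		// open services against "mx" here
 *	}
 *
 *	static void foo_remove(struct scsc_mx_module_client *client,
 *			       struct scsc_mx *mx, enum scsc_module_client_reason reason)
 *	{
 *		// tear services down here
 *	}
 *
 *	static struct scsc_mx_module_client foo_client = {
 *		.name = "foo", .probe = foo_probe, .remove = foo_remove,
 *	};
 *
 *	scsc_mx_module_register_client_module(&foo_client);
 */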
module_init(scsc_mx_module_init);
module_exit(scsc_mx_module_exit);
MODULE_DESCRIPTION(SCSC_MX_CORE_MODDESC);
MODULE_AUTHOR("SCSC");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,698 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/firmware.h>
#include <linux/wakelock.h>
#include <scsc/scsc_mx.h>
#include <scsc/scsc_logring.h>
#include "mxman.h"
#include "scsc_mx_impl.h"
#include "mifintrbit.h"
#include "miframman.h"
#include "mifmboxman.h"
#include "srvman.h"
#include "servman_messages.h"
#include "mxmgmt_transport.h"
static ulong sm_completion_timeout_ms = 1000;
module_param(sm_completion_timeout_ms, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sm_completion_timeout_ms, "Timeout Service Manager start/stop (ms) - default 1000. 0 = infinite");
#define SCSC_MX_SERVICE_RECOVERY_TIMEOUT 20000 /* 20 seconds */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
#define reinit_completion(completion) INIT_COMPLETION(*(completion))
#endif
struct scsc_service {
struct list_head list;
struct scsc_mx *mx;
enum scsc_service_id id;
struct scsc_service_client *client;
struct completion sm_msg_start_completion;
struct completion sm_msg_stop_completion;
};
void srvman_init(struct srvman *srvman, struct scsc_mx *mx)
{
SCSC_TAG_INFO(MXMAN, "\n");
srvman->mx = mx;
INIT_LIST_HEAD(&srvman->service_list);
mutex_init(&srvman->service_list_mutex);
mutex_init(&srvman->api_access_mutex);
wake_lock_init(&srvman->sm_wake_lock, WAKE_LOCK_SUSPEND, "srvman_wakelock");
}
void srvman_deinit(struct srvman *srvman)
{
struct scsc_service *service, *next;
SCSC_TAG_INFO(MXMAN, "\n");
list_for_each_entry_safe(service, next, &srvman->service_list, list) {
list_del(&service->list);
kfree(service);
}
mutex_destroy(&srvman->api_access_mutex);
mutex_destroy(&srvman->service_list_mutex);
wake_lock_destroy(&srvman->sm_wake_lock);
}
void srvman_set_error(struct srvman *srvman)
{
struct scsc_service *service;
SCSC_TAG_INFO(MXMAN, "\n");
srvman->error = true;
mutex_lock(&srvman->service_list_mutex);
list_for_each_entry(service, &srvman->service_list, list) {
complete(&service->sm_msg_start_completion);
complete(&service->sm_msg_stop_completion);
}
mutex_unlock(&srvman->service_list_mutex);
}
void srvman_clear_error(struct srvman *srvman)
{
SCSC_TAG_INFO(MXMAN, "\n");
srvman->error = false;
}
#ifndef MAXWELL_SKIP_MANAGER
static int wait_for_sm_msg_start_cfm(struct scsc_service *service)
{
int r;
if (0 == sm_completion_timeout_ms) {
/* Zero implies infinite wait, for development use only.
* r = -ERESTARTSYS if interrupted (e.g. Ctrl-C), 0 if completed
*/
r = wait_for_completion_interruptible(&service->sm_msg_start_completion);
if (r == -ERESTARTSYS) {
/* Paranoid sink of any pending event skipped by the interrupted wait */
r = wait_for_completion_timeout(&service->sm_msg_start_completion, HZ / 2);
if (r == 0) {
SCSC_TAG_ERR(MXMAN, "timed out\n");
return -ETIMEDOUT;
}
}
return r;
}
r = wait_for_completion_timeout(&service->sm_msg_start_completion, msecs_to_jiffies(sm_completion_timeout_ms));
if (r == 0) {
SCSC_TAG_ERR(MXMAN, "timeout\n");
return -ETIMEDOUT;
}
return 0;
}
#endif
static int wait_for_sm_msg_stop_cfm(struct scsc_service *service)
{
int r;
if (0 == sm_completion_timeout_ms) {
/* Zero implies infinite wait, for development use only.
* r = -ERESTARTSYS if interrupted (e.g. Ctrl-C), 0 if completed
*/
r = wait_for_completion_interruptible(&service->sm_msg_stop_completion);
if (r == -ERESTARTSYS) {
/* Paranoid sink of any pending event skipped by the interrupted wait */
r = wait_for_completion_timeout(&service->sm_msg_stop_completion, HZ / 2);
if (r == 0) {
SCSC_TAG_ERR(MXMAN, "timed out\n");
return -ETIMEDOUT;
}
}
return r;
}
r = wait_for_completion_timeout(&service->sm_msg_stop_completion, msecs_to_jiffies(sm_completion_timeout_ms));
if (r == 0) {
SCSC_TAG_ERR(MXMAN, "timeout\n");
return -ETIMEDOUT;
}
return 0;
}
#ifndef MAXWELL_SKIP_MANAGER
static int send_sm_msg_start_blocking(struct scsc_service *service, scsc_mifram_ref ref)
{
struct scsc_mx *mx = service->mx;
struct mxmgmt_transport *mxmgmt_transport = scsc_mx_get_mxmgmt_transport(mx);
int r;
struct sm_msg_packet message = { .service_id = service->id,
.msg = SM_MSG_START_REQ,
.optional_data = ref };
reinit_completion(&service->sm_msg_start_completion);
/* Send to FW in MM stream */
mxmgmt_transport_send(mxmgmt_transport, MMTRANS_CHAN_ID_SERVICE_MANAGEMENT, &message, sizeof(message));
r = wait_for_sm_msg_start_cfm(service);
if (r)
SCSC_TAG_ERR(MXMAN, "wait_for_sm_msg_start_cfm() failed: r=%d\n", r);
return r;
}
#endif
static int send_sm_msg_stop_blocking(struct scsc_service *service)
{
struct scsc_mx *mx = service->mx;
struct mxman *mxman = scsc_mx_get_mxman(mx);
struct mxmgmt_transport *mxmgmt_transport = scsc_mx_get_mxmgmt_transport(mx);
int r;
struct sm_msg_packet message = { .service_id = service->id,
.msg = SM_MSG_STOP_REQ,
.optional_data = 0 };
if (mxman->mxman_state == MXMAN_STATE_FAILED)
return 0;
reinit_completion(&service->sm_msg_stop_completion);
/* Send to FW in MM stream */
mxmgmt_transport_send(mxmgmt_transport, MMTRANS_CHAN_ID_SERVICE_MANAGEMENT, &message, sizeof(message));
r = wait_for_sm_msg_stop_cfm(service);
if (r)
SCSC_TAG_ERR(MXMAN, "wait_for_sm_msg_stop_cfm() for service=%p service->id=%d failed: r=%d\n", service, service->id, r);
return r;
}
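/*
 * The resulting start/stop handshake with the firmware, sketched from the
 * two blocking senders above:
 *
 *	host (AP)                                 firmware
 *	  |--- SM_MSG_START_REQ (mifram ref) ------->|
 *	  |<-------------------- SM_MSG_START_CFM ---|  completes sm_msg_start_completion
 *	  |--- SM_MSG_STOP_REQ --------------------->|
 *	  |<--------------------- SM_MSG_STOP_CFM ---|  completes sm_msg_stop_completion
 */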
#ifndef MAXWELL_SKIP_MANAGER
/*
 * Receive handler for messages from the FW along the Maxwell management transport
*/
static void srv_message_handler(const void *message, void *data)
{
struct srvman *srvman = (struct srvman *)data;
struct scsc_service *service;
const struct sm_msg_packet *msg = message;
bool found = false;
mutex_lock(&srvman->service_list_mutex);
list_for_each_entry(service, &srvman->service_list, list) {
if (service->id == msg->service_id) {
found = true;
break;
}
}
mutex_unlock(&srvman->service_list_mutex);
if (!found) {
SCSC_TAG_ERR(MXMAN, "No service for msg->service_id=%d", msg->service_id);
return;
}
/* Forward the message to the applicable service to deal with */
switch (msg->msg) {
case SM_MSG_START_CFM:
SCSC_TAG_INFO(MXMAN, "Received SM_MSG_START_CFM message service=%p with service_id=%d from the firmware\n",
service, msg->service_id);
complete(&service->sm_msg_start_completion);
break;
case SM_MSG_STOP_CFM:
SCSC_TAG_INFO(MXMAN, "Received SM_MSG_STOP_CFM message for service=%p with service_id=%d from the firmware\n",
service, msg->service_id);
complete(&service->sm_msg_stop_completion);
break;
default:
/* HERE: Unknown message, raise fault */
SCSC_TAG_WARNING(MXMAN, "Received unknown message for service=%p with service_id=%d from the firmware: msg->msg=%d\n",
service, msg->msg, msg->service_id);
break;
}
}
#endif
int scsc_mx_service_start(struct scsc_service *service, scsc_mifram_ref ref)
{
struct scsc_mx *mx = service->mx;
struct srvman *srvman = scsc_mx_get_srvman(mx);
#ifndef MAXWELL_SKIP_MANAGER
int r;
#endif
SCSC_TAG_INFO(MXMAN, "\n");
#ifdef CONFIG_SCSC_CHV_SUPPORT
if (chv_run)
return 0;
#endif
mutex_lock(&srvman->api_access_mutex);
wake_lock(&srvman->sm_wake_lock);
if (srvman->error) {
SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure\n");
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
return -EILSEQ;
}
#ifndef MAXWELL_SKIP_MANAGER
r = send_sm_msg_start_blocking(service, ref);
if (r) {
SCSC_TAG_ERR(MXMAN, "send_sm_msg_start_blocking() failed: r=%d\n", r);
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
return r;
}
#endif
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
return 0;
}
EXPORT_SYMBOL(scsc_mx_service_start);
int scsc_mx_service_stop(struct scsc_service *service)
{
struct scsc_mx *mx = service->mx;
struct srvman *srvman = scsc_mx_get_srvman(mx);
int r;
SCSC_TAG_INFO(MXMAN, "\n");
#ifndef MAXWELL_SKIP_MANAGER
#ifdef CONFIG_SCSC_CHV_SUPPORT
if (chv_run)
return 0;
#endif
mutex_lock(&srvman->api_access_mutex);
wake_lock(&srvman->sm_wake_lock);
if (srvman->error) {
SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure\n");
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
return -EILSEQ;
}
r = send_sm_msg_stop_blocking(service);
if (r) {
SCSC_TAG_ERR(MXMAN, "send_sm_msg_stop_blocking() failed: r=%d\n", r);
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
return -EIO;
}
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
#endif /* MAXWELL_SKIP_MANAGER */
return 0;
}
EXPORT_SYMBOL(scsc_mx_service_stop);
void srvman_freeze_services(struct srvman *srvman)
{
struct scsc_service *service;
struct mxman *mxman = scsc_mx_get_mxman(srvman->mx);
SCSC_TAG_INFO(MXMAN, "\n");
mutex_lock(&srvman->service_list_mutex);
list_for_each_entry(service, &srvman->service_list, list) {
service->client->stop_on_failure(service->client);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
reinit_completion(&mxman->recovery_completion);
#else
INIT_COMPLETION(mxman->recovery_completion);
#endif
mutex_unlock(&srvman->service_list_mutex);
SCSC_TAG_INFO(MXMAN, "OK\n");
}
void srvman_unfreeze_services(struct srvman *srvman, u16 scsc_panic_code)
{
struct scsc_service *service;
SCSC_TAG_INFO(MXMAN, "\n");
mutex_lock(&srvman->service_list_mutex);
list_for_each_entry(service, &srvman->service_list, list) {
service->client->failure_reset(service->client, scsc_panic_code);
}
mutex_unlock(&srvman->service_list_mutex);
SCSC_TAG_INFO(MXMAN, "OK\n");
}
/** Signal a failure detected by the Client. This will trigger the system-wide
* failure handling procedure: _All_ Clients will be called back via
* their stop_on_failure() handler as a side-effect.
*/
void scsc_mx_service_service_failed(struct scsc_service *service)
{
struct scsc_mx *mx = service->mx;
struct srvman *srvman = scsc_mx_get_srvman(mx);
SCSC_TAG_INFO(MXMAN, "\n");
srvman_set_error(srvman);
mxman_fail(scsc_mx_get_mxman(mx), SCSC_PANIC_CODE_HOST << 15);
}
EXPORT_SYMBOL(scsc_mx_service_service_failed);
void scsc_mx_service_close(struct scsc_service *service)
{
struct mxman *mxman = scsc_mx_get_mxman(service->mx);
struct scsc_mx *mx = service->mx;
struct srvman *srvman = scsc_mx_get_srvman(mx);
#ifndef MAXWELL_SKIP_MANAGER
bool empty;
#endif
SCSC_TAG_INFO(MXMAN, "\n");
mutex_lock(&srvman->api_access_mutex);
wake_lock(&srvman->sm_wake_lock);
if (srvman->error) {
SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure\n");
mutex_unlock(&srvman->api_access_mutex);
wake_unlock(&srvman->sm_wake_lock);
return;
}
/* remove the service from the list and deallocate the service memory */
mutex_lock(&srvman->service_list_mutex);
list_del(&service->list);
#ifndef MAXWELL_SKIP_MANAGER
empty = list_empty(&srvman->service_list);
#endif
mutex_unlock(&srvman->service_list_mutex);
#ifndef MAXWELL_SKIP_MANAGER
if (empty)
/* unregister channel handler */
mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mx), MMTRANS_CHAN_ID_SERVICE_MANAGEMENT,
NULL, NULL);
#endif
kfree(service);
mxman_close(mxman);
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
}
EXPORT_SYMBOL(scsc_mx_service_close);
/* Consider moving this to a public scsc_mx interface */
struct scsc_service *scsc_mx_service_open(struct scsc_mx *mx, enum scsc_service_id id, struct scsc_service_client *client, int *status)
{
int ret;
struct scsc_service *service;
struct srvman *srvman = scsc_mx_get_srvman(mx);
struct mxman *mxman = scsc_mx_get_mxman(mx);
#ifndef MAXWELL_SKIP_MANAGER
bool empty;
#endif
SCSC_TAG_INFO(MXMAN, "\n");
mutex_lock(&srvman->api_access_mutex);
wake_lock(&srvman->sm_wake_lock);
if (srvman->error) {
SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure\n");
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
*status = -EILSEQ;
return NULL;
}
if (mxman->mxman_state == MXMAN_STATE_FAILED) {
int r;
mutex_unlock(&srvman->api_access_mutex);
r = wait_for_completion_timeout(&mxman->recovery_completion,
msecs_to_jiffies(SCSC_MX_SERVICE_RECOVERY_TIMEOUT));
if (r == 0) {
SCSC_TAG_ERR(MXMAN, "Recovery timeout\n");
wake_unlock(&srvman->sm_wake_lock);
*status = -EIO;
return NULL;
}
mutex_lock(&srvman->api_access_mutex);
}
service = kmalloc(sizeof(struct scsc_service), GFP_KERNEL);
if (service) {
		/* The Maxwell manager should allocate memory and download the FW */
ret = mxman_open(mxman);
if (ret) {
kfree(service);
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
*status = ret;
return NULL;
}
/* Initialise service struct here */
service->mx = mx;
service->id = id;
service->client = client;
init_completion(&service->sm_msg_start_completion);
init_completion(&service->sm_msg_stop_completion);
mutex_lock(&srvman->service_list_mutex);
#ifndef MAXWELL_SKIP_MANAGER
empty = list_empty(&srvman->service_list);
#endif
mutex_unlock(&srvman->service_list_mutex);
#ifndef MAXWELL_SKIP_MANAGER
if (empty)
mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mx), MMTRANS_CHAN_ID_SERVICE_MANAGEMENT,
&srv_message_handler, srvman);
#endif
mutex_lock(&srvman->service_list_mutex);
list_add_tail(&service->list, &srvman->service_list);
mutex_unlock(&srvman->service_list_mutex);
} else
*status = -ENOMEM;
wake_unlock(&srvman->sm_wake_lock);
mutex_unlock(&srvman->api_access_mutex);
return service;
}
EXPORT_SYMBOL(scsc_mx_service_open);
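/*
 * Canonical client call sequence, as a sketch only: "mx", "client", "id"
 * and the sizes are placeholders, and error handling is elided.
 *
 *	int status;
 *	scsc_mifram_ref ref;
 *	struct scsc_service *sv;
 *
 *	sv = scsc_mx_service_open(mx, id, &client, &status);
 *	scsc_mx_service_mifram_alloc(sv, conf_size, &ref, 4096);
 *	scsc_mx_service_start(sv, ref);		// config handed to FW by reference
 *	...
 *	scsc_mx_service_stop(sv);
 *	scsc_mx_service_mifram_free(sv, ref);
 *	scsc_mx_service_close(sv);
 */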
/** Allocate a contiguous block of SDRAM accessible to the Client Driver */
int scsc_mx_service_mifram_alloc(struct scsc_service *service, size_t nbytes, scsc_mifram_ref *ref, u32 align)
{
struct scsc_mx *mx = service->mx;
void *mem;
int ret;
mem = miframman_alloc(scsc_mx_get_ramman(mx), nbytes, align);
if (!mem) {
SCSC_TAG_ERR(MXMAN, "miframman_alloc() failed\n");
return -ENOMEM;
}
SCSC_TAG_DEBUG(MXMAN, "Allocated mem %p\n", mem);
/* Transform native pointer and get mifram_ref type */
ret = scsc_mx_service_mif_ptr_to_addr(service, mem, ref);
if (ret) {
SCSC_TAG_ERR(MXMAN, "scsc_mx_service_mif_ptr_to_addr() failed: ret=%d", ret);
miframman_free(scsc_mx_get_ramman(mx), mem);
} else
SCSC_TAG_DEBUG(MXMAN, "mem %p ref %d\n", mem, *ref);
return ret;
}
EXPORT_SYMBOL(scsc_mx_service_mifram_alloc);
/** Free a contiguous block of SDRAM */
void scsc_mx_service_mifram_free(struct scsc_service *service, scsc_mifram_ref ref)
{
struct scsc_mx *mx = service->mx;
void *mem;
mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
SCSC_TAG_DEBUG(MXMAN, "**** Freeing %p\n", mem);
miframman_free(scsc_mx_get_ramman(mx), mem);
}
EXPORT_SYMBOL(scsc_mx_service_mifram_free);
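/*
 * A scsc_mifram_ref is the portable form of a shared-DRAM location,
 * meaningful to both AP and firmware; the two helpers below convert in
 * either direction (sketch):
 *
 *	void *p = scsc_mx_service_mif_addr_to_ptr(sv, ref);	// ref -> kernel pointer
 *	scsc_mx_service_mif_ptr_to_addr(sv, p, &ref);		// kernel pointer -> ref
 */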
/* MIF ALLOCATIONS */
bool scsc_mx_service_alloc_mboxes(struct scsc_service *service, int n, int *first_mbox_index)
{
struct scsc_mx *mx = service->mx;
return mifmboxman_alloc_mboxes(scsc_mx_get_mboxman(mx), n, first_mbox_index);
}
EXPORT_SYMBOL(scsc_mx_service_alloc_mboxes);
void scsc_service_free_mboxes(struct scsc_service *service, int n, int first_mbox_index)
{
struct scsc_mx *mx = service->mx;
return mifmboxman_free_mboxes(scsc_mx_get_mboxman(mx), first_mbox_index, n);
}
EXPORT_SYMBOL(scsc_service_free_mboxes);
u32 *scsc_mx_service_get_mbox_ptr(struct scsc_service *service, int mbox_index)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif_abs, mbox_index);
}
EXPORT_SYMBOL(scsc_mx_service_get_mbox_ptr);
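/*
 * Mailbox usage sketch (illustrative): reserve a run of mailboxes, write
 * through the returned pointer (valid between map and unmap), then release:
 *
 *	int first;
 *	if (scsc_mx_service_alloc_mboxes(sv, 2, &first)) {
 *		u32 *mbox = scsc_mx_service_get_mbox_ptr(sv, first);
 *		*mbox = 0xcafe;		// lands in the HW mailbox register
 *		scsc_service_free_mboxes(sv, 2, first);
 *	}
 */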
int scsc_service_mifintrbit_bit_mask_status_get(struct scsc_service *service)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->irq_bit_mask_status_get(mif_abs);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_bit_mask_status_get);
int scsc_service_mifintrbit_get(struct scsc_service *service)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->irq_get(mif_abs);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_get);
void scsc_service_mifintrbit_bit_set(struct scsc_service *service, int which_bit, enum scsc_mifintr_target dir)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->irq_bit_set(mif_abs, which_bit, dir);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_bit_set);
void scsc_service_mifintrbit_bit_clear(struct scsc_service *service, int which_bit)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->irq_bit_clear(mif_abs, which_bit);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_bit_clear);
void scsc_service_mifintrbit_bit_mask(struct scsc_service *service, int which_bit)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->irq_bit_mask(mif_abs, which_bit);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_bit_mask);
void scsc_service_mifintrbit_bit_unmask(struct scsc_service *service, int which_bit)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->irq_bit_unmask(mif_abs, which_bit);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_bit_unmask);
int scsc_service_mifintrbit_alloc_fromhost(struct scsc_service *service, enum scsc_mifintr_target dir)
{
struct scsc_mx *mx = service->mx;
return mifintrbit_alloc_fromhost(scsc_mx_get_intrbit(mx), dir);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_alloc_fromhost);
int scsc_service_mifintrbit_free_fromhost(struct scsc_service *service, int which_bit, enum scsc_mifintr_target dir)
{
struct scsc_mx *mx = service->mx;
return mifintrbit_free_fromhost(scsc_mx_get_intrbit(mx), which_bit, dir);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_free_fromhost);
int scsc_service_mifintrbit_register_tohost(struct scsc_service *service, void (*handler)(int irq, void *data), void *data)
{
struct scsc_mx *mx = service->mx;
SCSC_TAG_DEBUG(MXMAN, "Registering %pS\n", handler);
return mifintrbit_alloc_tohost(scsc_mx_get_intrbit(mx), handler, data);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_register_tohost);
int scsc_service_mifintrbit_unregister_tohost(struct scsc_service *service, int which_bit)
{
struct scsc_mx *mx = service->mx;
SCSC_TAG_DEBUG(MXMAN, "Deregistering int for bit %d\n", which_bit);
return mifintrbit_free_tohost(scsc_mx_get_intrbit(mx), which_bit);
}
EXPORT_SYMBOL(scsc_service_mifintrbit_unregister_tohost);
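/*
 * To-host interrupt sketch (illustrative): the register call reserves a MIF
 * interrupt bit and returns its number, which is later used to free it:
 *
 *	static void foo_handler(int irq, void *data) { ... }
 *
 *	int bit = scsc_service_mifintrbit_register_tohost(sv, foo_handler, sv);
 *	...
 *	scsc_service_mifintrbit_unregister_tohost(sv, bit);
 */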
void *scsc_mx_service_mif_addr_to_ptr(struct scsc_service *service, scsc_mifram_ref ref)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
return mif_abs->get_mifram_ptr(mif_abs, ref);
}
EXPORT_SYMBOL(scsc_mx_service_mif_addr_to_ptr);
int scsc_mx_service_mif_ptr_to_addr(struct scsc_service *service, void *mem_ptr, scsc_mifram_ref *ref)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
/* Transform native pointer and get mifram_ref type */
if (mif_abs->get_mifram_ref(mif_abs, mem_ptr, ref)) {
SCSC_TAG_ERR(MXMAN, "ooops somethig went wrong");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(scsc_mx_service_mif_ptr_to_addr);
int scsc_mx_service_mif_dump_registers(struct scsc_service *service)
{
struct scsc_mx *mx = service->mx;
struct scsc_mif_abs *mif_abs;
mif_abs = scsc_mx_get_mif_abs(mx);
/* Dump registers */
mif_abs->mif_dump_registers(mif_abs);
return 0;
}
EXPORT_SYMBOL(scsc_mx_service_mif_dump_registers);
struct device *scsc_service_get_device(struct scsc_service *service)
{
return scsc_mx_get_device(service->mx);
}
EXPORT_SYMBOL(scsc_service_get_device);
int scsc_service_force_panic(struct scsc_service *service)
{
struct mxman *mxman = scsc_mx_get_mxman(service->mx);
return mxman_force_panic(mxman);
}
EXPORT_SYMBOL(scsc_service_force_panic);

View file

@ -0,0 +1,35 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef SERVMAN_MESSAGES_H__
#define SERVMAN_MESSAGES_H__
#include <scsc/scsc_mifram.h>
/**
* Maxwell Service Management Messages.
*
 * TODO: common definition with the host, generated.
*/
enum {
SM_MSG_START_REQ,
SM_MSG_START_CFM,
SM_MSG_STOP_REQ,
SM_MSG_STOP_CFM,
};
/* Transport format for service management messages across the
* Maxwell management transport.
*
 * TODO: common definition with the host, generated.
*/
struct sm_msg_packet {
uint8_t service_id;
uint8_t msg;
scsc_mifram_ref optional_data;
} __packed;
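/* On the wire this is byte-packed: service_id, msg, then the optional
 * mifram reference. Assuming scsc_mifram_ref is a 32-bit value (see
 * scsc_mifram.h), the whole packet is 6 octets.
 */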
#endif /* SERVMAN_MESSAGES_H__ */

View file

@ -0,0 +1,31 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#ifndef _SRVMAN_H
#define _SRVMAN_H
#include <linux/wakelock.h>
struct srvman;
void srvman_init(struct srvman *srvman, struct scsc_mx *mx);
void srvman_freeze_services(struct srvman *srvman);
void srvman_unfreeze_services(struct srvman *srvman, u16 scsc_panic_code);
void srvman_set_error(struct srvman *srvman);
void srvman_clear_error(struct srvman *srvman);
void srvman_deinit(struct srvman *srvman);
struct srvman {
struct scsc_mx *mx;
struct list_head service_list;
struct mutex service_list_mutex;
struct mutex api_access_mutex;
bool error;
struct wake_lock sm_wake_lock;
};
#endif

View file

@ -0,0 +1,53 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
#include <scsc/scsc_logring.h>
#include "suspendmon.h"
#include "scsc_mif_abs.h"
#include "mxman.h"
static int suspendmon_suspend(struct scsc_mif_abs *mif, void *data)
{
struct suspendmon *suspendmon = (struct suspendmon *)data;
SCSC_TAG_DEBUG(MXMAN, "suspendmon=%p suspendmon->mx=%p mxman=%p\n",
suspendmon, suspendmon->mx, scsc_mx_get_mxman(suspendmon->mx));
return mxman_suspend(scsc_mx_get_mxman(suspendmon->mx));
}
static void suspendmon_resume(struct scsc_mif_abs *mif, void *data)
{
struct suspendmon *suspendmon = (struct suspendmon *)data;
SCSC_TAG_DEBUG(MXMAN, "suspendmon=%p suspendmon->mx=%p mxman=%p\n",
suspendmon, suspendmon->mx, scsc_mx_get_mxman(suspendmon->mx));
mxman_resume(scsc_mx_get_mxman(suspendmon->mx));
}
void suspendmon_init(struct suspendmon *suspendmon, struct scsc_mx *mx)
{
struct scsc_mif_abs *mif;
suspendmon->mx = mx;
mif = scsc_mx_get_mif_abs(mx);
/* register callbacks with mif abstraction */
if (mif->suspend_reg_handler)
mif->suspend_reg_handler(mif, suspendmon_suspend, suspendmon_resume, (void *)suspendmon);
}
void suspendmon_deinit(struct suspendmon *suspendmon)
{
struct scsc_mif_abs *mif;
mif = scsc_mx_get_mif_abs(suspendmon->mx);
if (mif->suspend_unreg_handler)
mif->suspend_unreg_handler(mif);
}

View file

@ -0,0 +1,23 @@
/****************************************************************************
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
*
****************************************************************************/
/* Baroque suspend/resume handler registration interface */
#ifndef _SUSPENDMON_H
#define _SUSPENDMON_H
#include "mxman.h"
struct suspendmon;
void suspendmon_init(struct suspendmon *suspendmon, struct scsc_mx *mx);
void suspendmon_deinit(struct suspendmon *suspendmon);
struct suspendmon {
struct scsc_mx *mx;
};
#endif /* _SUSPENDMON_H */

View file

@ -0,0 +1,3 @@
config SCSC_BT
tristate "SCSC MX BT support"
depends on SCSC_CORE

View file

@ -0,0 +1,18 @@
# Needed since this subdir is symlinked in the main Kernel tree;
# without this our samsung subdir is NOT cleaned.
clean-files := *.o *.ko
#
# Maxwell BT
obj-$(CONFIG_SCSC_BT) += scsc_bt.o
scsc_bt-y += scsc_bt_module.o scsc_shm.o scsc_avdtp_detect.o scsc_bt_dump.o
ccflags-y += $(CONFIG_SAMSUNG_MAXWELL_EXTRA)
## See the sibling scsc/ Makefile for an explanation of the following
## ifeq/else: with CONFIG_SCSC_DEBUG=m, "ccflags-$(CONFIG_SCSC_DEBUG)"
## would expand to the unused ccflags-m, so the =m case is handled explicitly.
ifeq ($(CONFIG_SCSC_DEBUG), m)
ccflags-y += -DCONFIG_SCSC_PRINTK
else
ccflags-$(CONFIG_SCSC_DEBUG) += -DCONFIG_SCSC_PRINTK
endif

View file

@ -0,0 +1,344 @@
/****************************************************************************
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
*
****************************************************************************/
/* MX BT shared memory interface */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <asm/io.h>
#include <linux/wakelock.h>
#include <scsc/scsc_mx.h>
#include <scsc/scsc_mifram.h>
#include <scsc/api/bsmhcp.h>
#include <scsc/scsc_logring.h>
#include "scsc_bt_priv.h"
#include "scsc_shm.h"
/**
* Coex AVDTP detection.
*
* Strategy:
*
* - On the L2CAP signaling CID, look for connect requests with the AVDTP PSM
*
* - Assume the first AVDTP connection is the signaling channel.
* (AVDTP 1.3, section 5.4.6 "Transport and Signaling Channel Establishment")
*
* - If a signaling channel exists, assume the next connection is the streaming channel
*
* - If a streaming channel exists, look for AVDTP start, suspend, abort and close signals
* -- When one of these is found, signal the FW with updated acl_id and cid
*
* - If the ACL is torn down, make sure to clean up.
*
 */
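/* For reference, the L2CAP signaling PDU shape the HCI_L2CAP_* accessors
 * (scsc_bt_priv.h) walk over: a sketch from the Bluetooth core spec, with
 * the exact byte offsets depending on where those macros anchor in the
 * ACL payload.
 *
 *	basic header:  length (2, LE) | CID (2, LE)
 *	signaling:     code (1) | identifier (1) | length (2, LE) | data...
 *	CONNECT_REQ:   data = PSM (2) | source CID (2)
 *	CONNECT_RSP:   data = destination CID (2) | source CID (2) | result (2) | status (2)
 */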
/* All of the functions are duplicated for the Rx and Tx directions, since these run in separate
* threads, and might otherwise step on each other's toes by changing the "data" pointer in
* mid-inspection */
/* All four return true if the bsmhcp header should be updated */
bool scsc_avdtp_detect_connection_tx(uint16_t hci_connection_handle, const unsigned char *data, uint16_t length)
{
uint8_t code = 0;
if (length < AVDTP_DETECT_MIN_DATA_LENGTH) {
SCSC_TAG_DEBUG(BT_H4, "Ignoring L2CAP signal, length %u)\n", length);
return false;
}
code = HCI_L2CAP_CODE(data);
switch (code) {
case L2CAP_CODE_CONNECT_REQ:
{
if (HCI_L2CAP_CON_REQ_PSM(data) == L2CAP_AVDTP_PSM) {
if (bt_service.avdtp_signaling_src_cid == 0) {
bt_service.avdtp_signaling_src_cid = HCI_L2CAP_SOURCE_CID(data);
SCSC_TAG_DEBUG(BT_H4, "Signaling dst CID: 0x%04X, src CID: 0x%04X)\n",
bt_service.avdtp_signaling_dst_cid,
bt_service.avdtp_signaling_src_cid);
} else {
bt_service.avdtp_streaming_src_cid = HCI_L2CAP_SOURCE_CID(data);
SCSC_TAG_DEBUG(BT_H4, "Streaming dst CID: 0x%04X, src CID: 0x%04X)\n",
bt_service.avdtp_streaming_dst_cid,
bt_service.avdtp_streaming_src_cid);
}
}
break;
}
case L2CAP_CODE_CONNECT_RSP:
{
if (length < AVDTP_DETECT_MIN_DATA_LENGTH_CON_RSP) {
SCSC_TAG_WARNING(BT_H4, "Ignoring L2CAP CON RSP in short packet, length %u)\n",
length);
return false;
}
if (bt_service.avdtp_signaling_src_cid == HCI_L2CAP_SOURCE_CID(data) &&
HCI_L2CAP_CON_RSP_RESULT(data) == HCI_L2CAP_CON_RSP_RESULT_SUCCESS &&
bt_service.avdtp_signaling_dst_cid == 0) {
/* We're responding, so the src cid is actually the remote cid. flip them */
bt_service.avdtp_signaling_dst_cid = bt_service.avdtp_signaling_src_cid;
bt_service.avdtp_signaling_src_cid = HCI_L2CAP_RSP_DEST_CID(data);
bt_service.avdtp_hci_connection_handle = hci_connection_handle;
SCSC_TAG_DEBUG(BT_H4, "Signaling dst CID: 0x%04X, src CID: 0x%04X)\n",
bt_service.avdtp_signaling_dst_cid, bt_service.avdtp_signaling_src_cid);
} else if (bt_service.avdtp_streaming_src_cid == HCI_L2CAP_SOURCE_CID(data) &&
HCI_L2CAP_CON_RSP_RESULT(data) == HCI_L2CAP_CON_RSP_RESULT_SUCCESS &&
bt_service.avdtp_streaming_dst_cid == 0) {
/* We're responding, so the src cid is actually the remote cid. flip them */
bt_service.avdtp_streaming_dst_cid = bt_service.avdtp_streaming_src_cid;
bt_service.avdtp_streaming_src_cid = HCI_L2CAP_RSP_DEST_CID(data);
SCSC_TAG_DEBUG(BT_H4, "Streaming dst CID: 0x%04X, src CID: 0x%04X)\n",
bt_service.avdtp_streaming_dst_cid, bt_service.avdtp_streaming_src_cid);
}
break;
}
case L2CAP_CODE_DISCONNECT_REQ:
{
/* Our src cid always holds the local CID. When looking at the disconnect req in the Tx
* direction, that's also termed the src cid in the AVDTP signal */
if (bt_service.avdtp_signaling_src_cid == HCI_L2CAP_SOURCE_CID(data)) {
SCSC_TAG_DEBUG(BT_H4, "Signaling src CID disconnected: 0x%04X (dst CID: 0x%04X)\n",
bt_service.avdtp_signaling_src_cid,
bt_service.avdtp_signaling_dst_cid);
bt_service.avdtp_signaling_src_cid = bt_service.avdtp_signaling_dst_cid = 0;
bt_service.avdtp_streaming_src_cid = bt_service.avdtp_streaming_dst_cid = 0;
bt_service.avdtp_hci_connection_handle = 0;
} else if (bt_service.avdtp_streaming_src_cid == HCI_L2CAP_SOURCE_CID(data)) {
SCSC_TAG_DEBUG(BT_H4, "Streaming src CID disconnected: 0x%04X (dst CID: 0x%04X)\n",
bt_service.avdtp_streaming_src_cid,
bt_service.avdtp_streaming_dst_cid);
bt_service.avdtp_streaming_src_cid = bt_service.avdtp_streaming_dst_cid = 0;
return true;
}
break;
}
default:
break;
}
return false;
}
bool scsc_avdtp_detect_connection_rx(uint16_t hci_connection_handle, const unsigned char *data, uint16_t length)
{
uint8_t code = 0;
if (length < AVDTP_DETECT_MIN_DATA_LENGTH) {
SCSC_TAG_DEBUG(BT_H4, "Ignoring L2CAP signal, length %u)\n",
length);
return false;
}
code = HCI_L2CAP_CODE(data);
switch (code) {
case L2CAP_CODE_CONNECT_REQ:
{
if (HCI_L2CAP_CON_REQ_PSM(data) == L2CAP_AVDTP_PSM) {
if (bt_service.avdtp_signaling_src_cid == 0) {
/* In the Rx direction, the AVDTP source cid is the remote cid, but we'll save it
* as the source cid for now, and flip them when we see a response. */
bt_service.avdtp_signaling_src_cid = HCI_L2CAP_SOURCE_CID(data);
SCSC_TAG_DEBUG(BT_H4, "Signaling dst CID: 0x%04X, src CID: 0x%04X\n",
bt_service.avdtp_signaling_dst_cid,
bt_service.avdtp_signaling_src_cid);
} else {
/* In the Rx direction, the AVDTP source cid is the remote cid, but we'll save it
* as the source cid for now, and flip them when we see a response. */
bt_service.avdtp_streaming_src_cid = HCI_L2CAP_SOURCE_CID(data);
SCSC_TAG_DEBUG(BT_H4, "Streaming dst CID: 0x%04X, src CID: 0x%04X\n",
bt_service.avdtp_streaming_dst_cid,
bt_service.avdtp_streaming_src_cid);
}
}
break;
}
case L2CAP_CODE_CONNECT_RSP:
{
if (length < AVDTP_DETECT_MIN_DATA_LENGTH_CON_RSP) {
SCSC_TAG_WARNING(BT_H4, "Ignoring L2CAP CON RSP in short packet, length %u)\n",
length);
return false;
}
if (bt_service.avdtp_signaling_src_cid == HCI_L2CAP_SOURCE_CID(data) &&
HCI_L2CAP_CON_RSP_RESULT(data) == HCI_L2CAP_CON_RSP_RESULT_SUCCESS &&
bt_service.avdtp_signaling_dst_cid == 0) {
bt_service.avdtp_signaling_dst_cid = HCI_L2CAP_RSP_DEST_CID(data);
bt_service.avdtp_hci_connection_handle = hci_connection_handle;
SCSC_TAG_DEBUG(BT_H4, "Signaling dst CID: 0x%04X, src CID: 0x%04X)\n",
bt_service.avdtp_signaling_dst_cid, bt_service.avdtp_signaling_src_cid);
} else if (bt_service.avdtp_streaming_src_cid == HCI_L2CAP_SOURCE_CID(data) &&
HCI_L2CAP_CON_RSP_RESULT(data) == HCI_L2CAP_CON_RSP_RESULT_SUCCESS &&
bt_service.avdtp_streaming_dst_cid == 0){
bt_service.avdtp_streaming_dst_cid = HCI_L2CAP_RSP_DEST_CID(data);
SCSC_TAG_DEBUG(BT_H4, "Streaming dst CID: 0x%04X, src CID: 0x%04X)\n",
bt_service.avdtp_streaming_dst_cid, bt_service.avdtp_streaming_src_cid);
}
break;
}
case L2CAP_CODE_DISCONNECT_REQ:
{
/* Our "dst" variable always holds the remote CID. This may be the source or destination CID
* in the signal, depending on the direction of traffic we're snooping... */
if (bt_service.avdtp_signaling_src_cid == HCI_L2CAP_RSP_DEST_CID(data)) {
SCSC_TAG_DEBUG(BT_H4, "Signaling src CID disconnected: 0x%04X (dst CID: 0x%04X)\n",
bt_service.avdtp_signaling_src_cid,
bt_service.avdtp_signaling_dst_cid);
bt_service.avdtp_signaling_src_cid = bt_service.avdtp_signaling_dst_cid = 0;
bt_service.avdtp_streaming_src_cid = bt_service.avdtp_streaming_dst_cid = 0;
bt_service.avdtp_hci_connection_handle = 0;
} else if (bt_service.avdtp_streaming_src_cid == HCI_L2CAP_RSP_DEST_CID(data)) {
SCSC_TAG_DEBUG(BT_H4, "Streaming CID disconnected: 0x%04X (dst CID: 0x%04X)\n",
bt_service.avdtp_streaming_src_cid,
bt_service.avdtp_streaming_dst_cid);
bt_service.avdtp_streaming_src_cid = bt_service.avdtp_streaming_dst_cid = 0;
return true;
}
break;
}
default:
break;
}
return false;
}
uint8_t scsc_avdtp_detect_signaling_tx(const unsigned char *data)
{
u8 signal_id = AVDTP_SIGNAL_ID(data);
u8 message_type = AVDTP_MESSAGE_TYPE(data);
SCSC_TAG_DEBUG(BT_H4, "id: 0x%02X, type: 0x%02X)\n", signal_id, message_type);
if (message_type == AVDTP_MESSAGE_TYPE_RSP_ACCEPT) {
if (signal_id == AVDTP_SIGNAL_ID_START)
return AVDTP_DETECT_SIGNALING_ACTIVE;
else if (signal_id == AVDTP_SIGNAL_ID_CLOSE || signal_id == AVDTP_SIGNAL_ID_SUSPEND ||
signal_id == AVDTP_SIGNAL_ID_ABORT)
return AVDTP_DETECT_SIGNALING_INACTIVE;
}
return AVDTP_DETECT_SIGNALING_IGNORE;
}
uint8_t scsc_avdtp_detect_signaling_rx(const unsigned char *data)
{
u8 signal_id = AVDTP_SIGNAL_ID(data);
#ifdef CONFIG_SCSC_PRINTK
u8 message_type = AVDTP_MESSAGE_TYPE(data);
#endif
SCSC_TAG_DEBUG(BT_H4, "id: 0x%02X, type: 0x%02X)\n", signal_id, message_type);
if (AVDTP_MESSAGE_TYPE(data) == AVDTP_MESSAGE_TYPE_RSP_ACCEPT) {
if (signal_id == AVDTP_SIGNAL_ID_START)
return AVDTP_DETECT_SIGNALING_ACTIVE;
else if (signal_id == AVDTP_SIGNAL_ID_CLOSE || signal_id == AVDTP_SIGNAL_ID_SUSPEND ||
signal_id == AVDTP_SIGNAL_ID_ABORT)
return AVDTP_DETECT_SIGNALING_INACTIVE;
}
return AVDTP_DETECT_SIGNALING_IGNORE;
}
void scsc_avdtp_detect_tx(u16 hci_connection_handle, const unsigned char *data, uint16_t length)
{
/* Look for AVDTP connections */
bool avdtp_gen_bg_int = false;
uint16_t cid_to_fw = 0;
if (HCI_L2CAP_RX_CID((const unsigned char *)(data)) == L2CAP_SIGNALING_CID) {
if (scsc_avdtp_detect_connection_tx(hci_connection_handle, data, length)) {
avdtp_gen_bg_int = true;
cid_to_fw = bt_service.avdtp_streaming_dst_cid;
}
} else if (bt_service.avdtp_signaling_dst_cid != 0 &&
bt_service.avdtp_signaling_dst_cid == HCI_L2CAP_RX_CID((const unsigned char *)(data)) &&
length >= AVDTP_DETECT_MIN_AVDTP_LENGTH) {
uint8_t result = scsc_avdtp_detect_signaling_tx(data);
if (result != AVDTP_DETECT_SIGNALING_IGNORE) {
avdtp_gen_bg_int = true;
if (result != AVDTP_DETECT_SIGNALING_INACTIVE)
cid_to_fw = bt_service.avdtp_streaming_dst_cid;
}
}
if (avdtp_gen_bg_int) {
bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id = cid_to_fw |
(bt_service.avdtp_hci_connection_handle << 16);
SCSC_TAG_DEBUG(BT_H4, "Found AVDTP signal. aclid: 0x%04X, cid: 0x%04X, streamid: 0x%08X\n",
bt_service.avdtp_hci_connection_handle,
cid_to_fw,
bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id);
mmiowb();
scsc_service_mifintrbit_bit_set(bt_service.service,
bt_service.bsmhcp_protocol->header.ap_to_bg_int_src, SCSC_MIFINTR_TARGET_R4);
}
}
void scsc_avdtp_detect_rx(u16 hci_connection_handle, const unsigned char *data, uint16_t length)
{
bool avdtp_gen_bg_int = false;
uint16_t cid_to_fw = 0;
/* Look for AVDTP connections */
if (HCI_L2CAP_RX_CID((const unsigned char *)(data)) == L2CAP_SIGNALING_CID) {
if (scsc_avdtp_detect_connection_rx(hci_connection_handle,
(const unsigned char *)(data), length)) {
avdtp_gen_bg_int = true;
cid_to_fw = bt_service.avdtp_streaming_dst_cid;
}
} else if (bt_service.avdtp_signaling_src_cid != 0 &&
HCI_L2CAP_RX_CID((const unsigned char *)(data)) == bt_service.avdtp_signaling_src_cid &&
length >= AVDTP_DETECT_MIN_AVDTP_LENGTH) {
uint8_t result = scsc_avdtp_detect_signaling_rx((const unsigned char *)(data));
if (result != AVDTP_DETECT_SIGNALING_IGNORE) {
avdtp_gen_bg_int = true;
if (result != AVDTP_DETECT_SIGNALING_INACTIVE)
cid_to_fw = bt_service.avdtp_streaming_dst_cid;
}
}
if (avdtp_gen_bg_int) {
bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id =
cid_to_fw | (bt_service.avdtp_hci_connection_handle << 16);
SCSC_TAG_DEBUG(BT_H4, "Found AVDTP signal. aclid: 0x%04X, cid: 0x%04X, streamid: 0x%08X\n",
bt_service.avdtp_hci_connection_handle,
cid_to_fw,
bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id);
mmiowb();
scsc_service_mifintrbit_bit_set(bt_service.service,
bt_service.bsmhcp_protocol->header.ap_to_bg_int_src, SCSC_MIFINTR_TARGET_R4);
}
}

View file

@ -0,0 +1,529 @@
/****************************************************************************
*
* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
* BT information dumper
*
****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/wakelock.h>
#include "scsc_bt_priv.h"
#include "scsc_bt_hci.h"
static char scsc_hci_evt_decode_buffer[512];
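/* HCI_CMD_DECODE() is assumed (see scsc_bt_hci.h) to expand to
 * "case <opcode>: ret = #<opcode>; break;", so the switch below is an
 * opcode-to-name table with "NA" as the fallback.
 */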
const char *scsc_hci_evt_decode_command_code(u16 hci_command_code)
{
const char *ret = "NA";
switch (hci_command_code) {
HCI_CMD_DECODE(HCI_INQUIRY);
HCI_CMD_DECODE(HCI_INQUIRY_CANCEL);
HCI_CMD_DECODE(HCI_PERIODIC_INQUIRY_MODE);
HCI_CMD_DECODE(HCI_EXIT_PERIODIC_INQUIRY_MODE);
HCI_CMD_DECODE(HCI_CREATE_CONNECTION);
HCI_CMD_DECODE(HCI_DISCONNECT);
HCI_CMD_DECODE(HCI_ADD_SCO_CONNECTION);
HCI_CMD_DECODE(HCI_CREATE_CONNECTION_CANCEL);
HCI_CMD_DECODE(HCI_ACCEPT_CONNECTION_REQ);
HCI_CMD_DECODE(HCI_REJECT_CONNECTION_REQ);
HCI_CMD_DECODE(HCI_LINK_KEY_REQ_REPLY);
HCI_CMD_DECODE(HCI_LINK_KEY_REQ_NEG_REPLY);
HCI_CMD_DECODE(HCI_PIN_CODE_REQ_REPLY);
HCI_CMD_DECODE(HCI_PIN_CODE_REQ_NEG_REPLY);
HCI_CMD_DECODE(HCI_CHANGE_CONN_PKT_TYPE);
HCI_CMD_DECODE(HCI_AUTH_REQ);
HCI_CMD_DECODE(HCI_SET_CONN_ENCRYPTION);
HCI_CMD_DECODE(HCI_CHANGE_CONN_LINK_KEY);
HCI_CMD_DECODE(HCI_MASTER_LINK_KEY);
HCI_CMD_DECODE(HCI_REMOTE_NAME_REQ);
HCI_CMD_DECODE(HCI_REMOTE_NAME_REQ_CANCEL);
HCI_CMD_DECODE(HCI_READ_REMOTE_SUPP_FEATURES);
HCI_CMD_DECODE(HCI_READ_REMOTE_EXT_FEATURES);
HCI_CMD_DECODE(HCI_READ_REMOTE_VER_INFO);
HCI_CMD_DECODE(HCI_READ_CLOCK_OFFSET);
HCI_CMD_DECODE(HCI_READ_LMP_HANDLE);
HCI_CMD_DECODE(HCI_EXCHANGE_FIXED_INFO);
HCI_CMD_DECODE(HCI_EXCHANGE_ALIAS_INFO);
HCI_CMD_DECODE(HCI_PRIVATE_PAIRING_REQ_REPLY);
HCI_CMD_DECODE(HCI_PRIVATE_PAIRING_REQ_NEG_REPLY);
HCI_CMD_DECODE(HCI_GENERATED_ALIAS);
HCI_CMD_DECODE(HCI_ALIAS_ADDRESS_REQ_REPLY);
HCI_CMD_DECODE(HCI_ALIAS_ADDRESS_REQ_NEG_REPLY);
HCI_CMD_DECODE(HCI_SETUP_SYNCHRONOUS_CONN);
HCI_CMD_DECODE(HCI_ACCEPT_SYNCHRONOUS_CONN_REQ);
HCI_CMD_DECODE(HCI_REJECT_SYNCHRONOUS_CONN_REQ);
HCI_CMD_DECODE(HCI_IO_CAPABILITY_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_USER_CONFIRMATION_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_USER_CONFIRMATION_REQUEST_NEG_REPLY);
HCI_CMD_DECODE(HCI_USER_PASSKEY_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_USER_PASSKEY_REQUEST_NEG_REPLY);
HCI_CMD_DECODE(HCI_REMOTE_OOB_DATA_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_REMOTE_OOB_DATA_REQUEST_NEG_REPLY);
HCI_CMD_DECODE(HCI_IO_CAPABILITY_REQUEST_NEG_REPLY);
HCI_CMD_DECODE(HCI_ENHANCED_SETUP_SYNC_CONN);
HCI_CMD_DECODE(HCI_ENHANCED_ACCEPT_SYNC_CONN_REQ);
HCI_CMD_DECODE(HCI_TRUNCATED_PAGE);
HCI_CMD_DECODE(HCI_TRUNCATED_PAGE_CANCEL);
HCI_CMD_DECODE(HCI_SET_CSB);
HCI_CMD_DECODE(HCI_SET_CSB_RECEIVE);
HCI_CMD_DECODE(HCI_START_SYNCHRONIZATION_TRAIN);
HCI_CMD_DECODE(HCI_RECEIVE_SYNCHRONIZATION_TRAIN);
HCI_CMD_DECODE(HCI_REMOTE_OOB_EXTENDED_DATA_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_HOLD_MODE);
HCI_CMD_DECODE(HCI_SNIFF_MODE);
HCI_CMD_DECODE(HCI_EXIT_SNIFF_MODE);
HCI_CMD_DECODE(HCI_PARK_MODE);
HCI_CMD_DECODE(HCI_EXIT_PARK_MODE);
HCI_CMD_DECODE(HCI_QOS_SETUP);
HCI_CMD_DECODE(HCI_ROLE_DISCOVERY);
HCI_CMD_DECODE(HCI_SWITCH_ROLE);
HCI_CMD_DECODE(HCI_READ_LINK_POLICY_SETTINGS);
HCI_CMD_DECODE(HCI_WRITE_LINK_POLICY_SETTINGS);
HCI_CMD_DECODE(HCI_READ_DEFAULT_LINK_POLICY_SETTINGS);
HCI_CMD_DECODE(HCI_WRITE_DEFAULT_LINK_POLICY_SETTINGS);
HCI_CMD_DECODE(HCI_FLOW_SPEC);
HCI_CMD_DECODE(HCI_SNIFF_SUB_RATE);
HCI_CMD_DECODE(HCI_SET_EVENT_MASK);
HCI_CMD_DECODE(HCI_RESET);
HCI_CMD_DECODE(HCI_SET_EVENT_FILTER);
HCI_CMD_DECODE(HCI_FLUSH);
HCI_CMD_DECODE(HCI_READ_PIN_TYPE);
HCI_CMD_DECODE(HCI_WRITE_PIN_TYPE);
HCI_CMD_DECODE(HCI_CREATE_NEW_UNIT_KEY);
HCI_CMD_DECODE(HCI_READ_STORED_LINK_KEY);
HCI_CMD_DECODE(HCI_WRITE_STORED_LINK_KEY);
HCI_CMD_DECODE(HCI_DELETE_STORED_LINK_KEY);
HCI_CMD_DECODE(HCI_CHANGE_LOCAL_NAME);
HCI_CMD_DECODE(HCI_READ_LOCAL_NAME);
HCI_CMD_DECODE(HCI_READ_CONN_ACCEPT_TIMEOUT);
HCI_CMD_DECODE(HCI_WRITE_CONN_ACCEPT_TIMEOUT);
HCI_CMD_DECODE(HCI_READ_PAGE_TIMEOUT);
HCI_CMD_DECODE(HCI_WRITE_PAGE_TIMEOUT);
HCI_CMD_DECODE(HCI_READ_SCAN_ENABLE);
HCI_CMD_DECODE(HCI_WRITE_SCAN_ENABLE);
HCI_CMD_DECODE(HCI_READ_PAGESCAN_ACTIVITY);
HCI_CMD_DECODE(HCI_WRITE_PAGESCAN_ACTIVITY);
HCI_CMD_DECODE(HCI_READ_INQUIRYSCAN_ACTIVITY);
HCI_CMD_DECODE(HCI_WRITE_INQUIRYSCAN_ACTIVITY);
HCI_CMD_DECODE(HCI_READ_AUTH_ENABLE);
HCI_CMD_DECODE(HCI_WRITE_AUTH_ENABLE);
HCI_CMD_DECODE(HCI_READ_ENC_MODE);
HCI_CMD_DECODE(HCI_WRITE_ENC_MODE);
HCI_CMD_DECODE(HCI_READ_CLASS_OF_DEVICE);
HCI_CMD_DECODE(HCI_WRITE_CLASS_OF_DEVICE);
HCI_CMD_DECODE(HCI_READ_VOICE_SETTING);
HCI_CMD_DECODE(HCI_WRITE_VOICE_SETTING);
HCI_CMD_DECODE(HCI_READ_AUTO_FLUSH_TIMEOUT);
HCI_CMD_DECODE(HCI_WRITE_AUTO_FLUSH_TIMEOUT);
HCI_CMD_DECODE(HCI_READ_NUM_BCAST_RETXS);
HCI_CMD_DECODE(HCI_WRITE_NUM_BCAST_RETXS);
HCI_CMD_DECODE(HCI_READ_HOLD_MODE_ACTIVITY);
HCI_CMD_DECODE(HCI_WRITE_HOLD_MODE_ACTIVITY);
HCI_CMD_DECODE(HCI_READ_TX_POWER_LEVEL);
HCI_CMD_DECODE(HCI_READ_SCO_FLOW_CON_ENABLE);
HCI_CMD_DECODE(HCI_WRITE_SCO_FLOW_CON_ENABLE);
HCI_CMD_DECODE(HCI_SET_HCTOHOST_FLOW_CONTROL);
HCI_CMD_DECODE(HCI_HOST_BUFFER_SIZE);
HCI_CMD_DECODE(HCI_HOST_NUM_COMPLETED_PACKETS);
HCI_CMD_DECODE(HCI_READ_LINK_SUPERV_TIMEOUT);
HCI_CMD_DECODE(HCI_WRITE_LINK_SUPERV_TIMEOUT);
HCI_CMD_DECODE(HCI_READ_NUM_SUPPORTED_IAC);
HCI_CMD_DECODE(HCI_READ_CURRENT_IAC_LAP);
HCI_CMD_DECODE(HCI_WRITE_CURRENT_IAC_LAP);
HCI_CMD_DECODE(HCI_READ_PAGESCAN_PERIOD_MODE);
HCI_CMD_DECODE(HCI_WRITE_PAGESCAN_PERIOD_MODE);
HCI_CMD_DECODE(HCI_READ_PAGESCAN_MODE);
HCI_CMD_DECODE(HCI_WRITE_PAGESCAN_MODE);
HCI_CMD_DECODE(HCI_SET_AFH_CHANNEL_CLASS);
HCI_CMD_DECODE(HCI_READ_INQUIRY_SCAN_TYPE);
HCI_CMD_DECODE(HCI_WRITE_INQUIRY_SCAN_TYPE);
HCI_CMD_DECODE(HCI_READ_INQUIRY_MODE);
HCI_CMD_DECODE(HCI_WRITE_INQUIRY_MODE);
HCI_CMD_DECODE(HCI_READ_PAGE_SCAN_TYPE);
HCI_CMD_DECODE(HCI_WRITE_PAGE_SCAN_TYPE);
HCI_CMD_DECODE(HCI_READ_AFH_CHANNEL_CLASS_M);
HCI_CMD_DECODE(HCI_WRITE_AFH_CHANNEL_CLASS_M);
HCI_CMD_DECODE(HCI_READ_ANON_MODE);
HCI_CMD_DECODE(HCI_WRITE_ANON_MODE);
HCI_CMD_DECODE(HCI_READ_ALIAS_AUTH_ENABLE);
HCI_CMD_DECODE(HCI_WRITE_ALIAS_AUTH_ENABLE);
HCI_CMD_DECODE(HCI_READ_ANON_ADDR_CHANGE_PARAMS);
HCI_CMD_DECODE(HCI_WRITE_ANON_ADDR_CHANGE_PARAMS);
HCI_CMD_DECODE(HCI_RESET_FIXED_ADDRESS_ATTEMPTS_COUNTER);
HCI_CMD_DECODE(HCI_READ_EXTENDED_INQUIRY_RESPONSE_DATA);
HCI_CMD_DECODE(HCI_WRITE_EXTENDED_INQUIRY_RESPONSE_DATA);
HCI_CMD_DECODE(HCI_REFRESH_ENCRYPTION_KEY);
HCI_CMD_DECODE(HCI_READ_SIMPLE_PAIRING_MODE);
HCI_CMD_DECODE(HCI_WRITE_SIMPLE_PAIRING_MODE);
HCI_CMD_DECODE(HCI_READ_LOCAL_OOB_DATA);
HCI_CMD_DECODE(HCI_READ_INQUIRY_RESPONSE_TX_POWER_LEVEL);
HCI_CMD_DECODE(HCI_WRITE_INQUIRY_TRANSMIT_POWER_LEVEL);
HCI_CMD_DECODE(HCI_ENHANCED_FLUSH);
HCI_CMD_DECODE(HCI_SEND_KEYPRESS_NOTIFICATION);
HCI_CMD_DECODE(HCI_SET_EVENT_MASK_PAGE_2);
HCI_CMD_DECODE(HCI_READ_ENH_TX_POWER_LEVEL);
HCI_CMD_DECODE(HCI_READ_LE_HOST_SUPPORT);
HCI_CMD_DECODE(HCI_WRITE_LE_HOST_SUPPORT);
HCI_CMD_DECODE(HCI_SET_RESERVED_LT_ADDR);
HCI_CMD_DECODE(HCI_DELETE_RESERVED_LT_ADDR);
HCI_CMD_DECODE(HCI_SET_CSB_DATA);
HCI_CMD_DECODE(HCI_READ_SYNCHRONIZATION_TRAIN_PARAMS);
HCI_CMD_DECODE(HCI_WRITE_SYNCHRONIZATION_TRAIN_PARAMS);
HCI_CMD_DECODE(HCI_READ_SECURE_CONNECTIONS_HOST_SUPPORT);
HCI_CMD_DECODE(HCI_WRITE_SECURE_CONNECTIONS_HOST_SUPPORT);
HCI_CMD_DECODE(HCI_READ_AUTHENTICATED_PAYLOAD_TIMEOUT);
HCI_CMD_DECODE(HCI_WRITE_AUTHENTICATED_PAYLOAD_TIMEOUT);
HCI_CMD_DECODE(HCI_READ_LOCAL_OOB_EXTENDED_DATA);
HCI_CMD_DECODE(HCI_READ_LOCAL_VER_INFO);
HCI_CMD_DECODE(HCI_READ_LOCAL_SUPP_COMMANDS);
HCI_CMD_DECODE(HCI_READ_LOCAL_SUPP_FEATURES);
HCI_CMD_DECODE(HCI_READ_LOCAL_EXT_FEATURES);
HCI_CMD_DECODE(HCI_READ_BUFFER_SIZE);
HCI_CMD_DECODE(HCI_READ_COUNTRY_CODE);
HCI_CMD_DECODE(HCI_READ_BD_ADDR);
HCI_CMD_DECODE(HCI_READ_LOCAL_SUPP_CODECS);
HCI_CMD_DECODE(HCI_READ_FAILED_CONTACT_COUNT);
HCI_CMD_DECODE(HCI_RESET_FAILED_CONTACT_COUNT);
HCI_CMD_DECODE(HCI_GET_LINK_QUALITY);
HCI_CMD_DECODE(HCI_READ_RSSI);
HCI_CMD_DECODE(HCI_READ_AFH_CHANNEL_MAP);
HCI_CMD_DECODE(HCI_READ_CLOCK);
HCI_CMD_DECODE(HCI_READ_ENCRYPTION_KEY_SIZE);
HCI_CMD_DECODE(HCI_SET_TRIGGERED_CLOCK_CAPTURE);
HCI_CMD_DECODE(HCI_READ_LOOPBACK_MODE);
HCI_CMD_DECODE(HCI_WRITE_LOOPBACK_MODE);
HCI_CMD_DECODE(HCI_ENABLE_DUT_MODE);
HCI_CMD_DECODE(HCI_WRITE_SIMPLE_PAIRING_DEBUG_MODE);
HCI_CMD_DECODE(HCI_WRITE_SECURE_CONNECTIONS_TEST_MODE);
HCI_CMD_DECODE(HCI_ULP_SET_EVENT_MASK);
HCI_CMD_DECODE(HCI_ULP_READ_BUFFER_SIZE);
HCI_CMD_DECODE(HCI_ULP_READ_LOCAL_SUPPORTED_FEATURES);
HCI_CMD_DECODE(HCI_ULP_SET_RANDOM_ADDRESS);
HCI_CMD_DECODE(HCI_ULP_SET_ADVERTISING_PARAMETERS);
HCI_CMD_DECODE(HCI_ULP_READ_ADVERTISING_CHANNEL_TX_POWER);
HCI_CMD_DECODE(HCI_ULP_SET_ADVERTISING_DATA);
HCI_CMD_DECODE(HCI_ULP_SET_SCAN_RESPONSE_DATA);
HCI_CMD_DECODE(HCI_ULP_SET_ADVERTISE_ENABLE);
HCI_CMD_DECODE(HCI_ULP_SET_SCAN_PARAMETERS);
HCI_CMD_DECODE(HCI_ULP_SET_SCAN_ENABLE);
HCI_CMD_DECODE(HCI_ULP_CREATE_CONNECTION);
HCI_CMD_DECODE(HCI_ULP_CREATE_CONNECTION_CANCEL);
HCI_CMD_DECODE(HCI_ULP_READ_WHITE_LIST_SIZE);
HCI_CMD_DECODE(HCI_ULP_CLEAR_WHITE_LIST);
HCI_CMD_DECODE(HCI_ULP_ADD_DEVICE_TO_WHITE_LIST);
HCI_CMD_DECODE(HCI_ULP_REMOVE_DEVICE_FROM_WHITE_LIST);
HCI_CMD_DECODE(HCI_ULP_CONNECTION_UPDATE);
HCI_CMD_DECODE(HCI_ULP_SET_HOST_CHANNEL_CLASSIFICATION);
HCI_CMD_DECODE(HCI_ULP_READ_CHANNEL_MAP);
HCI_CMD_DECODE(HCI_ULP_READ_REMOTE_USED_FEATURES);
HCI_CMD_DECODE(HCI_ULP_ENCRYPT);
HCI_CMD_DECODE(HCI_ULP_RAND);
HCI_CMD_DECODE(HCI_ULP_START_ENCRYPTION);
HCI_CMD_DECODE(HCI_ULP_LONG_TERM_KEY_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_ULP_LONG_TERM_KEY_REQUEST_NEGATIVE_REPLY);
HCI_CMD_DECODE(HCI_ULP_READ_SUPPORTED_STATES);
HCI_CMD_DECODE(HCI_ULP_RECEIVER_TEST);
HCI_CMD_DECODE(HCI_ULP_TRANSMITTER_TEST);
HCI_CMD_DECODE(HCI_ULP_TEST_END);
HCI_CMD_DECODE(HCI_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST_REPLY);
HCI_CMD_DECODE(HCI_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST_NEGATIVE_REPLY);
HCI_CMD_DECODE(HCI_ULP_SET_DATA_LENGTH);
HCI_CMD_DECODE(HCI_ULP_READ_SUGGESTED_DEFAULT_DATA_LENGTH);
HCI_CMD_DECODE(HCI_ULP_WRITE_SUGGESTED_DEFAULT_DATA_LENGTH);
HCI_CMD_DECODE(HCI_ULP_READ_LOCAL_P256_PUBLIC_KEY);
HCI_CMD_DECODE(HCI_ULP_GENERATE_DHKEY);
HCI_CMD_DECODE(HCI_ULP_ADD_DEVICE_TO_RESOLVING_LIST);
HCI_CMD_DECODE(HCI_ULP_REMOVE_DEVICE_FROM_RESOLVING_LIST);
HCI_CMD_DECODE(HCI_ULP_CLEAR_RESOLVING_LIST);
HCI_CMD_DECODE(HCI_ULP_READ_RESOLVING_LIST_SIZE);
HCI_CMD_DECODE(HCI_ULP_READ_PEER_RESOLVABLE_ADDRESS);
HCI_CMD_DECODE(HCI_ULP_READ_LOCAL_RESOLVABLE_ADDRESS);
HCI_CMD_DECODE(HCI_ULP_SET_ADDRESS_RESOLUTION_ENABLE);
HCI_CMD_DECODE(HCI_ULP_SET_RANDOM_PRIVATE_ADDRESS_TIMEOUT);
HCI_CMD_DECODE(HCI_ULP_READ_MAXIMUM_DATA_LENGTH);
HCI_CMD_DECODE(HCI_ULP_READ_PHY);
HCI_CMD_DECODE(HCI_ULP_SET_DEFAULT_PHY);
HCI_CMD_DECODE(HCI_ULP_SET_PHY);
HCI_CMD_DECODE(HCI_ULP_ENHANCED_RECEIVER_TEST);
HCI_CMD_DECODE(HCI_ULP_ENHANCED_TRANSMITTER_TEST);
}
return ret;
}
static const char *scsc_bt_dump_hci_command(struct BSMHCP_TD_CONTROL *td_info)
{
/* The first two payload bytes hold the little-endian HCI opcode */
return td_info->length ?
scsc_hci_evt_decode_command_code(*((u16 *) &td_info->data[0])) :
"[empty]";
}
const char *scsc_hci_evt_decode_event_code(u8 *data)
{
const char *ret = "NA";
u8 hci_event_code = data[0];
u8 hci_ulp_sub_code = data[2]; /* only meaningful when hci_event_code == HCI_EV_ULP */
switch (hci_event_code) {
HCI_EV_DECODE(HCI_EV_INQUIRY_COMPLETE);
HCI_EV_DECODE(HCI_EV_INQUIRY_RESULT);
HCI_EV_DECODE(HCI_EV_CONN_COMPLETE);
HCI_EV_DECODE(HCI_EV_CONN_REQUEST);
HCI_EV_DECODE(HCI_EV_DISCONNECT_COMPLETE);
HCI_EV_DECODE(HCI_EV_AUTH_COMPLETE);
HCI_EV_DECODE(HCI_EV_REMOTE_NAME_REQ_COMPLETE);
HCI_EV_DECODE(HCI_EV_ENCRYPTION_CHANGE);
HCI_EV_DECODE(HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE);
HCI_EV_DECODE(HCI_EV_MASTER_LINK_KEY_COMPLETE);
HCI_EV_DECODE(HCI_EV_READ_REM_SUPP_FEATURES_COMPLETE);
HCI_EV_DECODE(HCI_EV_READ_REMOTE_VER_INFO_COMPLETE);
HCI_EV_DECODE(HCI_EV_QOS_SETUP_COMPLETE);
HCI_EV_DECODE(HCI_EV_HARDWARE_ERROR);
HCI_EV_DECODE(HCI_EV_FLUSH_OCCURRED);
HCI_EV_DECODE(HCI_EV_ROLE_CHANGE);
HCI_EV_DECODE(HCI_EV_NUMBER_COMPLETED_PKTS);
HCI_EV_DECODE(HCI_EV_MODE_CHANGE);
HCI_EV_DECODE(HCI_EV_RETURN_LINK_KEYS);
HCI_EV_DECODE(HCI_EV_PIN_CODE_REQ);
HCI_EV_DECODE(HCI_EV_LINK_KEY_REQ);
HCI_EV_DECODE(HCI_EV_LINK_KEY_NOTIFICATION);
HCI_EV_DECODE(HCI_EV_LOOPBACK_COMMAND);
HCI_EV_DECODE(HCI_EV_DATA_BUFFER_OVERFLOW);
HCI_EV_DECODE(HCI_EV_MAX_SLOTS_CHANGE);
HCI_EV_DECODE(HCI_EV_READ_CLOCK_OFFSET_COMPLETE);
HCI_EV_DECODE(HCI_EV_CONN_PACKET_TYPE_CHANGED);
HCI_EV_DECODE(HCI_EV_QOS_VIOLATION);
HCI_EV_DECODE(HCI_EV_PAGE_SCAN_MODE_CHANGE);
HCI_EV_DECODE(HCI_EV_PAGE_SCAN_REP_MODE_CHANGE);
HCI_EV_DECODE(HCI_EV_FLOW_SPEC_COMPLETE);
HCI_EV_DECODE(HCI_EV_INQUIRY_RESULT_WITH_RSSI);
HCI_EV_DECODE(HCI_EV_READ_REM_EXT_FEATURES_COMPLETE);
HCI_EV_DECODE(HCI_EV_FIXED_ADDRESS);
HCI_EV_DECODE(HCI_EV_ALIAS_ADDRESS);
HCI_EV_DECODE(HCI_EV_GENERATE_ALIAS_REQ);
HCI_EV_DECODE(HCI_EV_ACTIVE_ADDRESS);
HCI_EV_DECODE(HCI_EV_ALLOW_PRIVATE_PAIRING);
HCI_EV_DECODE(HCI_EV_ALIAS_ADDRESS_REQ);
HCI_EV_DECODE(HCI_EV_ALIAS_NOT_RECOGNISED);
HCI_EV_DECODE(HCI_EV_FIXED_ADDRESS_ATTEMPT);
HCI_EV_DECODE(HCI_EV_SYNC_CONN_COMPLETE);
HCI_EV_DECODE(HCI_EV_SYNC_CONN_CHANGED);
HCI_EV_DECODE(HCI_EV_SNIFF_SUB_RATE);
HCI_EV_DECODE(HCI_EV_EXTENDED_INQUIRY_RESULT);
HCI_EV_DECODE(HCI_EV_ENCRYPTION_KEY_REFRESH_COMPLETE);
HCI_EV_DECODE(HCI_EV_IO_CAPABILITY_REQUEST);
HCI_EV_DECODE(HCI_EV_IO_CAPABILITY_RESPONSE);
HCI_EV_DECODE(HCI_EV_USER_CONFIRMATION_REQUEST);
HCI_EV_DECODE(HCI_EV_USER_PASSKEY_REQUEST);
HCI_EV_DECODE(HCI_EV_REMOTE_OOB_DATA_REQUEST);
HCI_EV_DECODE(HCI_EV_SIMPLE_PAIRING_COMPLETE);
HCI_EV_DECODE(HCI_EV_LST_CHANGE);
HCI_EV_DECODE(HCI_EV_ENHANCED_FLUSH_COMPLETE);
HCI_EV_DECODE(HCI_EV_USER_PASSKEY_NOTIFICATION);
HCI_EV_DECODE(HCI_EV_KEYPRESS_NOTIFICATION);
HCI_EV_DECODE(HCI_EV_REM_HOST_SUPPORTED_FEATURES);
HCI_EV_DECODE(HCI_EV_TRIGGERED_CLOCK_CAPTURE);
HCI_EV_DECODE(HCI_EV_SYNCHRONIZATION_TRAIN_COMPLETE);
HCI_EV_DECODE(HCI_EV_SYNCHRONIZATION_TRAIN_RECEIVED);
HCI_EV_DECODE(HCI_EV_CSB_RECEIVE);
HCI_EV_DECODE(HCI_EV_CSB_TIMEOUT);
HCI_EV_DECODE(HCI_EV_TRUNCATED_PAGE_COMPLETE);
HCI_EV_DECODE(HCI_EV_SLAVE_PAGE_RESPONSE_TIMEOUT);
HCI_EV_DECODE(HCI_EV_CSB_CHANNEL_MAP_CHANGE);
HCI_EV_DECODE(HCI_EV_INQUIRY_RESPONSE_NOTIFICATION);
HCI_EV_DECODE(HCI_EV_AUTHENTICATED_PAYLOAD_TIMEOUT_EXPIRED);
case HCI_EV_COMMAND_COMPLETE:
{
/* Little-endian opcode of the completed command, at offset 3 */
u16 op_code = *((u16 *) &data[3]);
snprintf(scsc_hci_evt_decode_buffer,
sizeof(scsc_hci_evt_decode_buffer),
"HCI_EV_COMMAND_COMPLETE[Opcode=%s (0x%04x)]",
scsc_hci_evt_decode_command_code(op_code),
op_code);
ret = scsc_hci_evt_decode_buffer;
break;
}
case HCI_EV_COMMAND_STATUS:
{
u8 status = data[3];
/* Little-endian opcode of the pending command, at offset 4 */
u16 op_code = *((u16 *) &data[4]);
snprintf(scsc_hci_evt_decode_buffer,
sizeof(scsc_hci_evt_decode_buffer),
"HCI_EV_COMMAND_STATUS[Opcode:%s (0x%04x), Status: %u]",
scsc_hci_evt_decode_command_code(op_code),
op_code,
status);
ret = scsc_hci_evt_decode_buffer;
break;
}
case HCI_EV_ULP:
{
switch (hci_ulp_sub_code) {
HCI_EV_DECODE(HCI_EV_ULP_CONNECTION_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_ADVERTISING_REPORT);
HCI_EV_DECODE(HCI_EV_ULP_CONNECTION_UPDATE_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_READ_REMOTE_USED_FEATURES_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_LONG_TERM_KEY_REQUEST);
HCI_EV_DECODE(HCI_EV_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST);
HCI_EV_DECODE(HCI_EV_ULP_DATA_LENGTH_CHANGE);
HCI_EV_DECODE(HCI_EV_ULP_READ_LOCAL_P256_PUB_KEY_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_GENERATE_DHKEY_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_ENHANCED_CONNECTION_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_DIRECT_ADVERTISING_REPORT);
HCI_EV_DECODE(HCI_EV_ULP_PHY_UPDATE_COMPLETE);
HCI_EV_DECODE(HCI_EV_ULP_USED_CHANNEL_SELECTION);
}
break;
}
}
return ret;
}
static const char *scsc_bt_dump_hci_event(struct BSMHCP_TD_HCI_EVT *td_info)
{
return td_info->length ?
scsc_hci_evt_decode_event_code(td_info->data) :
"[empty]";
}
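/*
 * Usage sketch (illustrative only, not driver code): decoding a raw
 * Command Complete event for HCI_RESET.  Layout assumed from the reads
 * above: data[0] event code, data[1] parameter length, data[2]
 * Num_HCI_Command_Packets, data[3..4] little-endian opcode.
 *
 *   u8 evt[] = { 0x0e, 0x04, 0x01, 0x03, 0x0c, 0x00 };
 *   pr_info("%s\n", scsc_hci_evt_decode_event_code(evt));
 *   // prints "HCI_EV_COMMAND_COMPLETE[Opcode=HCI_RESET (0x0c03)]"
 */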
void scsc_bt_dump_driver_state(void)
{
if (bt_service.bsmhcp_protocol) {
u32 index;
u32 readidx;
u32 writeidx;
pr_info("scsc_bt_dump_driver_state: Bluetooth Shared Memory Host Controller Protocol\n");
pr_info("scsc_bt_dump_driver_state: transfer ring read_idx write_idx\n");
pr_info("scsc_bt_dump_driver_state: ---------------------------------------\n");
pr_info("scsc_bt_dump_driver_state: hci_cmd %3u %3u\n",
bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_read,
bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_write);
pr_info("scsc_bt_dump_driver_state: hci_evt %3u %3u\n",
bt_service.bsmhcp_protocol->header.mailbox_hci_evt_read,
bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write);
pr_info("scsc_bt_dump_driver_state: acl_tx %3u %3u\n",
bt_service.bsmhcp_protocol->header.mailbox_acl_tx_read,
bt_service.bsmhcp_protocol->header.mailbox_acl_tx_write);
pr_info("scsc_bt_dump_driver_state: acl_rx %3u %3u\n",
bt_service.bsmhcp_protocol->header.mailbox_acl_rx_read,
bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write);
pr_info("scsc_bt_dump_driver_state: acl_free %3u %3u\n",
bt_service.bsmhcp_protocol->header.mailbox_acl_free_read,
bt_service.bsmhcp_protocol->header.mailbox_acl_free_write);
pr_info("scsc_bt_dump_driver_state: ---------------------------------------\n");
pr_info("scsc_bt_dump_driver_state: mxlog_filter = 0x%08x\n",
bt_service.bsmhcp_protocol->header.mxlog_filter);
pr_info("scsc_bt_dump_driver_state: firmware_control = 0x%08x\n",
bt_service.bsmhcp_protocol->header.firmware_control);
pr_info("scsc_bt_dump_driver_state: system_state = %s\n",
bt_service.bsmhcp_protocol->header.controller_flags == 0 ? "STOPPED" :
(bt_service.bsmhcp_protocol->header.controller_flags == 1 ? "STARTED" : "RUNNING"));
pr_info("scsc_bt_dump_driver_state: HCI commands:\n");
pr_info("scsc_bt_dump_driver_state: ptr index len details\n");
pr_info("scsc_bt_dump_driver_state: -------------------------------------------------------------\n");
readidx = bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_read;
writeidx = bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_write;
for (index = 0; index < BSMHCP_TRANSFER_RING_CMD_SIZE; index++) {
uint32_t write = writeidx >= index ? writeidx - index : BSMHCP_TRANSFER_RING_CMD_SIZE - index + writeidx;
struct BSMHCP_TD_CONTROL *td_info =
&bt_service.bsmhcp_protocol->hci_cmd_transfer_ring[write];
pr_info("scsc_bt_dump_driver_state: %c %c %3u %3u %s\n",
write == writeidx ? 'W' : ' ',
write == readidx ? 'R' : ' ',
write,
td_info->length,
scsc_bt_dump_hci_command(td_info));
}
pr_info("scsc_bt_dump_driver_state: -------------------------------------------------------------\n");
pr_info("scsc_bt_dump_driver_state: HCI events:\n");
pr_info("scsc_bt_dump_driver_state: ptr index len hci_handle event details\n");
pr_info("scsc_bt_dump_driver_state: -------------------------------------------------------------\n");
readidx = bt_service.bsmhcp_protocol->header.mailbox_hci_evt_read;
writeidx = bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write;
for (index = 0; index < BSMHCP_TRANSFER_RING_EVT_SIZE; index++) {
uint32_t write = writeidx >= index ? writeidx - index : BSMHCP_TRANSFER_RING_EVT_SIZE - index + writeidx;
struct BSMHCP_TD_HCI_EVT *td_info =
&bt_service.bsmhcp_protocol->hci_evt_transfer_ring[write];
pr_info("scsc_bt_dump_driver_state: %c %c %3u %3u 0x%03x %2u %s\n",
write == writeidx ? 'W' : ' ',
write == readidx ? 'R' : ' ',
write,
td_info->length,
td_info->hci_connection_handle,
td_info->event_type,
scsc_bt_dump_hci_event(td_info));
}
pr_info("scsc_bt_dump_driver_state: -------------------------------------------------------------\n");
pr_info("scsc_bt_dump_driver_state: ACL to AIR:\n");
pr_info("scsc_bt_dump_driver_state: ptr index len idx start flush broadcast hci_handle cid\n");
pr_info("scsc_bt_dump_driver_state: -----------------------------------------------------------------\n");
readidx = bt_service.bsmhcp_protocol->header.mailbox_acl_tx_read;
writeidx = bt_service.bsmhcp_protocol->header.mailbox_acl_tx_write;
for (index = 0; index < BSMHCP_TRANSFER_RING_ACL_SIZE; index++) {
uint32_t write = writeidx >= index ? writeidx - index : BSMHCP_TRANSFER_RING_ACL_SIZE - index + writeidx;
struct BSMHCP_TD_ACL_TX_DATA *td_info =
&bt_service.bsmhcp_protocol->acl_tx_data_transfer_ring[write];
u8 pb = td_info->flags & BSMHCP_ACL_PB_FLAG_MASK;
u8 bc = td_info->flags & BSMHCP_ACL_BC_FLAG_BCAST_MASK;
pr_info("scsc_bt_dump_driver_state: %c %c %3u %3u %3u %c %c %c 0x%03x 0x%04x\n",
write == writeidx ? 'W' : ' ',
write == readidx ? 'R' : ' ',
write,
td_info->length,
td_info->buffer_index,
pb == BSMHCP_ACL_PB_FLAG_CONT ? 'N' : 'Y',
pb == BSMHCP_ACL_PB_FLAG_START_FLUSH ? 'Y' : 'N',
bc == BSMHCP_ACL_BC_FLAG_BCAST_NON ? 'N' : 'Y',
td_info->hci_connection_handle,
td_info->l2cap_cid);
}
pr_info("scsc_bt_dump_driver_state: -----------------------------------------------------------------\n");
pr_info("scsc_bt_dump_driver_state: ACL from AIR:\n");
pr_info("scsc_bt_dump_driver_state: ptr index len start flush hci_handle dis\n");
pr_info("scsc_bt_dump_driver_state: ----------------------------------------------\n");
readidx = bt_service.bsmhcp_protocol->header.mailbox_acl_rx_read;
writeidx = bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write;
for (index = 0; index < BSMHCP_TRANSFER_RING_ACL_SIZE; index++) {
uint32_t write = writeidx >= index ? writeidx - index : BSMHCP_TRANSFER_RING_ACL_SIZE - index + writeidx;
struct BSMHCP_TD_ACL_RX *td_info =
&bt_service.bsmhcp_protocol->acl_rx_transfer_ring[write];
u8 pb = (td_info->packet_boundary << 4) & BSMHCP_ACL_PB_FLAG_MASK;
pr_info("scsc_bt_dump_driver_state: %c %c %3u %3u %c %c 0x%03x %c\n",
write == writeidx ? 'W' : ' ',
write == readidx ? 'R' : ' ',
write,
td_info->length,
pb == BSMHCP_ACL_PB_FLAG_CONT ? 'N' : 'Y',
td_info->broadcast_flag ? 'Y' : 'N',
td_info->hci_connection_handle,
td_info->disconnected ? 'Y' : 'N');
}
pr_info("scsc_bt_dump_driver_state: ----------------------------------------------\n");
}
}
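/*
 * Note on the ring walks above (illustrative): each dump starts at the
 * write pointer and steps backwards, wrapping at the ring size.  For a
 * ring of SIZE slots and 0 <= index < SIZE:
 *
 *   slot = writeidx >= index ? writeidx - index
 *                            : SIZE - index + writeidx;
 *
 * is equivalent to (writeidx + SIZE - index) % SIZE, so the newest
 * descriptors are printed before the oldest.
 */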

View file

@ -0,0 +1,352 @@
/****************************************************************************
*
* Internal BT driver HCI decoder
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
*
****************************************************************************/
#ifndef __SCSC_BT_HCI_H
#define __SCSC_BT_HCI_H
#define HCI_EVENT_HEADER_LENGTH (2)
#define HCI_LINK ((u16)0x0400)
#define HCI_POLICY ((u16)0x0800)
#define HCI_HOST_BB ((u16)0x0C00)
#define HCI_INFO ((u16)0x1000)
#define HCI_STATUS ((u16)0x1400)
#define HCI_TEST ((u16)0x1800)
#define HCI_ULP ((u16)0x2000)
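/* The groups above are HCI Opcode Group Fields (OGF) pre-shifted into bits
 * 15:10 of the 16-bit opcode; each command below ORs in its Opcode Command
 * Field (OCF) in bits 9:0.  E.g. HCI_LINK is OGF 0x01 (Link Control) and
 * HCI_ULP is OGF 0x08 (LE Controller). */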
#define HCI_INQUIRY ((u16)HCI_LINK | 0x0001)
#define HCI_INQUIRY_CANCEL ((u16)HCI_LINK | 0x0002)
#define HCI_PERIODIC_INQUIRY_MODE ((u16)HCI_LINK | 0x0003)
#define HCI_EXIT_PERIODIC_INQUIRY_MODE ((u16)HCI_LINK | 0x0004)
#define HCI_CREATE_CONNECTION ((u16)HCI_LINK | 0x0005)
#define HCI_DISCONNECT ((u16)HCI_LINK | 0x0006)
#define HCI_ADD_SCO_CONNECTION ((u16)HCI_LINK | 0x0007)
#define HCI_CREATE_CONNECTION_CANCEL ((u16)HCI_LINK | 0x0008)
#define HCI_ACCEPT_CONNECTION_REQ ((u16)HCI_LINK | 0x0009)
#define HCI_REJECT_CONNECTION_REQ ((u16)HCI_LINK | 0x000A)
#define HCI_LINK_KEY_REQ_REPLY ((u16)HCI_LINK | 0x000B)
#define HCI_LINK_KEY_REQ_NEG_REPLY ((u16)HCI_LINK | 0x000C)
#define HCI_PIN_CODE_REQ_REPLY ((u16)HCI_LINK | 0x000D)
#define HCI_PIN_CODE_REQ_NEG_REPLY ((u16)HCI_LINK | 0x000E)
#define HCI_CHANGE_CONN_PKT_TYPE ((u16)HCI_LINK | 0x000F)
#define HCI_AUTH_REQ ((u16)HCI_LINK | 0x0011)
#define HCI_SET_CONN_ENCRYPTION ((u16)HCI_LINK | 0x0013)
#define HCI_CHANGE_CONN_LINK_KEY ((u16)HCI_LINK | 0x0015)
#define HCI_MASTER_LINK_KEY ((u16)HCI_LINK | 0x0017)
#define HCI_REMOTE_NAME_REQ ((u16)HCI_LINK | 0x0019)
#define HCI_REMOTE_NAME_REQ_CANCEL ((u16)HCI_LINK | 0x001A)
#define HCI_READ_REMOTE_SUPP_FEATURES ((u16)HCI_LINK | 0x001B)
#define HCI_READ_REMOTE_EXT_FEATURES ((u16)HCI_LINK | 0x001C)
#define HCI_READ_REMOTE_VER_INFO ((u16)HCI_LINK | 0x001D)
#define HCI_READ_CLOCK_OFFSET ((u16)HCI_LINK | 0x001F)
#define HCI_READ_LMP_HANDLE ((u16)HCI_LINK | 0x0020)
#define HCI_EXCHANGE_FIXED_INFO ((u16)HCI_LINK | 0x0021)
#define HCI_EXCHANGE_ALIAS_INFO ((u16)HCI_LINK | 0x0022)
#define HCI_PRIVATE_PAIRING_REQ_REPLY ((u16)HCI_LINK | 0x0023)
#define HCI_PRIVATE_PAIRING_REQ_NEG_REPLY ((u16)HCI_LINK | 0x0024)
#define HCI_GENERATED_ALIAS ((u16)HCI_LINK | 0x0025)
#define HCI_ALIAS_ADDRESS_REQ_REPLY ((u16)HCI_LINK | 0x0026)
#define HCI_ALIAS_ADDRESS_REQ_NEG_REPLY ((u16)HCI_LINK | 0x0027)
#define HCI_SETUP_SYNCHRONOUS_CONN ((u16)HCI_LINK | 0x0028)
#define HCI_ACCEPT_SYNCHRONOUS_CONN_REQ ((u16)HCI_LINK | 0x0029)
#define HCI_REJECT_SYNCHRONOUS_CONN_REQ ((u16)HCI_LINK | 0x002A)
#define HCI_IO_CAPABILITY_REQUEST_REPLY ((u16)HCI_LINK | 0x002B)
#define HCI_USER_CONFIRMATION_REQUEST_REPLY ((u16)HCI_LINK | 0x002C)
#define HCI_USER_CONFIRMATION_REQUEST_NEG_REPLY ((u16)HCI_LINK | 0x002D)
#define HCI_USER_PASSKEY_REQUEST_REPLY ((u16)HCI_LINK | 0x002E)
#define HCI_USER_PASSKEY_REQUEST_NEG_REPLY ((u16)HCI_LINK | 0x002F)
#define HCI_REMOTE_OOB_DATA_REQUEST_REPLY ((u16)HCI_LINK | 0x0030)
#define HCI_REMOTE_OOB_DATA_REQUEST_NEG_REPLY ((u16)HCI_LINK | 0x0033)
#define HCI_IO_CAPABILITY_REQUEST_NEG_REPLY ((u16)HCI_LINK | 0x0034)
#define HCI_ENHANCED_SETUP_SYNC_CONN ((u16)HCI_LINK | 0x003D)
#define HCI_ENHANCED_ACCEPT_SYNC_CONN_REQ ((u16)HCI_LINK | 0x003E)
#define HCI_TRUNCATED_PAGE ((u16)HCI_LINK | 0x003F)
#define HCI_TRUNCATED_PAGE_CANCEL ((u16)HCI_LINK | 0x0040)
#define HCI_SET_CSB ((u16)HCI_LINK | 0x0041)
#define HCI_SET_CSB_RECEIVE ((u16)HCI_LINK | 0x0042)
#define HCI_START_SYNCHRONIZATION_TRAIN ((u16)HCI_LINK | 0x0043)
#define HCI_RECEIVE_SYNCHRONIZATION_TRAIN ((u16)HCI_LINK | 0x0044)
#define HCI_REMOTE_OOB_EXTENDED_DATA_REQUEST_REPLY ((u16)HCI_LINK | 0x0045)
#define HCI_HOLD_MODE ((u16)HCI_POLICY | 0x0001)
#define HCI_SNIFF_MODE ((u16)HCI_POLICY | 0x0003)
#define HCI_EXIT_SNIFF_MODE ((u16)HCI_POLICY | 0x0004)
#define HCI_PARK_MODE ((u16)HCI_POLICY | 0x0005)
#define HCI_EXIT_PARK_MODE ((u16)HCI_POLICY | 0x0006)
#define HCI_QOS_SETUP ((u16)HCI_POLICY | 0x0007)
#define HCI_ROLE_DISCOVERY ((u16)HCI_POLICY | 0x0009)
#define HCI_SWITCH_ROLE ((u16)HCI_POLICY | 0x000B)
#define HCI_READ_LINK_POLICY_SETTINGS ((u16)HCI_POLICY | 0x000C)
#define HCI_WRITE_LINK_POLICY_SETTINGS ((u16)HCI_POLICY | 0x000D)
#define HCI_READ_DEFAULT_LINK_POLICY_SETTINGS ((u16)HCI_POLICY | 0x000E)
#define HCI_WRITE_DEFAULT_LINK_POLICY_SETTINGS ((u16)HCI_POLICY | 0x000F)
#define HCI_FLOW_SPEC ((u16)HCI_POLICY | 0x0010)
#define HCI_SNIFF_SUB_RATE ((u16)HCI_POLICY | 0x0011)
#define HCI_SET_EVENT_MASK ((u16)HCI_HOST_BB | 0x0001)
#define HCI_RESET ((u16)HCI_HOST_BB | 0x0003)
#define HCI_SET_EVENT_FILTER ((u16)HCI_HOST_BB | 0x0005)
#define HCI_FLUSH ((u16)HCI_HOST_BB | 0x0008)
#define HCI_READ_PIN_TYPE ((u16)HCI_HOST_BB | 0x0009)
#define HCI_WRITE_PIN_TYPE ((u16)HCI_HOST_BB | 0x000A)
#define HCI_CREATE_NEW_UNIT_KEY ((u16)HCI_HOST_BB | 0x000B)
#define HCI_READ_STORED_LINK_KEY ((u16)HCI_HOST_BB | 0x000D)
#define HCI_WRITE_STORED_LINK_KEY ((u16)HCI_HOST_BB | 0x0011)
#define HCI_DELETE_STORED_LINK_KEY ((u16)HCI_HOST_BB | 0x0012)
#define HCI_CHANGE_LOCAL_NAME ((u16)HCI_HOST_BB | 0x0013)
#define HCI_READ_LOCAL_NAME ((u16)HCI_HOST_BB | 0x0014)
#define HCI_READ_CONN_ACCEPT_TIMEOUT ((u16)HCI_HOST_BB | 0x0015)
#define HCI_WRITE_CONN_ACCEPT_TIMEOUT ((u16)HCI_HOST_BB | 0x0016)
#define HCI_READ_PAGE_TIMEOUT ((u16)HCI_HOST_BB | 0x0017)
#define HCI_WRITE_PAGE_TIMEOUT ((u16)HCI_HOST_BB | 0x0018)
#define HCI_READ_SCAN_ENABLE ((u16)HCI_HOST_BB | 0x0019)
#define HCI_WRITE_SCAN_ENABLE ((u16)HCI_HOST_BB | 0x001A)
#define HCI_READ_PAGESCAN_ACTIVITY ((u16)HCI_HOST_BB | 0x001B)
#define HCI_WRITE_PAGESCAN_ACTIVITY ((u16)HCI_HOST_BB | 0x001C)
#define HCI_READ_INQUIRYSCAN_ACTIVITY ((u16)HCI_HOST_BB | 0x001D)
#define HCI_WRITE_INQUIRYSCAN_ACTIVITY ((u16)HCI_HOST_BB | 0x001E)
#define HCI_READ_AUTH_ENABLE ((u16)HCI_HOST_BB | 0x001F)
#define HCI_WRITE_AUTH_ENABLE ((u16)HCI_HOST_BB | 0x0020)
#define HCI_READ_ENC_MODE ((u16)HCI_HOST_BB | 0x0021)
#define HCI_WRITE_ENC_MODE ((u16)HCI_HOST_BB | 0x0022)
#define HCI_READ_CLASS_OF_DEVICE ((u16)HCI_HOST_BB | 0x0023)
#define HCI_WRITE_CLASS_OF_DEVICE ((u16)HCI_HOST_BB | 0x0024)
#define HCI_READ_VOICE_SETTING ((u16)HCI_HOST_BB | 0x0025)
#define HCI_WRITE_VOICE_SETTING ((u16)HCI_HOST_BB | 0x0026)
#define HCI_READ_AUTO_FLUSH_TIMEOUT ((u16)HCI_HOST_BB | 0x0027)
#define HCI_WRITE_AUTO_FLUSH_TIMEOUT ((u16)HCI_HOST_BB | 0x0028)
#define HCI_READ_NUM_BCAST_RETXS ((u16)HCI_HOST_BB | 0x0029)
#define HCI_WRITE_NUM_BCAST_RETXS ((u16)HCI_HOST_BB | 0x002A)
#define HCI_READ_HOLD_MODE_ACTIVITY ((u16)HCI_HOST_BB | 0x002B)
#define HCI_WRITE_HOLD_MODE_ACTIVITY ((u16)HCI_HOST_BB | 0x002C)
#define HCI_READ_TX_POWER_LEVEL ((u16)HCI_HOST_BB | 0x002D)
#define HCI_READ_SCO_FLOW_CON_ENABLE ((u16)HCI_HOST_BB | 0x002E)
#define HCI_WRITE_SCO_FLOW_CON_ENABLE ((u16)HCI_HOST_BB | 0x002F)
#define HCI_SET_HCTOHOST_FLOW_CONTROL ((u16)HCI_HOST_BB | 0x0031)
#define HCI_HOST_BUFFER_SIZE ((u16)HCI_HOST_BB | 0x0033)
#define HCI_HOST_NUM_COMPLETED_PACKETS ((u16)HCI_HOST_BB | 0x0035)
#define HCI_READ_LINK_SUPERV_TIMEOUT ((u16)HCI_HOST_BB | 0x0036)
#define HCI_WRITE_LINK_SUPERV_TIMEOUT ((u16)HCI_HOST_BB | 0x0037)
#define HCI_READ_NUM_SUPPORTED_IAC ((u16)HCI_HOST_BB | 0x0038)
#define HCI_READ_CURRENT_IAC_LAP ((u16)HCI_HOST_BB | 0x0039)
#define HCI_WRITE_CURRENT_IAC_LAP ((u16)HCI_HOST_BB | 0x003A)
#define HCI_READ_PAGESCAN_PERIOD_MODE ((u16)HCI_HOST_BB | 0x003B)
#define HCI_WRITE_PAGESCAN_PERIOD_MODE ((u16)HCI_HOST_BB | 0x003C)
#define HCI_READ_PAGESCAN_MODE ((u16)HCI_HOST_BB | 0x003D)
#define HCI_WRITE_PAGESCAN_MODE ((u16)HCI_HOST_BB | 0x003E)
#define HCI_SET_AFH_CHANNEL_CLASS ((u16)HCI_HOST_BB | 0x003F)
#define HCI_READ_INQUIRY_SCAN_TYPE ((u16)HCI_HOST_BB | 0x0042)
#define HCI_WRITE_INQUIRY_SCAN_TYPE ((u16)HCI_HOST_BB | 0x0043)
#define HCI_READ_INQUIRY_MODE ((u16)HCI_HOST_BB | 0x0044)
#define HCI_WRITE_INQUIRY_MODE ((u16)HCI_HOST_BB | 0x0045)
#define HCI_READ_PAGE_SCAN_TYPE ((u16)HCI_HOST_BB | 0x0046)
#define HCI_WRITE_PAGE_SCAN_TYPE ((u16)HCI_HOST_BB | 0x0047)
#define HCI_READ_AFH_CHANNEL_CLASS_M ((u16)HCI_HOST_BB | 0x0048)
#define HCI_WRITE_AFH_CHANNEL_CLASS_M ((u16)HCI_HOST_BB | 0x0049)
#define HCI_READ_ANON_MODE ((u16)HCI_HOST_BB | 0x004A)
#define HCI_WRITE_ANON_MODE ((u16)HCI_HOST_BB | 0x004B)
#define HCI_READ_ALIAS_AUTH_ENABLE ((u16)HCI_HOST_BB | 0x004C)
#define HCI_WRITE_ALIAS_AUTH_ENABLE ((u16)HCI_HOST_BB | 0x004D)
#define HCI_READ_ANON_ADDR_CHANGE_PARAMS ((u16)HCI_HOST_BB | 0x004E)
#define HCI_WRITE_ANON_ADDR_CHANGE_PARAMS ((u16)HCI_HOST_BB | 0x004F)
#define HCI_RESET_FIXED_ADDRESS_ATTEMPTS_COUNTER ((u16)HCI_HOST_BB | 0x0050)
#define HCI_READ_EXTENDED_INQUIRY_RESPONSE_DATA ((u16)HCI_HOST_BB | 0x0051)
#define HCI_WRITE_EXTENDED_INQUIRY_RESPONSE_DATA ((u16)HCI_HOST_BB | 0x0052)
#define HCI_REFRESH_ENCRYPTION_KEY ((u16)HCI_HOST_BB | 0x0053)
#define HCI_READ_SIMPLE_PAIRING_MODE ((u16)HCI_HOST_BB | 0x0055)
#define HCI_WRITE_SIMPLE_PAIRING_MODE ((u16)HCI_HOST_BB | 0x0056)
#define HCI_READ_LOCAL_OOB_DATA ((u16)HCI_HOST_BB | 0x0057)
#define HCI_READ_INQUIRY_RESPONSE_TX_POWER_LEVEL ((u16)HCI_HOST_BB | 0x0058)
#define HCI_WRITE_INQUIRY_TRANSMIT_POWER_LEVEL ((u16)HCI_HOST_BB | 0x0059)
#define HCI_ENHANCED_FLUSH ((u16)HCI_HOST_BB | 0x005F)
#define HCI_SEND_KEYPRESS_NOTIFICATION ((u16)HCI_HOST_BB | 0x0060)
#define HCI_SET_EVENT_MASK_PAGE_2 ((u16)HCI_HOST_BB | 0x0063)
#define HCI_READ_ENH_TX_POWER_LEVEL ((u16)HCI_HOST_BB | 0x0068)
#define HCI_READ_LE_HOST_SUPPORT ((u16)HCI_HOST_BB | 0x006C)
#define HCI_WRITE_LE_HOST_SUPPORT ((u16)HCI_HOST_BB | 0x006D)
#define HCI_SET_RESERVED_LT_ADDR ((u16)HCI_HOST_BB | 0x0074)
#define HCI_DELETE_RESERVED_LT_ADDR ((u16)HCI_HOST_BB | 0x0075)
#define HCI_SET_CSB_DATA ((u16)HCI_HOST_BB | 0x0076)
#define HCI_READ_SYNCHRONIZATION_TRAIN_PARAMS ((u16)HCI_HOST_BB | 0x0077)
#define HCI_WRITE_SYNCHRONIZATION_TRAIN_PARAMS ((u16)HCI_HOST_BB | 0x0078)
#define HCI_READ_SECURE_CONNECTIONS_HOST_SUPPORT ((u16)HCI_HOST_BB | 0x0079)
#define HCI_WRITE_SECURE_CONNECTIONS_HOST_SUPPORT ((u16)HCI_HOST_BB | 0x007A)
#define HCI_READ_AUTHENTICATED_PAYLOAD_TIMEOUT ((u16)HCI_HOST_BB | 0x007B)
#define HCI_WRITE_AUTHENTICATED_PAYLOAD_TIMEOUT ((u16)HCI_HOST_BB | 0x007C)
#define HCI_READ_LOCAL_OOB_EXTENDED_DATA ((u16)HCI_HOST_BB | 0x007D)
#define HCI_READ_LOCAL_VER_INFO ((u16)HCI_INFO | 0x0001)
#define HCI_READ_LOCAL_SUPP_COMMANDS ((u16)HCI_INFO | 0x0002)
#define HCI_READ_LOCAL_SUPP_FEATURES ((u16)HCI_INFO | 0x0003)
#define HCI_READ_LOCAL_EXT_FEATURES ((u16)HCI_INFO | 0x0004)
#define HCI_READ_BUFFER_SIZE ((u16)HCI_INFO | 0x0005)
#define HCI_READ_COUNTRY_CODE ((u16)HCI_INFO | 0x0007)
#define HCI_READ_BD_ADDR ((u16)HCI_INFO | 0x0009)
#define HCI_READ_LOCAL_SUPP_CODECS ((u16)HCI_INFO | 0x000B)
#define HCI_READ_FAILED_CONTACT_COUNT ((u16)HCI_STATUS | 0x0001)
#define HCI_RESET_FAILED_CONTACT_COUNT ((u16)HCI_STATUS | 0x0002)
#define HCI_GET_LINK_QUALITY ((u16)HCI_STATUS | 0x0003)
#define HCI_READ_RSSI ((u16)HCI_STATUS | 0x0005)
#define HCI_READ_AFH_CHANNEL_MAP ((u16)HCI_STATUS | 0x0006)
#define HCI_READ_CLOCK ((u16)HCI_STATUS | 0x0007)
#define HCI_READ_ENCRYPTION_KEY_SIZE ((u16)HCI_STATUS | 0x0008)
#define HCI_SET_TRIGGERED_CLOCK_CAPTURE ((u16)HCI_STATUS | 0x000D)
#define HCI_READ_LOOPBACK_MODE ((u16)HCI_TEST | 0x0001)
#define HCI_WRITE_LOOPBACK_MODE ((u16)HCI_TEST | 0x0002)
#define HCI_ENABLE_DUT_MODE ((u16)HCI_TEST | 0x0003)
#define HCI_WRITE_SIMPLE_PAIRING_DEBUG_MODE ((u16)HCI_TEST | 0x0004)
#define HCI_WRITE_SECURE_CONNECTIONS_TEST_MODE ((u16)HCI_TEST | 0x000A)
#define HCI_ULP_SET_EVENT_MASK ((u16)HCI_ULP | 0x0001)
#define HCI_ULP_READ_BUFFER_SIZE ((u16)HCI_ULP | 0x0002)
#define HCI_ULP_READ_LOCAL_SUPPORTED_FEATURES ((u16)HCI_ULP | 0x0003)
#define HCI_ULP_SET_RANDOM_ADDRESS ((u16)HCI_ULP | 0x0005)
#define HCI_ULP_SET_ADVERTISING_PARAMETERS ((u16)HCI_ULP | 0x0006)
#define HCI_ULP_READ_ADVERTISING_CHANNEL_TX_POWER ((u16)HCI_ULP | 0x0007)
#define HCI_ULP_SET_ADVERTISING_DATA ((u16)HCI_ULP | 0x0008)
#define HCI_ULP_SET_SCAN_RESPONSE_DATA ((u16)HCI_ULP | 0x0009)
#define HCI_ULP_SET_ADVERTISE_ENABLE ((u16)HCI_ULP | 0x000A)
#define HCI_ULP_SET_SCAN_PARAMETERS ((u16)HCI_ULP | 0x000B)
#define HCI_ULP_SET_SCAN_ENABLE ((u16)HCI_ULP | 0x000C)
#define HCI_ULP_CREATE_CONNECTION ((u16)HCI_ULP | 0x000D)
#define HCI_ULP_CREATE_CONNECTION_CANCEL ((u16)HCI_ULP | 0x000E)
#define HCI_ULP_READ_WHITE_LIST_SIZE ((u16)HCI_ULP | 0x000F)
#define HCI_ULP_CLEAR_WHITE_LIST ((u16)HCI_ULP | 0x0010)
#define HCI_ULP_ADD_DEVICE_TO_WHITE_LIST ((u16)HCI_ULP | 0x0011)
#define HCI_ULP_REMOVE_DEVICE_FROM_WHITE_LIST ((u16)HCI_ULP | 0x0012)
#define HCI_ULP_CONNECTION_UPDATE ((u16)HCI_ULP | 0x0013)
#define HCI_ULP_SET_HOST_CHANNEL_CLASSIFICATION ((u16)HCI_ULP | 0x0014)
#define HCI_ULP_READ_CHANNEL_MAP ((u16)HCI_ULP | 0x0015)
#define HCI_ULP_READ_REMOTE_USED_FEATURES ((u16)HCI_ULP | 0x0016)
#define HCI_ULP_ENCRYPT ((u16)HCI_ULP | 0x0017)
#define HCI_ULP_RAND ((u16)HCI_ULP | 0x0018)
#define HCI_ULP_START_ENCRYPTION ((u16)HCI_ULP | 0x0019)
#define HCI_ULP_LONG_TERM_KEY_REQUEST_REPLY ((u16)HCI_ULP | 0x001A)
#define HCI_ULP_LONG_TERM_KEY_REQUEST_NEGATIVE_REPLY ((u16)HCI_ULP | 0x001B)
#define HCI_ULP_READ_SUPPORTED_STATES ((u16)HCI_ULP | 0x001C)
#define HCI_ULP_RECEIVER_TEST ((u16)HCI_ULP | 0x001D)
#define HCI_ULP_TRANSMITTER_TEST ((u16)HCI_ULP | 0x001E)
#define HCI_ULP_TEST_END ((u16)HCI_ULP | 0x001F)
#define HCI_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST_REPLY ((u16)HCI_ULP | 0x0020)
#define HCI_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST_NEGATIVE_REPLY ((u16)HCI_ULP | 0x0021)
#define HCI_ULP_SET_DATA_LENGTH ((u16)HCI_ULP | 0x0022)
#define HCI_ULP_READ_SUGGESTED_DEFAULT_DATA_LENGTH ((u16)HCI_ULP | 0x0023)
#define HCI_ULP_WRITE_SUGGESTED_DEFAULT_DATA_LENGTH ((u16)HCI_ULP | 0x0024)
#define HCI_ULP_READ_LOCAL_P256_PUBLIC_KEY ((u16)HCI_ULP | 0x0025)
#define HCI_ULP_GENERATE_DHKEY ((u16)HCI_ULP | 0x0026)
#define HCI_ULP_ADD_DEVICE_TO_RESOLVING_LIST ((u16)HCI_ULP | 0x0027)
#define HCI_ULP_REMOVE_DEVICE_FROM_RESOLVING_LIST ((u16)HCI_ULP | 0x0028)
#define HCI_ULP_CLEAR_RESOLVING_LIST ((u16)HCI_ULP | 0x0029)
#define HCI_ULP_READ_RESOLVING_LIST_SIZE ((u16)HCI_ULP | 0x002A)
#define HCI_ULP_READ_PEER_RESOLVABLE_ADDRESS ((u16)HCI_ULP | 0x002B)
#define HCI_ULP_READ_LOCAL_RESOLVABLE_ADDRESS ((u16)HCI_ULP | 0x002C)
#define HCI_ULP_SET_ADDRESS_RESOLUTION_ENABLE ((u16)HCI_ULP | 0x002D)
#define HCI_ULP_SET_RANDOM_PRIVATE_ADDRESS_TIMEOUT ((u16)HCI_ULP | 0x002E)
#define HCI_ULP_READ_MAXIMUM_DATA_LENGTH ((u16)HCI_ULP | 0x002F)
#define HCI_ULP_READ_PHY ((u16)HCI_ULP | 0x0030)
#define HCI_ULP_SET_DEFAULT_PHY ((u16)HCI_ULP | 0x0031)
#define HCI_ULP_SET_PHY ((u16)HCI_ULP | 0x0032)
#define HCI_ULP_ENHANCED_RECEIVER_TEST ((u16)HCI_ULP | 0x0033)
#define HCI_ULP_ENHANCED_TRANSMITTER_TEST ((u16)HCI_ULP | 0x0034)
#define HCI_EV_INQUIRY_COMPLETE ((u8)0x01)
#define HCI_EV_INQUIRY_RESULT ((u8)0x02)
#define HCI_EV_CONN_COMPLETE ((u8)0x03)
#define HCI_EV_CONN_REQUEST ((u8)0x04)
#define HCI_EV_DISCONNECT_COMPLETE ((u8)0x05)
#define HCI_EV_AUTH_COMPLETE ((u8)0x06)
#define HCI_EV_REMOTE_NAME_REQ_COMPLETE ((u8)0x07)
#define HCI_EV_ENCRYPTION_CHANGE ((u8)0x08)
#define HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE ((u8)0x09)
#define HCI_EV_MASTER_LINK_KEY_COMPLETE ((u8)0x0A)
#define HCI_EV_READ_REM_SUPP_FEATURES_COMPLETE ((u8)0x0B)
#define HCI_EV_READ_REMOTE_VER_INFO_COMPLETE ((u8)0x0C)
#define HCI_EV_QOS_SETUP_COMPLETE ((u8)0x0D)
#define HCI_EV_COMMAND_COMPLETE ((u8)0x0E)
#define HCI_EV_COMMAND_STATUS ((u8)0x0F)
#define HCI_EV_HARDWARE_ERROR ((u8)0x10)
#define HCI_EV_FLUSH_OCCURRED ((u8)0x11)
#define HCI_EV_ROLE_CHANGE ((u8)0x12)
#define HCI_EV_NUMBER_COMPLETED_PKTS ((u8)0x13)
#define HCI_EV_MODE_CHANGE ((u8)0x14)
#define HCI_EV_RETURN_LINK_KEYS ((u8)0x15)
#define HCI_EV_PIN_CODE_REQ ((u8)0x16)
#define HCI_EV_LINK_KEY_REQ ((u8)0x17)
#define HCI_EV_LINK_KEY_NOTIFICATION ((u8)0x18)
#define HCI_EV_LOOPBACK_COMMAND ((u8)0x19)
#define HCI_EV_DATA_BUFFER_OVERFLOW ((u8)0x1A)
#define HCI_EV_MAX_SLOTS_CHANGE ((u8)0x1B)
#define HCI_EV_READ_CLOCK_OFFSET_COMPLETE ((u8)0x1C)
#define HCI_EV_CONN_PACKET_TYPE_CHANGED ((u8)0x1D)
#define HCI_EV_QOS_VIOLATION ((u8)0x1E)
#define HCI_EV_PAGE_SCAN_MODE_CHANGE ((u8)0x1F)
#define HCI_EV_PAGE_SCAN_REP_MODE_CHANGE ((u8)0x20)
/* 1.2 Events */
#define HCI_EV_FLOW_SPEC_COMPLETE ((u8)0x21)
#define HCI_EV_INQUIRY_RESULT_WITH_RSSI ((u8)0x22)
#define HCI_EV_READ_REM_EXT_FEATURES_COMPLETE ((u8)0x23)
#define HCI_EV_FIXED_ADDRESS ((u8)0x24)
#define HCI_EV_ALIAS_ADDRESS ((u8)0x25)
#define HCI_EV_GENERATE_ALIAS_REQ ((u8)0x26)
#define HCI_EV_ACTIVE_ADDRESS ((u8)0x27)
#define HCI_EV_ALLOW_PRIVATE_PAIRING ((u8)0x28)
#define HCI_EV_ALIAS_ADDRESS_REQ ((u8)0x29)
#define HCI_EV_ALIAS_NOT_RECOGNISED ((u8)0x2A)
#define HCI_EV_FIXED_ADDRESS_ATTEMPT ((u8)0x2B)
#define HCI_EV_SYNC_CONN_COMPLETE ((u8)0x2C)
#define HCI_EV_SYNC_CONN_CHANGED ((u8)0x2D)
/* 2.1 Events */
#define HCI_EV_SNIFF_SUB_RATE ((u8)0x2E)
#define HCI_EV_EXTENDED_INQUIRY_RESULT ((u8)0x2F)
#define HCI_EV_ENCRYPTION_KEY_REFRESH_COMPLETE ((u8)0x30)
#define HCI_EV_IO_CAPABILITY_REQUEST ((u8)0x31)
#define HCI_EV_IO_CAPABILITY_RESPONSE ((u8)0x32)
#define HCI_EV_USER_CONFIRMATION_REQUEST ((u8)0x33)
#define HCI_EV_USER_PASSKEY_REQUEST ((u8)0x34)
#define HCI_EV_REMOTE_OOB_DATA_REQUEST ((u8)0x35)
#define HCI_EV_SIMPLE_PAIRING_COMPLETE ((u8)0x36)
#define HCI_EV_LST_CHANGE ((u8)0x38)
#define HCI_EV_ENHANCED_FLUSH_COMPLETE ((u8)0x39)
#define HCI_EV_USER_PASSKEY_NOTIFICATION ((u8)0x3B)
#define HCI_EV_KEYPRESS_NOTIFICATION ((u8)0x3C)
#define HCI_EV_REM_HOST_SUPPORTED_FEATURES ((u8)0x3D)
#define HCI_EV_ULP ((u8)0x3E)
/* TCC + CSB Events */
#define HCI_EV_TRIGGERED_CLOCK_CAPTURE ((u8)0x4E)
#define HCI_EV_SYNCHRONIZATION_TRAIN_COMPLETE ((u8)0x4F)
#define HCI_EV_SYNCHRONIZATION_TRAIN_RECEIVED ((u8)0x50)
#define HCI_EV_CSB_RECEIVE ((u8)0x51)
#define HCI_EV_CSB_TIMEOUT ((u8)0x52)
#define HCI_EV_TRUNCATED_PAGE_COMPLETE ((u8)0x53)
#define HCI_EV_SLAVE_PAGE_RESPONSE_TIMEOUT ((u8)0x54)
#define HCI_EV_CSB_CHANNEL_MAP_CHANGE ((u8)0x55)
#define HCI_EV_INQUIRY_RESPONSE_NOTIFICATION ((u8)0x56)
/* 4.1 Events */
#define HCI_EV_AUTHENTICATED_PAYLOAD_TIMEOUT_EXPIRED ((u8)0x57)
/* ULP Sub-opcodes */
#define HCI_EV_ULP_CONNECTION_COMPLETE ((u8)0x01)
#define HCI_EV_ULP_ADVERTISING_REPORT ((u8)0x02)
#define HCI_EV_ULP_CONNECTION_UPDATE_COMPLETE ((u8)0x03)
#define HCI_EV_ULP_READ_REMOTE_USED_FEATURES_COMPLETE ((u8)0x04)
#define HCI_EV_ULP_LONG_TERM_KEY_REQUEST ((u8)0x05)
#define HCI_EV_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST ((u8)0x06)
#define HCI_EV_ULP_DATA_LENGTH_CHANGE ((u8)0x07)
#define HCI_EV_ULP_READ_LOCAL_P256_PUB_KEY_COMPLETE ((u8)0x08)
#define HCI_EV_ULP_GENERATE_DHKEY_COMPLETE ((u8)0x09)
#define HCI_EV_ULP_ENHANCED_CONNECTION_COMPLETE ((u8)0x0A)
#define HCI_EV_ULP_DIRECT_ADVERTISING_REPORT ((u8)0x0B)
#define HCI_EV_ULP_PHY_UPDATE_COMPLETE ((u8)0x0C)
/* The subevent code of the ULP Used Channel Selection event shall be
updated once it is defined in the spec.
It is assigned 0x0D temporarily. */
#define HCI_EV_ULP_USED_CHANNEL_SELECTION ((u8)0x0D)
#define HCI_CMD_DECODE(entry) case entry: ret = #entry; break
#define HCI_EV_DECODE(entry) case entry: ret = #entry; break
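/* Illustrative expansion: inside a switch on the opcode with a local 'ret',
 *   HCI_CMD_DECODE(HCI_RESET);
 * becomes
 *   case HCI_RESET: ret = "HCI_RESET"; break;
 * i.e. the stringizing operator (#) turns the constant's name into its label. */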
#endif /* __SCSC_BT_HCI_H */

File diff suppressed because it is too large

View file

@ -0,0 +1,240 @@
/****************************************************************************
*
* Internal BT driver definitions
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
*
****************************************************************************/
#ifndef __SCSC_BT_PRIV_H
#define __SCSC_BT_PRIV_H
#include <scsc/scsc_mx.h>
#include <scsc/api/bsmhcp.h>
#include <scsc/api/bhcs.h>
#include "scsc_shm.h"
#ifndef UNUSED
#define UNUSED(x) ((void)(x))
#endif
/**
* Size of temporary buffer (on stack) for peeking at HCI/H4
* packet header held in FIFO.
*
* Must be big enough to decode the length of any HCI packet type.
*
* For ACL that is 1 H4 header byte + 2 connection-handle bytes + 2 data-length bytes
*/
#define H4DMUX_HEADER_HCI (1 + 3) /* CMD, SCO */
#define H4DMUX_HEADER_ACL (1 + 4) /* ACL */
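/* e.g. peeking H4DMUX_HEADER_ACL bytes from the FIFO yields the H4 type byte
 * plus the 4-byte ACL header; the remaining packet length then follows via
 * HCI_ACL_DATA_LENGTH(peek + 1), skipping the H4 byte (an illustrative use,
 * not a quote from the driver). */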
#define HCI_COMMAND_PKT (1)
#define HCI_ACLDATA_PKT (2)
#define HCI_EVENT_PKT (4)
#define ACLDATA_HEADER_SIZE (4)
#define L2CAP_HEADER_SIZE (4)
#define HCI_ACL_DATA_FLAGS(data) ((*(data + 1)) & 0xf0)
#define HCI_ACL_DATA_CON_HDL(data) ((u16)(*(data + 0) | ((*(data + 1)) & 0x0f) << 8))
#define HCI_ACL_DATA_LENGTH(data) ((u16)(*(data + 2) | (*(data + 3)) << 8))
#define HCI_L2CAP_LENGTH(data) ((u16)(*(data + 4) | (*(data + 5)) << 8))
#define HCI_L2CAP_CID(data) ((u16)(*(data + 6) | (*(data + 7)) << 8))
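/* Usage sketch (illustrative): for a raw ACL header
 *   u8 hdr[] = { 0x01, 0x20, 0x08, 0x00 };
 * HCI_ACL_DATA_CON_HDL(hdr) == 0x001, HCI_ACL_DATA_FLAGS(hdr) == 0x20
 * (PB/BC flags) and HCI_ACL_DATA_LENGTH(hdr) == 8; all multi-byte fields
 * are little-endian on the wire. */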
#define HCI_EVENT_NUMBER_OF_COMPLETED_PACKETS_EVENT (0x13)
#define HCI_EVENT_HARDWARE_ERROR_EVENT (0x10)
#define SCSC_BT_CONF "bluetooth/bt.hcf"
#define SCSC_BT_ADDR "/efs/bluetooth/bt_addr"
#define SCSC_BT_ADDR_LEN (6)
#define SCSC_H4_DEVICE_NAME "scsc_h4_0"
#define SCSC_BT_CONNECTION_INFO_MAX (0x1000)
#define SCSC_TTY_MINORS (8)
enum scsc_bt_shm_thread_flags;
enum scsc_bt_read_op {
BT_READ_OP_NONE,
BT_READ_OP_HCI_EVT,
BT_READ_OP_HCI_EVT_ERROR,
BT_READ_OP_ACL_DATA,
BT_READ_OP_ACL_CREDIT,
BT_READ_OP_STOP
};
struct scsc_bt_connection_info {
u8 state;
u16 length;
u16 l2cap_cid;
};
#define CONNECTION_NONE (0)
#define CONNECTION_ACTIVE (1)
#define CONNECTION_DISCONNECTED (2)
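/* Presumed lifecycle of connection_handle_list[].state:
 * CONNECTION_NONE -> CONNECTION_ACTIVE on connection complete,
 * CONNECTION_ACTIVE -> CONNECTION_DISCONNECTED on disconnection. */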
enum bt_link_type_enum {
BT_LINK_TYPE_SCO = 0,
BT_LINK_TYPE_ACL = 1,
BT_LINK_TYPE_SETUP_ID = 2,
BT_LINK_TYPE_SETUP_FHS = 3,
BT_LINK_TYPE_ESCO = 4,
BT_LINK_TYPE_ACL_23 = 5,
BT_LINK_TYPE_ESCO_23 = 6,
BT_LINK_TYPE_ANTPLUS = 7,
MAX_BT_LINK_TYPE = 7
};
struct scsc_bt_service {
dev_t device;
struct class *class;
struct scsc_mx *maxwell_core;
struct scsc_service *service;
struct device *dev;
struct cdev h4_cdev;
struct device *h4_device;
struct file *h4_file;
bool h4_users;
atomic_t h4_readers;
atomic_t h4_writers;
size_t h4_write_offset;
atomic_t error_count;
atomic_t service_users;
u8 *debug_ptr;
wait_queue_head_t debug_wait;
struct task_struct *debug_thread;
scsc_mifram_ref debug_output_ref; /* Bluetooth debug output reference */
struct completion debug_thread_complete;
bool debug_terminate;
scsc_mifram_ref bhcs_ref; /* Bluetooth host configuration service reference */
scsc_mifram_ref bsmhcp_ref; /* Bluetooth shared memory host controller protocol reference */
scsc_mifram_ref config_ref; /* Bluetooth configuration reference */
struct BSMHCP_PROTOCOL *bsmhcp_protocol; /* Bluetooth shared memory host controller protocol pointer */
size_t read_offset;
enum scsc_bt_read_op read_operation;
u32 read_index;
wait_queue_head_t read_wait;
wait_queue_head_t info_wait;
int last_alloc; /* Cached previous alloc index to aid search */
u8 allocated[BSMHCP_DATA_BUFFER_TX_ACL_SIZE];
u32 allocated_count;
u32 freed_count;
bool processed[BSMHCP_TRANSFER_RING_EVT_SIZE];
struct scsc_bt_connection_info connection_handle_list[SCSC_BT_CONNECTION_INFO_MAX];
bool hci_event_paused;
bool acldata_paused;
struct wake_lock read_wake_lock;
struct wake_lock write_wake_lock;
struct wake_lock service_wake_lock;
size_t write_wake_lock_count;
size_t write_wake_unlock_count;
size_t interrupt_count;
size_t interrupt_read_count;
size_t interrupt_write_count;
size_t interrupt_debug_count;
u32 mailbox_hci_evt_read;
u32 mailbox_hci_evt_write;
u32 mailbox_acl_rx_read;
u32 mailbox_acl_rx_write;
u32 mailbox_acl_free_read;
u32 mailbox_acl_free_read_scan;
u32 mailbox_acl_free_write;
u32 mailbox_debug_read;
u32 mailbox_debug_write;
/* NOTE! The naming takes the perspective of the local device (==src), as opposed to the L2CAP
* spec, which names the sender of the current signal as the src. */
u16 avdtp_signaling_src_cid;
u16 avdtp_signaling_dst_cid;
u16 avdtp_streaming_src_cid;
u16 avdtp_streaming_dst_cid;
u16 avdtp_hci_connection_handle;
struct completion recovery_release_complete;
struct completion recovery_probe_complete;
};
extern struct scsc_bt_service bt_service;
void scsc_bt_shm_debug(void);
/* Coex avdtp detection */
/* The buffers passed for inspection begin at the L2CAP basic header, as does the length
* passed in the function calls */
#define AVDTP_DETECT_MIN_DATA_LENGTH (12) /* We always want to look for the SRC CID */
#define AVDTP_DETECT_MIN_DATA_LENGTH_CON_RSP (16) /* For CON RSP, we want the result, too */
#define AVDTP_DETECT_MIN_AVDTP_LENGTH (6) /* Basic L2CAP header + 2 AVDTP octets as min */
#define HCI_ACL_PACKET_BOUNDARY_START_FLUSH (2)
/* Can't use HCI_L2CAP_CID(data), since that assumes 4 bytes of HCI header, which has been stripped
* for the calls to the avdtp detection functions */
#define HCI_L2CAP_RX_CID(data) ((u16)(*(data + 2) | (*(data + 3)) << 8))
#define HCI_L2CAP_CODE(data) ((u8)(*(data + 4)))
#define HCI_L2CAP_CON_REQ_PSM(data) ((u16)(*(data + 8) | (*(data + 9)) << 8))
/* Valid for at least connection request/response and disconnection request */
#define HCI_L2CAP_SOURCE_CID(data) ((u16)(*(data + 10) | (*(data + 11)) << 8))
/* Valid for at least connection and disconnection responses */
#define HCI_L2CAP_RSP_DEST_CID(data) ((u16)(*(data + 8) | (*(data + 9)) << 8))
#define HCI_L2CAP_CON_RSP_RESULT(data) ((u16)(*(data + 12) | (*(data + 13)) << 8))
#define HCI_L2CAP_CON_RSP_RESULT_SUCCESS (0)
#define L2CAP_AVDTP_PSM 0x0019
#define L2CAP_SIGNALING_CID 0x0001
#define L2CAP_CODE_CONNECT_REQ 0x02
#define L2CAP_CODE_CONNECT_RSP 0x03
#define L2CAP_CODE_DISCONNECT_REQ 0x06
#define L2CAP_CODE_DISCONNECT_RSP 0x07
#define AVDTP_MESSAGE_TYPE_OFFSET 4 /* Assuming only single packet type */
#define AVDTP_MESSAGE_TYPE_MASK 0x03
#define AVDTP_MESSAGE_TYPE(data) ((u8)(*(data + AVDTP_MESSAGE_TYPE_OFFSET)) & AVDTP_MESSAGE_TYPE_MASK)
#define AVDTP_MESSAGE_TYPE_RSP_ACCEPT 0x02
#define AVDTP_SIGNAL_ID_OFFSET 5 /* Assuming only single packet type */
#define AVDTP_SIGNAL_ID_MASK 0x1F
#define AVDTP_SIGNAL_ID(data) ((u8)(*(data + AVDTP_SIGNAL_ID_OFFSET)) & AVDTP_SIGNAL_ID_MASK)
#define AVDTP_SIGNAL_ID_START 0x07
#define AVDTP_SIGNAL_ID_CLOSE 0x08
#define AVDTP_SIGNAL_ID_SUSPEND 0x09
#define AVDTP_SIGNAL_ID_ABORT 0x0A
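/* Usage sketch (illustrative): with 'data' starting at the L2CAP basic
 * header, octet 4 carries the AVDTP transaction label and message type and
 * octet 5 the signal id.  E.g. data[4] = 0x52, data[5] = 0x07 gives
 * AVDTP_MESSAGE_TYPE(data) == AVDTP_MESSAGE_TYPE_RSP_ACCEPT and
 * AVDTP_SIGNAL_ID(data) == AVDTP_SIGNAL_ID_START. */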
extern uint16_t avdtp_signaling_src_cid;
extern uint16_t avdtp_signaling_dst_cid;
extern uint16_t avdtp_streaming_src_cid;
extern uint16_t avdtp_streaming_dst_cid;
extern uint16_t avdtp_hci_connection_handle;
#define AVDTP_DETECT_SIGNALING_IGNORE 0
#define AVDTP_DETECT_SIGNALING_ACTIVE 1
#define AVDTP_DETECT_SIGNALING_INACTIVE 2
bool scsc_avdtp_detect_connection_tx(uint16_t hci_connection_handle, const unsigned char *data, uint16_t length);
bool scsc_avdtp_detect_connection_rx(uint16_t hci_connection_handle, const unsigned char *data, uint16_t length);
uint8_t scsc_avdtp_detect_signaling_tx(const unsigned char *data);
uint8_t scsc_avdtp_detect_signaling_rx(const unsigned char *data);
void scsc_avdtp_detect_tx(u16 hci_connection_handle, const unsigned char *data, uint16_t length);
void scsc_avdtp_detect_rx(u16 hci_connection_handle, const unsigned char *data, uint16_t length);
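/* Presumed call pattern: the shared-memory data path feeds each ACL payload
 * through scsc_avdtp_detect_tx()/_rx() so that active A2DP streaming can be
 * tracked (via the avdtp_* CIDs and handle above) for coex signalling. */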
void scsc_bt_dump_driver_state(void);
const char *scsc_hci_evt_decode_event_code(u8 *data);
#endif /* __SCSC_BT_PRIV_H */

File diff suppressed because it is too large

View file

@ -0,0 +1,27 @@
/****************************************************************************
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
*
****************************************************************************/
/* Shared memory interface API */
#ifndef __SCSC_SHM_H__
#define __SCSC_SHM_H__
#include <scsc/api/bsmhcp.h>
int scsc_bt_shm_init(u32 filter);
void scsc_bt_shm_stop_thread(void);
void scsc_bt_shm_exit(void);
ssize_t scsc_bt_shm_h4_read(struct file *file,
char __user *buf,
size_t len,
loff_t *offset);
ssize_t scsc_bt_shm_h4_write(struct file *file,
const char __user *buf,
size_t len,
loff_t *offset);
unsigned int scsc_bt_shm_h4_poll(struct file *file, poll_table *wait);
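/* Illustrative wiring (assumed, not part of this header): these handlers
 * would back the SCSC_H4_DEVICE_NAME character device, e.g.
 *   static const struct file_operations h4_fops = {
 *       .owner = THIS_MODULE,
 *       .read  = scsc_bt_shm_h4_read,
 *       .write = scsc_bt_shm_h4_write,
 *       .poll  = scsc_bt_shm_h4_poll,
 *   };
 */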
#endif