/* mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 17:18:05 -04:00) */
/****************************************************************************
 *
 * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/version.h>
#include "scsc_mx_impl.h"
#include "miframman.h"
#include "mifmboxman.h"
#include "mxman.h"
#include "srvman.h"
#include "mxmgmt_transport.h"
#include "gdb_transport.h"
#include "mxconf.h"
#include "fwimage.h"
#include "fwhdr.h"
#include "mxlog.h"
#include "fw_panic_record.h"
#include "mxproc.h"
#include "mxlog_transport.h"
#include <scsc/kic/slsi_kic_lib.h>
#include <scsc/scsc_release.h>
#include "scsc_mx.h"
#include <linux/fs.h>

#include <scsc/scsc_logring.h>

#define STRING_BUFFER_MAX_LENGTH 128
#define NUMBER_OF_STRING_ARGS 1
#define MX_DRAM_SIZE (4 * 1024 * 1024)
#define MX_FW_RUNTIME_LENGTH (1024 * 1024)
#define WAIT_FOR_FW_TO_START_DELAY_MS 1000
#define MBOX2_MAGIC_NUMBER 0xbcdeedcb
#define MBOX_INDEX_0 0
#define MBOX_INDEX_1 1
#define MBOX_INDEX_2 2
#define MBOX_INDEX_3 3
#define MBOX_INDEX_4 4
#define MBOX_INDEX_5 5
#define MBOX_INDEX_6 6
#define MBOX_INDEX_7 7

static bool allow_unidentified_firmware;
module_param(allow_unidentified_firmware, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unidentified_firmware, "Allow unidentified firmware");

static bool skip_header;
module_param(skip_header, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(skip_header, "Skip header, assuming unidentified firmware");

static bool crc_check_allow_none = true;
module_param(crc_check_allow_none, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(crc_check_allow_none, "Allow skipping firmware CRC checks if CRC is not present");

static int crc_check_period_ms = 30000;
module_param(crc_check_period_ms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(crc_check_period_ms, "Time period for checking the firmware CRCs");

static ulong mm_completion_timeout_ms = 2000;
module_param(mm_completion_timeout_ms, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mm_completion_timeout_ms, "Timeout wait_for_mm_msg_start_ind (ms) - default 2000. 0 = infinite");

static bool skip_mbox0_check;
module_param(skip_mbox0_check, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(skip_mbox0_check, "Allow skipping firmware mbox0 signature check");

static uint mif_access_max_time_ms = 100;
module_param(mif_access_max_time_ms, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mif_access_max_time_ms, "Timeout send_mm_msg_stop_blocking (ms) - default 100");

static uint firmware_startup_flags;
module_param(firmware_startup_flags, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(firmware_startup_flags, "0 = Proceed as normal (default); Bit 0 = 1 - spin at start of CRT0; Other bits reserved = 0");

#ifdef CONFIG_SCSC_CHV_SUPPORT
/* First arg controls chv function */
int chv_run;
module_param(chv_run, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(chv_run, "Run chv f/w: 0 = feature disabled, 1 = continuous checking, 2 = one shot; anything else is undefined");

/* Optional array of args for firmware to interpret when chv_run = 1 */
static unsigned int chv_argv[32];
static int chv_argc;

module_param_array(chv_argv, uint, &chv_argc, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(chv_argv, "Array of up to 32 x u32 args for the CHV firmware when chv_run = 1");
#endif

static bool disable_auto_coredump;
module_param(disable_auto_coredump, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_auto_coredump, "Disable driver automatic coredump");

static bool disable_error_handling;
module_param(disable_error_handling, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_error_handling, "Disable error handling");

static bool disable_recovery_handling;
module_param(disable_recovery_handling, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_recovery_handling, "Disable recovery handling");

static uint panic_record_delay = 1;
module_param(panic_record_delay, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(panic_record_delay, "Delay in ms before accessing the panic record");

static bool disable_logger = true;
module_param(disable_logger, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_logger, "Disable launch of user space logger");

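/*
 * Note (editorial, not from the original file): the parameters above are
 * declared with S_IRUGO | S_IWUSR, so the kernel exposes them under the
 * standard /sys/module/<module_name>/parameters/<param> sysfs path for
 * inspection and, where writable, runtime tuning. The exact module name in
 * that path depends on how this file is built and is not defined here.
 */
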
/**
 * Maxwell Agent Management Messages.
 *
 * TODO: common defn with firmware, generated.
 *
 * The numbers here *must* match the firmware!
 */
enum {
	MM_START_IND = 0,
	MM_HALT_REQ = 1,
	MM_FORCE_PANIC = 2,
	MM_HOST_SUSPEND = 3,
	MM_HOST_RESUME = 4
} ma_msg;

/**
 * Format of the Maxwell agent messages
 * on the Maxwell management transport stream.
 */
struct ma_msg_packet {
	uint8_t  ma_msg; /* Message from ma_msg enum */
	uint32_t arg;    /* Optional arg set by f/w in some to-host messages */
} __packed;

static void mxman_stop(struct mxman *mxman);
static void print_mailboxes(struct mxman *mxman);
static int _mx_exec(char *prog, int wait_exec);

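/*
 * Wait for the firmware to send MM_START_IND on the management transport.
 * A mm_completion_timeout_ms of 0 waits (interruptibly) forever; otherwise
 * the wait times out after mm_completion_timeout_ms milliseconds and
 * -ETIMEDOUT is returned.
 */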
#ifndef MAXWELL_SKIP_MANAGER
static int wait_for_mm_msg_start_ind(struct mxman *mxman)
{
	int r;

	if (0 == mm_completion_timeout_ms) {
		/* Zero implies infinite wait */
		r = wait_for_completion_interruptible(&mxman->mm_msg_start_ind_completion);
		/* r = -ERESTARTSYS if interrupted, 0 if completed */
		return r;
	}
	r = wait_for_completion_timeout(&mxman->mm_msg_start_ind_completion, msecs_to_jiffies(mm_completion_timeout_ms));
	if (r == 0) {
		SCSC_TAG_ERR(MXMAN, "timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
#endif

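/*
 * Launch the userspace coredump helper (/system/bin/moredump), retrying for
 * up to 20 seconds while userspace is still frozen after suspend (-EBUSY).
 * KIC recovery events are raised before and, on success, after the dump.
 */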
static int coredump_helper(void)
{
	int r;
	int i;

	slsi_kic_system_event(slsi_kic_system_event_category_recovery,
			      slsi_kic_system_events_coredump_in_progress, GFP_KERNEL);

	for (i = 0; i < 20; i++) {
		r = _mx_exec("/system/bin/moredump", UMH_WAIT_PROC);
		if (r != -EBUSY)
			break;

		/* If the usermode helper fails with -EBUSY, the userspace is
		 * likely still frozen from suspend. Back off and retry.
		 */
		SCSC_TAG_INFO(MXMAN, "waiting for userspace to thaw...\n");
		msleep(1000);
	}

	/* Application return codes are in the MSB */
	if (r > 0xffL)
		SCSC_TAG_INFO(MXMAN, "moredump.bin exit(%ld), check syslog\n", (r & 0xff00L) >> 8);

	if (r >= 0) {
		slsi_kic_system_event(slsi_kic_system_event_category_recovery,
				      slsi_kic_system_events_coredump_done, GFP_KERNEL);
	}

	return r;
}

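/*
 * Send MM_HALT_REQ to the firmware over the management transport and give it
 * mif_access_max_time_ms to act on it before the caller continues shutting
 * down; no explicit response is waited for here.
 */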
static int send_mm_msg_stop_blocking(struct mxman *mxman)
{
	struct ma_msg_packet message = { .ma_msg = MM_HALT_REQ };

	mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));
	msleep(mif_access_max_time_ms);

	return 0;
}

#ifndef MAXWELL_SKIP_MANAGER
/** Receive handler for messages from the FW along the maxwell management transport */
static void mxman_message_handler(const void *message, void *data)
{
	struct mxman *mxman = (struct mxman *)data;

	/* Forward the message to the applicable service to deal with */
	const struct ma_msg_packet *msg = message;

	switch (msg->ma_msg) {
	case MM_START_IND:
		/* The arg can be used to determine the WLBT/S610 hardware revision */
		SCSC_TAG_INFO(MXMAN, "Received MM_START_IND message from the firmware, arg=0x%04x\n", msg->arg);
		mxman->rf_hw_ver = msg->arg;
		complete(&mxman->mm_msg_start_ind_completion);
		break;
	default:
		/* HERE: Unknown message, raise fault */
		SCSC_TAG_WARNING(MXMAN, "Received unknown message from the firmware: msg->ma_msg=%d\n", msg->ma_msg);
		break;
	}
}
#endif

/*
 * This function calculates and checks two or three (depending on the crc32_over_binary flag)
 * crc32 values in the firmware header. The function will check crc32 over the firmware binary
 * (i.e. everything in the file following the header) only if crc32_over_binary is set to 'true'.
 * This includes initialised data regions so it can be used to check when loading but will not be
 * meaningful once execution starts.
 */
static int do_fw_crc32_checks(char *fw, u32 fw_image_size, struct fwhdr *fwhdr, bool crc32_over_binary)
{
	int r;

	if ((fwhdr->fw_crc32 == 0 || fwhdr->header_crc32 == 0 || fwhdr->const_crc32 == 0) && crc_check_allow_none == 0) {
		SCSC_TAG_ERR(MXMAN, "error: CRC is missing fw_crc32=%d header_crc32=%d crc_check_allow_none=%d\n",
			     fwhdr->fw_crc32, fwhdr->header_crc32, crc_check_allow_none);
		return -EINVAL;
	}

	if (fwhdr->header_crc32 == 0 && crc_check_allow_none == 1) {
		SCSC_TAG_INFO(MXMAN, "Skipping CRC check header_crc32=%d crc_check_allow_none=%d\n",
			      fwhdr->header_crc32, crc_check_allow_none);
	} else {
		/*
		 * CRC-32-IEEE of all preceding header fields (including other CRCs).
		 * Always the last word in the header.
		 */
		r = fwimage_check_fw_header_crc(fw, fwhdr->hdr_length, fwhdr->header_crc32);
		if (r) {
			SCSC_TAG_ERR(MXMAN, "fwimage_check_fw_header_crc() failed\n");
			return r;
		}
	}

	if (fwhdr->const_crc32 == 0 && crc_check_allow_none == 1) {
		SCSC_TAG_INFO(MXMAN, "Skipping CRC check const_crc32=%d crc_check_allow_none=%d\n",
			      fwhdr->const_crc32, crc_check_allow_none);
	} else {
		/*
		 * CRC-32-IEEE over the constant sections grouped together at start of firmware binary.
		 * This CRC should remain valid during execution. It can be used by a run-time checker on
		 * the host to detect firmware corruption (not all memory masters are subject to MPUs).
		 */
		r = fwimage_check_fw_const_section_crc(fw, fwhdr->const_crc32, fwhdr->const_fw_length, fwhdr->hdr_length);
		if (r) {
			SCSC_TAG_ERR(MXMAN, "fwimage_check_fw_const_section_crc() failed\n");
			return r;
		}
	}

	if (crc32_over_binary) {
		if (fwhdr->fw_crc32 == 0 && crc_check_allow_none == 1)
			SCSC_TAG_INFO(MXMAN, "Skipping CRC check fw_crc32=%d crc_check_allow_none=%d\n",
				      fwhdr->fw_crc32, crc_check_allow_none);
		else {
			/*
			 * CRC-32-IEEE over the firmware binary (i.e. everything
			 * in the file following this header).
			 * This includes initialised data regions so it can be used to
			 * check when loading but will not be meaningful once execution starts.
			 */
			r = fwimage_check_fw_crc(fw, fw_image_size, fwhdr->hdr_length, fwhdr->fw_crc32);
			if (r) {
				SCSC_TAG_ERR(MXMAN, "fwimage_check_fw_crc() failed\n");
				return r;
			}
		}
	}

	return 0;
}

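/*
 * Periodic firmware CRC checking: a delayed work item re-runs
 * do_fw_crc32_checks() (header and const sections only) every
 * crc_check_period_ms and re-queues itself; a CRC failure escalates
 * through mxman_fail().
 */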
static void fw_crc_wq_start(struct mxman *mxman)
{
	if (mxman->check_crc && crc_check_period_ms)
		queue_delayed_work(mxman->fw_crc_wq, &mxman->fw_crc_work, msecs_to_jiffies(crc_check_period_ms));
}

static void fw_crc_work_func(struct work_struct *work)
{
	int r;
	struct mxman *mxman = container_of((struct delayed_work *)work, struct mxman, fw_crc_work);

	r = do_fw_crc32_checks(mxman->fw, mxman->fw_image_size, &mxman->fwhdr, false);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "do_fw_crc32_checks() failed r=%d\n", r);
		mxman_fail(mxman, SCSC_PANIC_CODE_HOST << 15);
		return;
	}
	fw_crc_wq_start(mxman);
}

static void fw_crc_wq_init(struct mxman *mxman)
{
	mxman->fw_crc_wq = create_singlethread_workqueue("fw_crc_wq");
	INIT_DELAYED_WORK(&mxman->fw_crc_work, fw_crc_work_func);
}

static void fw_crc_wq_stop(struct mxman *mxman)
{
	mxman->check_crc = false;
	cancel_delayed_work(&mxman->fw_crc_work);
	flush_workqueue(mxman->fw_crc_wq);
}

static void fw_crc_wq_deinit(struct mxman *mxman)
{
	fw_crc_wq_stop(mxman);
	destroy_workqueue(mxman->fw_crc_wq);
}

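/*
 * Bring up the host<->firmware transports (management, R4/M4 GDB, mxlog),
 * then allocate an mxconf structure in MIF RAM and serialise each transport's
 * configuration into it so the firmware can locate the streams at boot.
 * On any failure the transports initialised so far are released.
 */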
static int transports_init(struct mxman *mxman)
{
	struct mxconf *mxconf;
	int r;
	struct scsc_mx *mx = mxman->mx;

	/* Initialise mx management stack */
	r = mxmgmt_transport_init(scsc_mx_get_mxmgmt_transport(mx), mx);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "mxmgmt_transport_init() failed\n");
		return r;
	}

	/* Initialise gdb transport for cortex-R4 */
	r = gdb_transport_init(scsc_mx_get_gdb_transport_r4(mx), mx, GDB_TRANSPORT_R4);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "gdb_transport_init() failed\n");
		mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mx));
		return r;
	}

	/* Initialise gdb transport for cortex-M4 */
	r = gdb_transport_init(scsc_mx_get_gdb_transport_m4(mx), mx, GDB_TRANSPORT_M4);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "gdb_transport_init() failed\n");
		gdb_transport_release(scsc_mx_get_gdb_transport_r4(mx));
		mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mx));
		return r;
	}
	/* Initialise mxlog transport */
	r = mxlog_transport_init(scsc_mx_get_mxlog_transport(mx), mx);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "mxlog_transport_init() failed\n");
		gdb_transport_release(scsc_mx_get_gdb_transport_m4(mx));
		gdb_transport_release(scsc_mx_get_gdb_transport_r4(mx));
		mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mx));
		return r;
	}

	/*
	 * Allocate and initialise the infrastructure config structure,
	 * including the mx management stack config information.
	 */
	mxconf = miframman_alloc(scsc_mx_get_ramman(mx), sizeof(struct mxconf), 4);
	if (!mxconf) {
		SCSC_TAG_ERR(MXMAN, "miframman_alloc() failed\n");
		gdb_transport_release(scsc_mx_get_gdb_transport_m4(mx));
		gdb_transport_release(scsc_mx_get_gdb_transport_r4(mx));
		mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mx));
		mxlog_transport_release(scsc_mx_get_mxlog_transport(mx));
		return -ENOMEM;
	}
	mxman->mxconf = mxconf;
	mxconf->magic = MXCONF_MAGIC;
	mxconf->version.major = MXCONF_VERSION_MAJOR;
	mxconf->version.minor = MXCONF_VERSION_MINOR;
	/* serialise mxmgmt transport */
	mxmgmt_transport_config_serialise(scsc_mx_get_mxmgmt_transport(mx), &mxconf->mx_trans_conf);
	/* serialise Cortex-R4 gdb transport */
	gdb_transport_config_serialise(scsc_mx_get_gdb_transport_r4(mx), &mxconf->mx_trans_conf_gdb_r4);
	/* serialise Cortex-M4 gdb transport */
	gdb_transport_config_serialise(scsc_mx_get_gdb_transport_m4(mx), &mxconf->mx_trans_conf_gdb_m4);
	/* serialise mxlog transport */
	mxlog_transport_config_serialise(scsc_mx_get_mxlog_transport(mx), &mxconf->mxlogconf);
	SCSC_TAG_DEBUG(MXMAN, "read_bit_idx=%d write_bit_idx=%d buffer=%p num_packets=%d packet_size=%d read_index=%d write_index=%d\n",
		       scsc_mx_get_mxlog_transport(mx)->mif_stream.read_bit_idx,
		       scsc_mx_get_mxlog_transport(mx)->mif_stream.write_bit_idx,
		       scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.buffer,
		       scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.num_packets,
		       scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.packet_size,
		       *scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.read_index,
		       *scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.write_index);

	return 0;
}

static void transports_release(struct mxman *mxman)
{
#ifndef MAXWELL_SKIP_MANAGER
	mxlog_transport_release(scsc_mx_get_mxlog_transport(mxman->mx));
	mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mxman->mx));
	gdb_transport_release(scsc_mx_get_gdb_transport_r4(mxman->mx));
	gdb_transport_release(scsc_mx_get_gdb_transport_m4(mxman->mx));
#endif
	miframman_free(scsc_mx_get_ramman(mxman->mx), mxman->mxconf);
}

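/*
 * Prime the MIF mailboxes that the R4 boot ROM reads when released from
 * reset: MBOX0 = firmware entry point, MBOX1 = R4-relative mxconf reference,
 * MBOX2 = magic signature, MBOX3 = firmware_startup_flags.
 */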
static void mbox_init(struct mxman *mxman, u32 firmware_entry_point)
{
	u32 *mbox0;
	u32 *mbox1;
	u32 *mbox2;
	u32 *mbox3;
	scsc_mifram_ref mifram_ref;
	struct scsc_mx *mx = mxman->mx;
	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mxman->mx);

	/* Place firmware entry address in MIF MBOX 0 so R4 ROM knows where to jump to! */
	mbox0 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_0);
	mbox1 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_1);

	/* Write (and flush) entry point to MailBox 0, config address to MBOX 1 */
	*mbox0 = firmware_entry_point;
	mif->get_mifram_ref(mif, mxman->mxconf, &mifram_ref);
	*mbox1 = mifram_ref; /* must be R4-relative address here */
	/* CPU memory barrier */
	wmb();
	/*
	 * Write the magic number "0xbcdeedcb" to MIF Mailbox #2 and
	 * copy the firmware_startup_flags to MIF Mailbox #3 before starting (reset = 0) the R4
	 */
	mbox2 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_2);
	*mbox2 = MBOX2_MAGIC_NUMBER;
	mbox3 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_3);
	*mbox3 = firmware_startup_flags;
}

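/*
 * Parse and sanity-check the firmware header (unless skip_header is set),
 * reporting via *fwhdr_parsed_ok and *check_crc whether the header was
 * understood and whether CRC checking should be performed. Unidentified
 * firmware is only accepted when allow_unidentified_firmware is set.
 */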
static int fwhdr_init(char *fw, struct fwhdr *fwhdr, bool *fwhdr_parsed_ok, bool *check_crc)
{
	/*
	 * Validate the fw image, including checking the firmware header, magic #, version, checksum and so on,
	 * then do CRC on the entire image.
	 *
	 * Derive some values from the header.
	 *
	 * PORT: assumes little endian
	 */
	if (skip_header)
		*fwhdr_parsed_ok = false; /* Allows the forced start address to be used */
	else
		*fwhdr_parsed_ok = fwhdr_parse(fw, fwhdr);
	*check_crc = false;
	if (*fwhdr_parsed_ok) {
		SCSC_TAG_INFO(MXMAN, "FW HEADER version: hdr_major: %d hdr_minor: %d\n", fwhdr->hdr_major, fwhdr->hdr_minor);
		switch (fwhdr->hdr_major) {
		case 0:
			switch (fwhdr->hdr_minor) {
			case 2:
				*check_crc = true;
				break;
			default:
				SCSC_TAG_ERR(MXMAN, "Unsupported FW HEADER version: hdr_major: %d hdr_minor: %d\n",
					     fwhdr->hdr_major, fwhdr->hdr_minor);
				return -EINVAL;
			}
			break;
		case 1:
			*check_crc = true;
			break;
		default:
			SCSC_TAG_ERR(MXMAN, "Unsupported FW HEADER version: hdr_major: %d hdr_minor: %d\n",
				     fwhdr->hdr_major, fwhdr->hdr_minor);
			return -EINVAL;
		}
		switch (fwhdr->fwapi_major) {
		case 0:
			switch (fwhdr->fwapi_minor) {
			case 2:
				SCSC_TAG_INFO(MXMAN, "FWAPI version: fwapi_major: %d fwapi_minor: %d\n",
					      fwhdr->fwapi_major, fwhdr->fwapi_minor);
				break;
			default:
				SCSC_TAG_ERR(MXMAN, "Unsupported FWAPI version: fwapi_major: %d fwapi_minor: %d\n",
					     fwhdr->fwapi_major, fwhdr->fwapi_minor);
				return -EINVAL;
			}
			break;
		default:
			SCSC_TAG_ERR(MXMAN, "Unsupported FWAPI version: fwapi_major: %d fwapi_minor: %d\n",
				     fwhdr->fwapi_major, fwhdr->fwapi_minor);
			return -EINVAL;
		}
	} else {
		/* This is unidentified pre-header firmware - assume it is built to run at 0xb8000000 == 0 for bootrom */
		if (allow_unidentified_firmware) {
			SCSC_TAG_INFO(MXMAN, "Unidentified firmware override\n");
			fwhdr->firmware_entry_point = 0;
			fwhdr->fw_runtime_length = MX_FW_RUNTIME_LENGTH;
		} else {
			SCSC_TAG_ERR(MXMAN, "Unidentified firmware is not allowed\n");
			return -EINVAL;
		}
	}
	return 0;
}

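/*
 * Best-effort write of the driver release and firmware build id to
 * /data/.wifiver.info for the MCD test menu; errors opening the file are
 * logged and otherwise ignored.
 */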
static void write_mcd_test_fw_version_file(struct mxman *mxman)
{
	struct file *fp = NULL;
	char *filepath = "/data/.wifiver.info";
	char buf[80];
	char *build_id = 0;

	if (mxman)
		build_id = mxman->fw_build_id;

	fp = filp_open(filepath, O_WRONLY|O_CREAT, 0644);

	if (IS_ERR(fp)) {
		pr_err("returned error %d\n", IS_ERR(fp));
		return;
	} else if (fp == NULL) {
		pr_err("%s doesn't exist.\n", filepath);
		return;
	}
	snprintf(buf, sizeof(buf), "drv_ver: %d.%d.%d (f/w: %s)\n",
		 SCSC_RELEASE_PRODUCT, SCSC_RELEASE_ITERATION, SCSC_RELEASE_CANDIDATE,
		 build_id ? build_id : "unknown");

	kernel_write(fp, buf, strlen(buf), 0);

	if (fp)
		filp_close(fp, NULL);
}

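/*
 * Download the firmware image into the mapped DRAM, validate its header and
 * (when applicable) its CRCs, start the periodic CRC checker, and publish the
 * firmware build id to KIC and to the MCD test version file.
 */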
static int fw_init(struct mxman *mxman, void *start_dram, size_t size_dram, bool *fwhdr_parsed_ok)
{
	int r;
	char *build_id;
	u32 fw_image_size;
	struct fwhdr *fwhdr = &mxman->fwhdr;
	char *fw = start_dram;

	r = mx140_file_download_fw(mxman->mx, start_dram, size_dram, &fw_image_size);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "mx140_file_download_fw() failed (%d)\n", r);
		return r;
	}

	r = fwhdr_init(fw, fwhdr, fwhdr_parsed_ok, &mxman->check_crc);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "fwhdr_init() failed\n");
		return r;
	}
	mxman->fw = fw;
	mxman->fw_image_size = fw_image_size;
	if (mxman->check_crc) {
		/* do CRC on the entire image */
		r = do_fw_crc32_checks(fw, fw_image_size, &mxman->fwhdr, true);
		if (r) {
			SCSC_TAG_ERR(MXMAN, "do_fw_crc32_checks() failed\n");
			return r;
		}
		fw_crc_wq_start(mxman);
	}

	if (*fwhdr_parsed_ok) {
		build_id = fwhdr_get_build_id(fw, fwhdr);
		if (build_id) {
			struct slsi_kic_service_info kic_info;

			(void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "%s", build_id);
			SCSC_TAG_INFO(MXMAN, "Firmware BUILD_ID: %s\n", mxman->fw_build_id);

			(void)snprintf(kic_info.ver_str,
				       min(sizeof(mxman->fw_build_id), sizeof(kic_info.ver_str)),
				       "%s", mxman->fw_build_id);
			kic_info.fw_api_major = fwhdr->fwapi_major;
			kic_info.fw_api_minor = fwhdr->fwapi_minor;
			kic_info.release_product = SCSC_RELEASE_PRODUCT;
			kic_info.host_release_iteration = SCSC_RELEASE_ITERATION;
			kic_info.host_release_candidate = SCSC_RELEASE_CANDIDATE;

			slsi_kic_service_information(slsi_kic_technology_type_common, &kic_info);
		} else
			SCSC_TAG_ERR(MXMAN, "Failed to get Firmware BUILD_ID\n");
	}

	SCSC_TAG_DEBUG(MXMAN, "firmware_entry_point=0x%x fw_runtime_length=%d\n", fwhdr->firmware_entry_point, fwhdr->fw_runtime_length);

	/* write /data/.wifiver.info if MCD *#2633# is requested. */
	write_mcd_test_fw_version_file(mxman);

	return 0;
}

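/*
 * Full bring-up sequence: map MIF DRAM, load and validate the firmware,
 * initialise the MIF RAM/mailbox/interrupt managers and transports, prime
 * the boot mailboxes, release the subsystem from reset and wait for
 * MM_START_IND (or a fixed delay for headerless firmware).
 */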
static int mxman_start(struct mxman *mxman)
{
	void *start_dram;
	size_t size_dram = MX_DRAM_SIZE;
	struct scsc_mif_abs *mif;
	struct fwhdr *fwhdr = &mxman->fwhdr;
	bool fwhdr_parsed_ok;
#ifndef MAXWELL_SKIP_MANAGER
	void *start_mifram_heap;
	u32 length_mifram_heap;
	int r;
#endif
	(void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "unknown");

	/* If the option is set to skip the header, we must allow unidentified f/w */
	if (skip_header) {
		SCSC_TAG_INFO(MXMAN, "Ignoring firmware header block\n");
		allow_unidentified_firmware = true;
	}

	mif = scsc_mx_get_mif_abs(mxman->mx);
	start_dram = mif->map(mif, &size_dram);

	if (!start_dram) {
		SCSC_TAG_ERR(MXMAN, "Error allocating dram\n");
		return -ENOMEM;
	}

	SCSC_TAG_DEBUG(MXMAN, "Allocated %zu bytes\n", size_dram);

#ifdef CONFIG_SCSC_CHV_SUPPORT
	if (chv_run)
		allow_unidentified_firmware = true;
	/* Set up chv arguments. */

#endif

#ifndef MAXWELL_SKIP_MANAGER
	mxman->start_dram = start_dram;

	r = fw_init(mxman, start_dram, size_dram, &fwhdr_parsed_ok);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "fw_init() failed\n");
		mif->unmap(mif, mxman->start_dram);
		return r;
	}

	/* set up memory protection (read only) from start_dram to start_dram+fw_length,
	 * rounding up the size if required
	 */
	start_mifram_heap = (char *)start_dram + fwhdr->fw_runtime_length;
	length_mifram_heap = size_dram - fwhdr->fw_runtime_length;

	miframman_init(scsc_mx_get_ramman(mxman->mx), start_mifram_heap, length_mifram_heap);
	mifmboxman_init(scsc_mx_get_mboxman(mxman->mx));
	mifintrbit_init(scsc_mx_get_intrbit(mxman->mx), mif);
#else
	miframman_init(scsc_mx_get_ramman(mxman->mx), start_dram, size_dram);
#endif

#ifndef MAXWELL_SKIP_MANAGER
	/* Initialise transports */
	r = transports_init(mxman);
	if (r) {
		SCSC_TAG_ERR(MXMAN, "transports_init() failed\n");
		fw_crc_wq_stop(mxman);
		mifintrbit_deinit(scsc_mx_get_intrbit(mxman->mx));
		miframman_deinit(scsc_mx_get_ramman(mxman->mx));
		mifmboxman_deinit(scsc_mx_get_mboxman(mxman->mx));
		/* Release the MIF memory resources */
		mif->unmap(mif, mxman->start_dram);
		return r;
	}
	mbox_init(mxman, fwhdr->firmware_entry_point);
	init_completion(&mxman->mm_msg_start_ind_completion);
	mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
						  &mxman_message_handler, mxman);
	mxlog_init(scsc_mx_get_mxlog(mxman->mx), mxman->mx);
#endif
#ifdef CONFIG_SCSC_CHV_SUPPORT
	if (chv_run) {
		int i;

		u32 *p = (u32 *)((u8 *)start_dram + SCSC_CHV_ARGV_ADDR_OFFSET);

		if (chv_argc == 0) {
			/*
			 * Set up the chv f/w arguments.
			 * An argument of 0 means run once (the driver never sets this).
			 * An argument of 1 means run forever.
			 */
			SCSC_TAG_INFO(MXMAN, "Setting up CHV arguments: start_dram=%p arg=%p, chv_run=%d\n", start_dram, p, chv_run);
			*p++ = 1;                    /* argc */
			*p++ = chv_run == 1 ? 0 : 1; /* arg */
		} else {
			/* Pass separate args */
			*p++ = chv_argc; /* argc */
			SCSC_TAG_INFO(MXMAN, "Setting up additional CHV args: chv_argc = %d\n", chv_argc);

			for (i = 0; i < chv_argc; i++) {
				SCSC_TAG_INFO(MXMAN, "Setting up additional CHV args: chv_argv[%d]: *(%p) = 0x%x\n", i, p, (u32)chv_argv[i]);
				*p++ = (u32)chv_argv[i]; /* arg */
			}
		}
	}
#endif
	mxproc_create_ctrl_proc_dir(&mxman->mxproc, mxman);
	/* release Maxwell from reset */
	mif->reset(mif, 0);
#ifndef MAXWELL_SKIP_MANAGER
	if (fwhdr_parsed_ok) {
		r = wait_for_mm_msg_start_ind(mxman);
		if (r) {
			SCSC_TAG_ERR(MXMAN, "wait_for_MM_START_IND() failed: r=%d\n", r);
			print_mailboxes(mxman);
			if (skip_mbox0_check) {
				SCSC_TAG_ERR(MXMAN, "timeout ignored in skip_mbox0_check mode\n");
				return 0;
			}
			mxman_stop(mxman);
			return r;
		}
	} else {
		msleep(WAIT_FOR_FW_TO_START_DELAY_MS);
	}
#endif
	return 0;
}

/*
 * workqueue thread
 */
static void mxman_failure_work(struct work_struct *work)
{
	struct mxman *mxman = container_of(work, struct mxman, failure_work);
	struct srvman *srvman;
	struct scsc_mx *mx = mxman->mx;
	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mxman->mx);
	u16 scsc_panic_code;

	wake_lock(&mxman->recovery_wake_lock);

	slsi_kic_system_event(slsi_kic_system_event_category_error,
			      slsi_kic_system_events_subsystem_crashed, GFP_KERNEL);

	SCSC_TAG_INFO(MXMAN, "Complete mm_msg_start_ind_completion\n");
	complete(&mxman->mm_msg_start_ind_completion);
	mutex_lock(&mxman->mxman_mutex);
	srvman = scsc_mx_get_srvman(mxman->mx);

	if (mxman->mxman_state != MXMAN_STATE_STARTED) {
		SCSC_TAG_WARNING(MXMAN, "Not in started state: mxman->mxman_state=%d\n", mxman->mxman_state);
		wake_unlock(&mxman->recovery_wake_lock);
		mutex_unlock(&mxman->mxman_mutex);
		return;
	}
	scsc_panic_code = mxman->scsc_panic_code;
	mxlog_release(scsc_mx_get_mxlog(mx));
	/* unregister channel handler */
	mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
						  NULL, NULL);
	mxmgmt_transport_set_error(scsc_mx_get_mxmgmt_transport(mx));
	srvman_set_error(srvman);
	fw_crc_wq_stop(mxman);

	mxman->mxman_state = mxman->mxman_next_state;

	if (mxman->mxman_state != MXMAN_STATE_FAILED
	    && mxman->mxman_state != MXMAN_STATE_FREEZED) {
		WARN_ON(mxman->mxman_state != MXMAN_STATE_FAILED
			&& mxman->mxman_state != MXMAN_STATE_FREEZED);
		SCSC_TAG_ERR(MXMAN, "Bad state=%d\n", mxman->mxman_state);
		wake_unlock(&mxman->recovery_wake_lock);
		mutex_unlock(&mxman->mxman_mutex);
		return;
	}
	/* Signal panic to r4 and m4 processors */
	SCSC_TAG_INFO(MXMAN, "Setting MIFINTRBIT_RESERVED_PANIC_R4\n");
	mif->irq_bit_set(mif, MIFINTRBIT_RESERVED_PANIC_R4, SCSC_MIFINTR_TARGET_R4);
	SCSC_TAG_INFO(MXMAN, "Setting MIFINTRBIT_RESERVED_PANIC_M4\n");
	mif->irq_bit_set(mif, MIFINTRBIT_RESERVED_PANIC_M4, SCSC_MIFINTR_TARGET_M4);
	srvman_freeze_services(srvman);
	if (mxman->mxman_state == MXMAN_STATE_FAILED) {
		u32 *r4_panic_record = (u32 *)(mxman->fw + mxman->fwhdr.r4_panic_record_offset);
		u32 *m4_panic_record;
		bool r4_panic_record_ok = false;
		bool m4_panic_record_ok = false;

		/* some configurable delay before accessing the panic record */
		msleep(panic_record_delay);
		r4_panic_record_ok = fw_parse_r4_panic_record(r4_panic_record);
		if (mxman->fwhdr.m4_panic_record_offset) {
			m4_panic_record = (u32 *)(mxman->fw + mxman->fwhdr.m4_panic_record_offset);
			m4_panic_record_ok = fw_parse_m4_panic_record(m4_panic_record);
		} else {
			SCSC_TAG_INFO(MXMAN, "M4 panic record doesn't exist in the firmware header\n");
		}
		/*
		 * Check if the panic was triggered by MX and
		 * set the subcode if so.
		 */
		if ((scsc_panic_code & 0x8000) == 0) {
			if (r4_panic_record_ok)
				scsc_panic_code |= 0xFFF & r4_panic_record[2];
			else if (m4_panic_record_ok)
				scsc_panic_code |= 0xFFF & m4_panic_record[2];
		}
		/* Set unspecified technology for now */
		scsc_panic_code |= 0x03 << 13;
		SCSC_TAG_INFO(MXMAN, "scsc_panic_code=0x%x\n", scsc_panic_code);
		SCSC_TAG_INFO(MXMAN, "Trying to schedule coredump\n");
		SCSC_TAG_INFO(MXMAN, "scsc_release %d.%d.%d\n",
			      SCSC_RELEASE_PRODUCT,
			      SCSC_RELEASE_ITERATION,
			      SCSC_RELEASE_CANDIDATE);

		/* schedule coredump and wait for it to finish */
		if (disable_auto_coredump)
			SCSC_TAG_INFO(MXMAN, "Driver automatic coredump disabled, not launching coredump helper\n");
		else {
			SCSC_TAG_INFO(MXMAN, "Invoking coredump helper\n");
			(void)coredump_helper();
		}

		/* Clean up the MIF following error handling */
		if (mif->mif_cleanup && disable_recovery_handling)
			mif->mif_cleanup(mif);
	}
	if (!disable_recovery_handling)
		srvman_clear_error(srvman);
	mutex_unlock(&mxman->mxman_mutex);
	if (!disable_recovery_handling) {
		SCSC_TAG_INFO(MXMAN, "Calling srvman_unfreeze_services with scsc_panic_code=0x%x\n", scsc_panic_code);
		srvman_unfreeze_services(srvman, scsc_panic_code);
		if (scsc_mx_module_reset() < 0)
			SCSC_TAG_INFO(MXMAN, "failed to call scsc_mx_module_reset\n");
	}

	/*
	 * If recovery is disabled and an scsc_mx_service_open has been held up,
	 * release it, rather than wait for the recovery_completion to timeout.
	 */
	if (disable_recovery_handling)
		complete(&mxman->recovery_completion);

	wake_unlock(&mxman->recovery_wake_lock);
}

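/*
 * The failure work item above runs on its own single-threaded workqueue;
 * failure_wq_start() only queues it when error handling has not been
 * disabled via the disable_error_handling module parameter.
 */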
static void failure_wq_init(struct mxman *mxman)
{
	mxman->failure_wq = create_singlethread_workqueue("failure_wq");
	INIT_WORK(&mxman->failure_work, mxman_failure_work);
}

static void failure_wq_stop(struct mxman *mxman)
{
	cancel_work_sync(&mxman->failure_work);
	flush_workqueue(mxman->failure_wq);
}

static void failure_wq_deinit(struct mxman *mxman)
{
	failure_wq_stop(mxman);
	destroy_workqueue(mxman->failure_wq);
}

static void failure_wq_start(struct mxman *mxman)
{
	if (disable_error_handling)
		SCSC_TAG_INFO(MXMAN, "error handling disabled\n");
	else
		queue_work(mxman->failure_wq, &mxman->failure_work);
}

static void print_mailboxes(struct mxman *mxman)
{
	struct scsc_mif_abs *mif;
	struct mifmboxman *mboxman;
	int i;

	mif = scsc_mx_get_mif_abs(mxman->mx);
	mboxman = scsc_mx_get_mboxman(mxman->mx);

	SCSC_TAG_INFO(MXMAN, "Printing mailbox values:\n");
	for (i = 0; i < MIFMBOX_NUM; i++)
		SCSC_TAG_INFO(MXMAN, "MBOX_%d: 0x%x\n", i, *mifmboxman_get_mbox_ptr(mboxman, mif, i));
}

/*
 * Check for matching f/w and h/w
 *
 * Returns  0: f/w and h/w match
 *          1: f/w and h/w mismatch, try the next config
 *        -ve: fatal error
 */
static int mxman_hw_ver_check(struct mxman *mxman)
{
	if (mx140_file_supported_hw(mxman->mx, mxman->rf_hw_ver))
		return 0;
	else
		return 1;
}

/*
 * Select the f/w version to load next
 */
static int mxman_select_next_fw(struct mxman *mxman)
{
	return mx140_file_select_fw(mxman->mx, mxman->rf_hw_ver);
}

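/*
 * mxman_open() below boots WLBT via __mxman_open() and, if the reported RF
 * hardware version is not supported by the loaded firmware, closes it,
 * selects an alternative firmware and retries once.
 */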
/* Boot MX140 with given f/w */
static int __mxman_open(struct mxman *mxman)
{
	int r;
	struct srvman *srvman;

	mx140_basedir_file(mxman->mx);

	mutex_lock(&mxman->mxman_mutex);

	srvman = scsc_mx_get_srvman(mxman->mx);
	if (srvman && srvman->error) {
		mutex_unlock(&mxman->mxman_mutex);
		SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
		return -EINVAL;
	}

	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		/* if in the STARTED state there MUST already be some users */
		if (WARN_ON(!mxman->users)) {
			SCSC_TAG_ERR(MXMAN, "ERROR mxman->mxman_state=%d users=%d\n", mxman->mxman_state, mxman->users);
			mutex_unlock(&mxman->mxman_mutex);
			return -EINVAL;
		}
		mxman->users++;
		SCSC_TAG_INFO(MXMAN, "Already opened: users=%d\n", mxman->users);
		mutex_unlock(&mxman->mxman_mutex);
		return 0;
	} else if (mxman->mxman_state == MXMAN_STATE_STOPPED) {
		r = mxman_start(mxman);
		if (r) {
			SCSC_TAG_ERR(MXMAN, "maxwell_manager_start() failed r=%d users=%d\n", r, mxman->users);
			mutex_unlock(&mxman->mxman_mutex);
			return r;
		}
		mxman->users++;
		mxman->mxman_state = MXMAN_STATE_STARTED;
		mutex_unlock(&mxman->mxman_mutex);
		if (!disable_logger)
			_mx_exec("/system/bin/mx_logger.sh", UMH_WAIT_EXEC);
		return 0;
	}
	WARN_ON(mxman->mxman_state != MXMAN_STATE_STARTED && mxman->mxman_state != MXMAN_STATE_STOPPED);
	SCSC_TAG_ERR(MXMAN, "Bad state: mxman->mxman_state=%d\n", mxman->mxman_state);
	mutex_unlock(&mxman->mxman_mutex);
	return -EPERM;
}

int mxman_open(struct mxman *mxman)
{
	int r;
	int try = 0;

	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mxman->mx);

	for (try = 0; try < 2; try++) {
		/* Boot WLBT. This will determine the h/w version */
		r = __mxman_open(mxman);
		if (r)
			return r;

		/* On retries, restore USBPLL owner as WLBT */
		if (try > 0 && mif->mif_restart)
			mif->mif_restart(mif);

		/* Check the h/w and f/w versions are compatible */
		r = mxman_hw_ver_check(mxman);
		if (r > 0) {
			/* Not compatible, so try next f/w */
			SCSC_TAG_INFO(MXMAN, "Incompatible h/w 0x%04x vs f/w, close and try next\n", mxman->rf_hw_ver);

			/* Temporarily return USBPLL owner to AP to keep USB alive */
			if (mif->mif_cleanup)
				mif->mif_cleanup(mif);

			/* Stop WLBT */
			mxman_close(mxman);

			/* Select the new f/w for this hw ver */
			mxman_select_next_fw(mxman);
		} else
			break; /* Running or given up */
	}

	return r;
}

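/*
 * Tear-down counterpart of mxman_start(): put the subsystem back into reset,
 * release the transports and MIF managers, and unmap the MIF DRAM.
 */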
static void mxman_stop(struct mxman *mxman)
{
	struct scsc_mif_abs *mif;

	SCSC_TAG_INFO(MXMAN, "\n");

	(void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "unknown");

	mxproc_remove_ctrl_proc_dir(&mxman->mxproc);

	/* Shutdown the hardware */
	mif = scsc_mx_get_mif_abs(mxman->mx);
	mif->reset(mif, 1);
	transports_release(mxman);

#ifndef MAXWELL_SKIP_MANAGER
	mxlog_release(scsc_mx_get_mxlog(mxman->mx));
	/* unregister channel handler */
	mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
						  NULL, NULL);
	fw_crc_wq_stop(mxman);
#endif

	/* Uninitialise components (they may perform some checks - e.g. all memory freed) */
	mifintrbit_deinit(scsc_mx_get_intrbit(mxman->mx));
	miframman_deinit(scsc_mx_get_ramman(mxman->mx));
	mifmboxman_deinit(scsc_mx_get_mboxman(mxman->mx));

	/* Release the MIF memory resources */
	mif->unmap(mif, mxman->start_dram);
}

void mxman_close(struct mxman *mxman)
{
	int r;
	struct srvman *srvman;

	mutex_lock(&mxman->mxman_mutex);
	srvman = scsc_mx_get_srvman(mxman->mx);
	if (srvman && srvman->error) {
		mutex_unlock(&mxman->mxman_mutex);
		SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
		return;
	}

	SCSC_TAG_INFO(MXMAN, "\n");

	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		if (WARN_ON(!mxman->users)) {
			SCSC_TAG_ERR(MXMAN, "ERROR users=%d\n", mxman->users);
			mutex_unlock(&mxman->mxman_mutex);
			return;
		}
		mxman->users--;
		if (mxman->users) {
			SCSC_TAG_INFO(MXMAN, "Current number of users=%d\n", mxman->users);
			mutex_unlock(&mxman->mxman_mutex);
			return;
		}
#ifndef MAXWELL_SKIP_MANAGER
		/*
		 * Ask the subsystem to stop (MM_STOP_REQ), and wait
		 * for response (MM_STOP_RSP).
		 */
		r = send_mm_msg_stop_blocking(mxman);
		if (r)
			SCSC_TAG_ERR(MXMAN, "send_mm_msg_stop_blocking failed: r=%d\n", r);

#endif
		mxman_stop(mxman);
		mxman->mxman_state = MXMAN_STATE_STOPPED;
		mutex_unlock(&mxman->mxman_mutex);
	} else if (mxman->mxman_state == MXMAN_STATE_FAILED) {
		if (WARN_ON(!mxman->users))
			SCSC_TAG_ERR(MXMAN, "ERROR users=%d\n", mxman->users);

		mxman->users--;
		if (mxman->users) {
			SCSC_TAG_INFO(MXMAN, "Current number of users=%d\n", mxman->users);
			mutex_unlock(&mxman->mxman_mutex);
			return;
		}

		mxman_stop(mxman);
		mxman->mxman_state = MXMAN_STATE_STOPPED;
		mutex_unlock(&mxman->mxman_mutex);
		complete(&mxman->recovery_completion);
	} else {
		WARN_ON(mxman->mxman_state != MXMAN_STATE_STARTED);
		SCSC_TAG_ERR(MXMAN, "Bad state: mxman->mxman_state=%d\n", mxman->mxman_state);
		mutex_unlock(&mxman->mxman_mutex);
		return;
	}
}

void mxman_fail(struct mxman *mxman, u16 scsc_panic_code)
{
	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		mxman->mxman_next_state = MXMAN_STATE_FAILED;
		mxman->scsc_panic_code = scsc_panic_code;
		SCSC_TAG_INFO(MXMAN, "scsc_panic_code=0x%x\n", scsc_panic_code);
		failure_wq_start(mxman);
	} else {
		SCSC_TAG_WARNING(MXMAN, "Not in MXMAN_STATE_STARTED state, ignore\n");
	}
}

void mxman_freeze(struct mxman *mxman)
{
	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		mxman->mxman_next_state = MXMAN_STATE_FREEZED;
		failure_wq_start(mxman);
	} else {
		SCSC_TAG_WARNING(MXMAN, "Not in MXMAN_STATE_STARTED state, ignore\n");
	}
}

void mxman_init(struct mxman *mxman, struct scsc_mx *mx)
{
	mxman->mx = mx;
	mxman->suspended = 0;
	fw_crc_wq_init(mxman);
	failure_wq_init(mxman);
	mutex_init(&mxman->mxman_mutex);
	init_completion(&mxman->recovery_completion);
	wake_lock_init(&mxman->recovery_wake_lock, WAKE_LOCK_SUSPEND, "mxman_recovery");

	/* set the initial state */
	mxman->mxman_state = MXMAN_STATE_STOPPED;
	(void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "unknown");
	mxproc_create_info_proc_dir(&mxman->mxproc, mxman);
}

void mxman_deinit(struct mxman *mxman)
{
	mxproc_remove_info_proc_dir(&mxman->mxproc);
	fw_crc_wq_deinit(mxman);
	failure_wq_deinit(mxman);
	wake_lock_destroy(&mxman->recovery_wake_lock);
	mutex_destroy(&mxman->mxman_mutex);
}

int mxman_force_panic(struct mxman *mxman)
{
	struct srvman *srvman;
	struct ma_msg_packet message = { .ma_msg = MM_FORCE_PANIC };

	mutex_lock(&mxman->mxman_mutex);
	srvman = scsc_mx_get_srvman(mxman->mx);
	if (srvman && srvman->error) {
		mutex_unlock(&mxman->mxman_mutex);
		SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
		return -EINVAL;
	}

	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));
		mutex_unlock(&mxman->mxman_mutex);
		return 0;
	}
	mutex_unlock(&mxman->mxman_mutex);
	return -EINVAL;
}

int mxman_suspend(struct mxman *mxman)
{
	struct srvman *srvman;
	struct ma_msg_packet message = { .ma_msg = MM_HOST_SUSPEND };

	SCSC_TAG_INFO(MXMAN, "\n");

	mutex_lock(&mxman->mxman_mutex);
	srvman = scsc_mx_get_srvman(mxman->mx);
	if (srvman && srvman->error) {
		mutex_unlock(&mxman->mxman_mutex);
		SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
		return -EINVAL;
	}

	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		SCSC_TAG_INFO(MXMAN, "MM_HOST_SUSPEND\n");
		mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));
		mxman->suspended = 1;
		atomic_inc(&mxman->suspend_count);
	}
	mutex_unlock(&mxman->mxman_mutex);
	return 0;
}

void mxman_resume(struct mxman *mxman)
{
	struct srvman *srvman;
	struct ma_msg_packet message = { .ma_msg = MM_HOST_RESUME };

	SCSC_TAG_INFO(MXMAN, "\n");

	mutex_lock(&mxman->mxman_mutex);
	srvman = scsc_mx_get_srvman(mxman->mx);
	if (srvman && srvman->error) {
		mutex_unlock(&mxman->mxman_mutex);
		SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
		return;
	}

	if (mxman->mxman_state == MXMAN_STATE_STARTED) {
		SCSC_TAG_INFO(MXMAN, "MM_HOST_RESUME\n");
		mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));
		mxman->suspended = 0;
	}
	mutex_unlock(&mxman->mxman_mutex);
}

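/*
 * Cleanup callback passed to call_usermodehelper_setup(): frees the argv
 * array allocated by argv_split() once the usermode helper has run (or
 * failed to run).
 */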
static void _mx_exec_cleanup(struct subprocess_info *sp_info)
{
	if (!sp_info) {
		pr_err("%s: sp_info is null", __func__);
		return;
	}
	if (!sp_info->argv) {
		pr_err("%s: argv is null", __func__);
		return;
	}

	pr_info("%s: 0x%p\n", __func__, sp_info->argv);
	argv_free(sp_info->argv);
}

/* prog - full path to programme
 * wait_exec - one of UMH_WAIT_EXEC, UMH_WAIT_PROC, UMH_KILLABLE, UMH_NO_WAIT
 */
static int _mx_exec(char *prog, int wait_exec)
{
	static char const *envp[] = { "HOME=/", "PATH=/system/bin:/sbin:", NULL };
	const int exec_string_buffer_len = STRING_BUFFER_MAX_LENGTH;
	const int exec_string_args = NUMBER_OF_STRING_ARGS;
	char **argv;
	char argv_str[exec_string_buffer_len];
	int argc, result, len;
	struct subprocess_info *sp_info;

	len = snprintf(argv_str, exec_string_buffer_len, "%s", prog);
	if (len >= exec_string_buffer_len) {
		/* snprintf() returns a value of buffer size or greater if it had to truncate the format string. */
		pr_err("%s: exec string buffer insufficient (buffer size=%d, actual string=%d)\n", __func__, exec_string_buffer_len, len);
		return -E2BIG;
	}

	/* Kernel library function argv_split() will allocate memory for argv. */
	argc = 0;
	argv = argv_split(GFP_ATOMIC, argv_str, &argc);
	if (!argv) {
		pr_err("%s: failed to allocate argv for userspace helper\n", __func__);
		return -ENOMEM;
	}

	/* Check the argument count - should be exec_string_args. */
	if (argc != exec_string_args) {
		pr_err("%s: exec string has the wrong number of arguments (has %d, should be %d)\n", __func__, argc, exec_string_args);
		argv_free(argv);
		return -E2BIG;
	}

	/* Allocate sp_info and initialise pointers to argv and envp. */
	sp_info = call_usermodehelper_setup(argv[0], argv, (char **)envp, GFP_KERNEL, NULL, _mx_exec_cleanup, NULL);

	if (!sp_info) {
		pr_err("%s: call_usermodehelper_setup() failed\n", __func__);
		argv_free(argv);
		return -EIO;
	}

	/*
	 * Put sp_info into work queue for processing by khelper.
	 * UMH_WAIT_EXEC: wait to see launch
	 */
	pr_info("%s: Launch %s\n", __func__, prog);

	result = call_usermodehelper_exec(sp_info, wait_exec);

	if (result != 0) {
		/*
		 * call_usermodehelper_exec() will free sp_info and call any cleanup function
		 * whether it succeeds or fails, so do not free argv.
		 */
		if (result == -ENOENT)
			pr_err("%s: call_usermodehelper() failed with %d, Executable not found %s\n", __func__, result, prog);
		else
			pr_err("%s: call_usermodehelper_exec() failed with %d\n", __func__, result);
	}
	return result;
}

int mx140_log_dump(void)
{
	return _mx_exec("/system/bin/mx_logger_dump.sh", UMH_WAIT_EXEC);
}
EXPORT_SYMBOL(mx140_log_dump);