Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

drivers/gud/Kconfig Normal file

@@ -0,0 +1,15 @@
#
# Mobicore Configuration
#
if SOC_EXYNOS8890
source "drivers/gud/gud-exynos8890/Kconfig"
endif
if SOC_EXYNOS7870
source "drivers/gud/gud-exynos7870/Kconfig"
endif
if SOC_EXYNOS7570
source "drivers/gud/gud-exynos7570/Kconfig"
endif

drivers/gud/Makefile Normal file

@@ -0,0 +1,3 @@
obj-$(CONFIG_SOC_EXYNOS8890) += gud-exynos8890/
obj-$(CONFIG_SOC_EXYNOS7870) += gud-exynos7870/
obj-$(CONFIG_SOC_EXYNOS7570) += gud-exynos7570/

drivers/gud/gud-exynos7570/Kconfig Normal file

@@ -0,0 +1,39 @@
#
# Trustonic drivers configuration
#
config TRUSTONIC_TEE
tristate "Trustonic TEE Driver"
depends on ARM || ARM64
default y
---help---
Enable Trustonic TEE support
config TRUSTONIC_TEE_LPAE
bool "Trustonic TEE uses LPAE"
depends on TRUSTONIC_TEE
default y if ARM64
default n if ARM
---help---
Enable Trustonic TEE 64-bit physical addresses support
config TRUSTONIC_TEE_DEBUG
bool "Trustonic TEE driver debug mode"
depends on TRUSTONIC_TEE
default n
---help---
Enable the debug mode in the Trustonic TEE Driver.
config TRUSTONIC_TRUSTED_UI
tristate "Trustonic Trusted UI"
depends on TRUSTONIC_TEE
---help---
Enable Trustonic Trusted User Interface
config TRUSTONIC_TRUSTED_UI_FB_BLANK
bool "Trustonic Trusted UI with fb_blank"
depends on TRUSTONIC_TRUSTED_UI
---help---
Blank the framebuffer before starting a TUI session
source "drivers/gud/gud-exynos7570/sec-os-ctrl/Kconfig"
source "drivers/gud/gud-exynos7570/sec-os-booster/Kconfig"

drivers/gud/gud-exynos7570/Makefile Normal file

@@ -0,0 +1,9 @@
#
# Makefile for the <t-base core and trusted UI drivers
#
obj-$(CONFIG_TRUSTONIC_TEE) := MobiCoreDriver/
obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui/
obj-$(CONFIG_SECURE_OS_CONTROL) += sec-os-ctrl/
obj-$(CONFIG_SECURE_OS_BOOSTER_API) += sec-os-booster/

drivers/gud/gud-exynos7570/MobiCoreDriver/Makefile Normal file

@@ -0,0 +1,32 @@
#
# Makefile for the <t-base core driver
#
GUD_ROOT_FOLDER := drivers/gud/gud-exynos7570
# Add our modules to the kernel.
obj-$(CONFIG_TRUSTONIC_TEE) += mcDrvModule.o
mcDrvModule-y := \
admin.o \
client.o \
clientlib.o \
clock.o \
fastcall.o \
logging.o \
main.o \
mcp.o \
mmu.o \
pm.o \
scheduler.o \
session.o \
user.o
# Release mode by default
ccflags-y += -DNDEBUG
ccflags-y += -Wno-declaration-after-statement
ccflags-$(CONFIG_TRUSTONIC_TEE_DEBUG) += -DDEBUG
# MobiCore Driver includes
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver

drivers/gud/gud-exynos7570/MobiCoreDriver/admin.c Normal file

@@ -0,0 +1,981 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/delay.h>
#include "public/mc_user.h"
#include "public/mc_admin.h"
#include "mci/mcloadformat.h"
#include "main.h"
#include "mmu.h" /* For load_check and load_token */
#include "mcp.h"
#include "client.h"
#include "admin.h"
static struct admin_ctx {
struct mutex admin_tgid_mutex; /* Lock for admin_tgid below */
pid_t admin_tgid;
int (*tee_start_cb)(void);
void (*tee_stop_cb)(void);
int last_start_ret;
} admin_ctx;
static struct mc_admin_driver_request {
/* Global */
struct mutex mutex; /* Protects access to this struct */
struct mutex states_mutex; /* Protect access to the states */
enum client_state {
IDLE,
REQUEST_SENT,
BUFFERS_READY,
} client_state;
enum server_state {
NOT_CONNECTED, /* Device not open */
READY, /* Waiting for requests */
REQUEST_RECEIVED, /* Got a request, is working */
RESPONSE_SENT, /* Has sent a response header */
DATA_SENT, /* Blocked until data is consumed */
} server_state;
/* Request */
u32 request_id;
struct mc_admin_request request;
struct completion client_complete;
/* Response */
struct mc_admin_response response;
struct completion server_complete;
void *buffer; /* Reception buffer (pre-allocated) */
size_t size; /* Size of the reception buffer */
} g_request;
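/*
 * Handshake between a driver thread (client side) and the daemon (server
 * side), summarized from request_send()/request_receive() below and from
 * admin_ioctl()/admin_write():
 *
 *   driver thread                      daemon
 *   client_state = REQUEST_SENT
 *   complete(client_complete)  ----->  ioctl(GET_DRIVER_REQUEST) returns
 *                                      server_state = REQUEST_RECEIVED
 *   wait(server_complete)      <-----  write() of the response header
 *                                      server_state = RESPONSE_SENT (data
 *                                      to come) or READY (error/no data)
 *   client_state = BUFFERS_READY
 *   complete(client_complete)  ----->  write() of the payload
 *   wait(server_complete)      <-----  server_state = READY
 *   client_state = IDLE
 */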
static struct tee_object *tee_object_alloc(bool is_sp_trustlet, size_t length)
{
struct tee_object *obj;
size_t size = sizeof(*obj) + length;
size_t header_length = 0;
/* Determine required size */
if (is_sp_trustlet) {
/* Need space for lengths info and containers */
header_length = sizeof(struct mc_blob_len_info);
size += header_length + 3 * MAX_SO_CONT_SIZE;
}
/* Allocate memory */
obj = vzalloc(size);
if (!obj)
return NULL;
/* A non-zero header_length indicates that we have a SP trustlet */
obj->header_length = (u32)header_length;
obj->length = (u32)length;
return obj;
}
void tee_object_free(struct tee_object *robj)
{
vfree(robj);
}
static inline void client_state_change(enum client_state state)
{
mutex_lock(&g_request.states_mutex);
g_request.client_state = state;
mutex_unlock(&g_request.states_mutex);
}
static inline bool client_state_is(enum client_state state)
{
bool is;
mutex_lock(&g_request.states_mutex);
is = g_request.client_state == state;
mutex_unlock(&g_request.states_mutex);
return is;
}
static inline void server_state_change(enum server_state state)
{
mutex_lock(&g_request.states_mutex);
g_request.server_state = state;
mutex_unlock(&g_request.states_mutex);
}
static inline bool server_state_is(enum server_state state)
{
bool is;
mutex_lock(&g_request.states_mutex);
is = g_request.server_state == state;
mutex_unlock(&g_request.states_mutex);
return is;
}
static void request_cancel(void);
static int request_send(u32 command, const struct mc_uuid_t *uuid, bool is_gp,
u32 spid)
{
int counter = 10;
int ret = 0;
/* Prepare request */
mutex_lock(&g_request.states_mutex);
/* Wait a little for daemon to connect */
while ((g_request.server_state == NOT_CONNECTED) && counter--) {
mutex_unlock(&g_request.states_mutex);
ssleep(1);
mutex_lock(&g_request.states_mutex);
}
WARN_ON(g_request.client_state != IDLE);
if (g_request.server_state != READY) {
mutex_unlock(&g_request.states_mutex);
if (g_request.server_state != NOT_CONNECTED) {
mc_dev_err("invalid daemon state %d\n",
g_request.server_state);
ret = -EPROTO;
goto end;
} else {
mc_dev_err("daemon not connected\n");
ret = -ENOTCONN;
goto end;
}
}
memset(&g_request.request, 0, sizeof(g_request.request));
memset(&g_request.response, 0, sizeof(g_request.response));
g_request.request.request_id = g_request.request_id;
g_request.request.command = command;
if (uuid)
memcpy(&g_request.request.uuid, uuid, sizeof(*uuid));
else
memset(&g_request.request.uuid, 0, sizeof(*uuid));
g_request.request.is_gp = is_gp;
g_request.request.spid = spid;
g_request.client_state = REQUEST_SENT;
mutex_unlock(&g_request.states_mutex);
/* Send request */
complete(&g_request.client_complete);
/* Wait for header (could be interruptible, but then needs more work) */
wait_for_completion(&g_request.server_complete);
/* Server should be waiting with some data for us */
mutex_lock(&g_request.states_mutex);
switch (g_request.server_state) {
case NOT_CONNECTED:
/* Daemon gone */
ret = -EPIPE;
break;
case READY:
/* No data to come, likely an error */
ret = -g_request.response.error_no;
break;
case RESPONSE_SENT:
case DATA_SENT:
/* Normal case, data to come */
ret = 0;
break;
case REQUEST_RECEIVED:
/* Should not happen as complete means the state changed */
mc_dev_err("daemon is in a bad state: %d\n",
g_request.server_state);
ret = -EPIPE;
break;
}
mutex_unlock(&g_request.states_mutex);
end:
if (ret)
request_cancel();
return ret;
}
static int request_receive(void *address, u32 size)
{
/*
* At this point we have received the header and prepared some buffers
* to receive data that we know are coming from the server.
*/
/* Check server state */
bool server_ok;
mutex_lock(&g_request.states_mutex);
server_ok = (g_request.server_state == RESPONSE_SENT) ||
(g_request.server_state == DATA_SENT);
mutex_unlock(&g_request.states_mutex);
if (!server_ok) {
mc_dev_err("expected server state %d or %d, not %d\n",
RESPONSE_SENT, DATA_SENT, g_request.server_state);
request_cancel();
return -EPIPE;
}
/* Setup reception buffer */
g_request.buffer = address;
g_request.size = size;
client_state_change(BUFFERS_READY);
/* Unlock write of data */
complete(&g_request.client_complete);
/* Wait for data (far too late to be interruptible) */
wait_for_completion(&g_request.server_complete);
/* Reset reception buffer */
g_request.buffer = NULL;
g_request.size = 0;
/* Return to idle state */
client_state_change(IDLE);
return 0;
}
/* Must be called instead of request_receive() to cancel a pending request */
static void request_cancel(void)
{
/* Unlock write of data */
mutex_lock(&g_request.states_mutex);
if (g_request.server_state == DATA_SENT)
complete(&g_request.client_complete);
/* Return to idle state */
g_request.client_state = IDLE;
mutex_unlock(&g_request.states_mutex);
}
static int admin_get_root_container(void *address)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_ROOT_CONTAINER, NULL, false, 0);
if (ret)
goto end;
/* Check length against max */
if (g_request.response.length >= MAX_SO_CONT_SIZE) {
request_cancel();
mc_dev_err("response length exceeds maximum\n");
ret = -EREMOTEIO;
goto end;
}
/* Get data */
ret = request_receive(address, g_request.response.length);
if (!ret)
ret = g_request.response.length;
end:
mutex_unlock(&g_request.mutex);
return ret;
}
static int admin_get_sp_container(void *address, u32 spid)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_SP_CONTAINER, NULL, false, spid);
if (ret)
goto end;
/* Check length against max */
if (g_request.response.length >= MAX_SO_CONT_SIZE) {
request_cancel();
mc_dev_err("response length exceeds maximum\n");
ret = -EREMOTEIO;
goto end;
}
/* Get data */
ret = request_receive(address, g_request.response.length);
if (!ret)
ret = g_request.response.length;
end:
mutex_unlock(&g_request.mutex);
return ret;
}
static int admin_get_trustlet_container(void *address,
const struct mc_uuid_t *uuid, u32 spid)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_TRUSTLET_CONTAINER, uuid, false, spid);
if (ret)
goto end;
/* Check length against max */
if (g_request.response.length >= MAX_SO_CONT_SIZE) {
request_cancel();
mc_dev_err("response length exceeds maximum\n");
ret = -EREMOTEIO;
goto end;
}
/* Get data */
ret = request_receive(address, g_request.response.length);
if (!ret)
ret = g_request.response.length;
end:
mutex_unlock(&g_request.mutex);
return ret;
}
static struct tee_object *admin_get_trustlet(const struct mc_uuid_t *uuid,
bool is_gp, u32 *spid)
{
struct tee_object *obj = NULL;
bool is_sp_tl;
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_TRUSTLET, uuid, is_gp, 0);
if (ret)
goto end;
/* Allocate memory */
is_sp_tl = g_request.response.service_type == SERVICE_TYPE_SP_TRUSTLET;
obj = tee_object_alloc(is_sp_tl, g_request.response.length);
if (!obj) {
request_cancel();
ret = -ENOMEM;
goto end;
}
/* Get data */
ret = request_receive(&obj->data[obj->header_length], obj->length);
*spid = g_request.response.spid;
end:
mutex_unlock(&g_request.mutex);
if (ret)
return ERR_PTR(ret);
return obj;
}
static void mc_admin_sendcrashdump(void)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_SIGNAL_CRASH, NULL, false, 0);
if (ret)
goto end;
/* Done */
request_cancel();
end:
mutex_unlock(&g_request.mutex);
}
static int tee_object_make(u32 spid, struct tee_object *obj)
{
struct mc_blob_len_info *l_info = (struct mc_blob_len_info *)obj->data;
u8 *address = &obj->data[obj->header_length + obj->length];
struct mclf_header_v2 *thdr;
int ret;
/* Get root container */
ret = admin_get_root_container(address);
if (ret < 0)
goto err;
l_info->root_size = ret;
address += ret;
/* Get SP container */
ret = admin_get_sp_container(address, spid);
if (ret < 0)
goto err;
l_info->sp_size = ret;
address += ret;
/* Get trustlet container */
thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
ret = admin_get_trustlet_container(address, &thdr->uuid, spid);
if (ret < 0)
goto err;
l_info->ta_size = ret;
address += ret;
/* Setup lengths information */
l_info->magic = MC_TLBLOBLEN_MAGIC;
obj->length += sizeof(*l_info);
obj->length += l_info->root_size + l_info->sp_size + l_info->ta_size;
ret = 0;
err:
return ret;
}
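/*
 * Resulting secure object layout for an SP trustlet, as built by
 * tee_object_alloc() and tee_object_make() above:
 *
 *   obj->data
 *   +----------------------------+
 *   | struct mc_blob_len_info    |  <- header_length bytes
 *   | (magic + root/sp/ta sizes) |
 *   +----------------------------+
 *   | trustlet binary (length)   |
 *   +----------------------------+
 *   | root container             |
 *   +----------------------------+
 *   | SP container               |
 *   +----------------------------+
 *   | trustlet container         |
 *   +----------------------------+
 */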
struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length)
{
char __user *addr = (char __user *)address;
struct tee_object *obj;
u8 *data;
struct mclf_header_v2 thdr;
int ret;
/* Check length */
if (length < sizeof(thdr)) {
mc_dev_err("buffer shorter than header size\n");
return ERR_PTR(-EFAULT);
}
/* Read header */
if (copy_from_user(&thdr, addr, sizeof(thdr))) {
mc_dev_err("header: copy_from_user failed\n");
return ERR_PTR(-EFAULT);
}
/* Allocate memory */
obj = tee_object_alloc(thdr.service_type == SERVICE_TYPE_SP_TRUSTLET,
length);
if (!obj)
return ERR_PTR(-ENOMEM);
/* Copy header */
data = &obj->data[obj->header_length];
memcpy(data, &thdr, sizeof(thdr));
/* Copy the rest of the data */
data += sizeof(thdr);
if (copy_from_user(data, &addr[sizeof(thdr)], length - sizeof(thdr))) {
mc_dev_err("data: copy_from_user failed\n");
vfree(obj);
return ERR_PTR(-EFAULT);
}
if (obj->header_length) {
ret = tee_object_make(spid, obj);
if (ret) {
vfree(obj);
return ERR_PTR(ret);
}
}
return obj;
}
struct tee_object *tee_object_select(const struct mc_uuid_t *uuid)
{
struct tee_object *obj;
struct mclf_header_v2 *thdr;
obj = tee_object_alloc(false, sizeof(*thdr));
if (!obj)
return ERR_PTR(-ENOMEM);
thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
memcpy(&thdr->uuid, uuid, sizeof(thdr->uuid));
return obj;
}
struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp)
{
struct tee_object *obj;
u32 spid = 0;
/* admin_get_trustlet creates the right object based on service type */
obj = admin_get_trustlet(uuid, is_gp, &spid);
if (IS_ERR(obj))
return obj;
/* SP trustlet: create full secure object with all containers */
if (obj->header_length) {
int ret;
/* SPID was not found, so return ENOENT rather than EINVAL */
if (!spid) {
vfree(obj);
return ERR_PTR(-ENOENT);
}
ret = tee_object_make(spid, obj);
if (ret) {
vfree(obj);
return ERR_PTR(ret);
}
}
return obj;
}
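/*
 * Summary of tee_object_get(): GP and system TAs are used as returned by
 * the daemon (header_length == 0); SP trustlets (header_length != 0) get
 * their root/SP/TA containers appended by tee_object_make(), and a missing
 * SPID is reported as -ENOENT.
 */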
static inline int load_driver(struct tee_client *client,
struct mc_admin_load_info *info)
{
struct tee_object *obj;
struct mclf_header_v2 *thdr;
struct mc_identity identity = {
.login_type = LOGIN_PUBLIC,
};
uintptr_t dci = 0;
u32 dci_len = 0;
u32 sid;
int ret;
obj = tee_object_read(info->spid, info->address, info->length);
if (IS_ERR(obj))
return PTR_ERR(obj);
thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
if (!(thdr->flags & MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE)) {
/*
* The driver requires a DCI, although we won't be able to use
* it to communicate.
*/
dci_len = PAGE_SIZE;
ret = client_cbuf_create(client, dci_len, &dci, NULL);
if (ret)
goto end;
}
/* Open session */
ret = client_add_session(client, obj, dci, dci_len, &sid, false,
&identity);
if (!ret)
mc_dev_devel("driver loaded with sid %x", sid);
/*
* Always 'free' the buffer (will remain as long as used), never freed
* otherwise
*/
client_cbuf_free(client, dci);
end:
vfree(obj);
return ret;
}
static inline int load_token(struct mc_admin_load_info *token)
{
struct tee_mmu *mmu;
struct mcp_buffer_map map;
int ret;
mmu = tee_mmu_create(current, (void *)(uintptr_t)token->address,
token->length);
if (IS_ERR(mmu))
return PTR_ERR(mmu);
tee_mmu_buffer(mmu, &map);
ret = mcp_load_token(token->address, &map);
tee_mmu_delete(mmu);
return ret;
}
static inline int load_check(struct mc_admin_load_info *info)
{
struct tee_object *obj;
struct tee_mmu *mmu;
struct mcp_buffer_map map;
int ret;
obj = tee_object_read(info->spid, info->address, info->length);
if (IS_ERR(obj))
return PTR_ERR(obj);
mmu = tee_mmu_create(NULL, obj->data, obj->length);
if (IS_ERR(mmu))
return PTR_ERR(mmu);
tee_mmu_buffer(mmu, &map);
ret = mcp_load_check(obj, &map);
tee_mmu_delete(mmu);
return ret;
}
static ssize_t admin_write(struct file *file, const char __user *user,
size_t len, loff_t *off)
{
int ret;
/* No offset allowed [yet] */
if (*off) {
mc_dev_err("offset not supported\n");
g_request.response.error_no = EPIPE;
ret = -ECOMM;
goto err;
}
if (server_state_is(REQUEST_RECEIVED)) {
/* Check client state */
if (!client_state_is(REQUEST_SENT)) {
mc_dev_err("expected client state %d, not %d\n",
REQUEST_SENT, g_request.client_state);
g_request.response.error_no = EPIPE;
ret = -EPIPE;
goto err;
}
/* Receive response header */
if (copy_from_user(&g_request.response, user,
sizeof(g_request.response))) {
mc_dev_err("failed to get response from daemon\n");
g_request.response.error_no = EPIPE;
ret = -ECOMM;
goto err;
}
/* Check request ID */
if (g_request.request.request_id !=
g_request.response.request_id) {
mc_dev_err("expected id %d, not %d\n",
g_request.request.request_id,
g_request.response.request_id);
g_request.response.error_no = EPIPE;
ret = -EBADE;
goto err;
}
/* Response header is acceptable */
ret = sizeof(g_request.response);
if (g_request.response.length)
server_state_change(RESPONSE_SENT);
else
server_state_change(READY);
goto end;
} else if (server_state_is(RESPONSE_SENT)) {
/* Server is waiting */
server_state_change(DATA_SENT);
/* Get data */
ret = wait_for_completion_interruptible(
&g_request.client_complete);
/* Server received a signal, let's see if it tries again */
if (ret) {
server_state_change(RESPONSE_SENT);
return ret;
}
/* Check client state */
if (!client_state_is(BUFFERS_READY)) {
mc_dev_err("expected client state %d, not %d\n",
BUFFERS_READY, g_request.client_state);
g_request.response.error_no = EPIPE;
ret = -EPIPE;
goto err;
}
/* We do not support multiple writes: clamp to the expected size */
if (len != g_request.size)
len = g_request.size;
ret = copy_from_user(g_request.buffer, user, len);
if (ret) {
mc_dev_err("failed to get data from daemon\n");
g_request.response.error_no = EPIPE;
ret = -ECOMM;
goto err;
}
ret = len;
server_state_change(READY);
goto end;
} else {
ret = -ECOMM;
goto err;
}
err:
server_state_change(READY);
end:
complete(&g_request.server_complete);
return ret;
}
static long admin_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *uarg = (void __user *)arg;
int ret = -EINVAL;
mc_dev_devel("%u from %s\n", _IOC_NR(cmd), current->comm);
switch (cmd) {
case MC_ADMIN_IO_GET_DRIVER_REQUEST: {
/* Update TGID as it may change (when becoming a daemon) */
if (admin_ctx.admin_tgid != current->tgid) {
admin_ctx.admin_tgid = current->tgid;
mc_dev_info("admin PID changed to %d\n",
admin_ctx.admin_tgid);
}
/* Block until a request is available */
ret = wait_for_completion_interruptible(
&g_request.client_complete);
if (ret)
/* Interrupted by signal */
break;
/* Check client state */
if (!client_state_is(REQUEST_SENT)) {
mc_dev_err("expected client state %d, not %d\n",
REQUEST_SENT, g_request.client_state);
g_request.response.error_no = EPIPE;
complete(&g_request.server_complete);
ret = -EPIPE;
break;
}
/* Send request (the driver request mutex is held) */
ret = copy_to_user(uarg, &g_request.request,
sizeof(g_request.request));
if (ret) {
server_state_change(READY);
complete(&g_request.server_complete);
ret = -EPROTO;
break;
}
/* Now that the daemon got it, update the request ID */
g_request.request_id++;
server_state_change(REQUEST_RECEIVED);
break;
}
case MC_ADMIN_IO_GET_INFO: {
struct mc_admin_driver_info info;
info.drv_version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
MCDRVMODULEAPI_VERSION_MINOR);
info.initial_cmd_id = g_request.request_id;
ret = copy_to_user(uarg, &info, sizeof(info));
break;
}
case MC_ADMIN_IO_LOAD_DRIVER: {
struct tee_client *client = file->private_data;
struct mc_admin_load_info info;
if (copy_from_user(&info, uarg, sizeof(info))) {
ret = -EFAULT;
break;
}
/* Make sure we have a local client */
if (!client) {
client = client_create(true);
/* Store client for future use/close */
file->private_data = client;
}
if (!client) {
ret = -ENOMEM;
break;
}
ret = load_driver(client, &info);
break;
}
case MC_ADMIN_IO_LOAD_TOKEN: {
struct mc_admin_load_info info;
if (copy_from_user(&info, uarg, sizeof(info))) {
ret = -EFAULT;
break;
}
ret = load_token(&info);
break;
}
case MC_ADMIN_IO_LOAD_CHECK: {
struct mc_admin_load_info info;
if (copy_from_user(&info, uarg, sizeof(info))) {
ret = -EFAULT;
break;
}
ret = load_check(&info);
break;
}
default:
ret = -ENOIOCTLCMD;
}
return ret;
}
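/*
 * A minimal daemon-side sketch of the protocol served by admin_ioctl() and
 * admin_write() above. Illustration only: the file descriptor handling and
 * error handling are assumptions; the request/response layouts come from
 * public/mc_admin.h.
 */
#if 0	/* user-space example, not built with the driver */
static void serve_one_request(int fd)
{
	struct mc_admin_request req;
	struct mc_admin_response rsp;

	/* Blocks until a driver thread posts a request (request_send()) */
	if (ioctl(fd, MC_ADMIN_IO_GET_DRIVER_REQUEST, &req) < 0)
		return;

	/* First write: the response header; length == 0 signals an error */
	memset(&rsp, 0, sizeof(rsp));
	rsp.request_id = req.request_id;	/* Checked by admin_write() */
	rsp.error_no = ENOENT;			/* e.g. object not found */
	write(fd, &rsp, sizeof(rsp));

	/* With rsp.length != 0, a second write would carry the payload:
	 * write(fd, data, rsp.length);
	 */
}
#endif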
/*
 * admin_release() - This function is called from user space as close(...).
 * The client data is freed and the associated memory pages are unreserved.
 *
 * @inode
 * @file
 *
 * Returns 0
 */
static int admin_release(struct inode *inode, struct file *file)
{
/* ExySp: print close process info */
mc_dev_info("closed by PID(%d), name(%s)\n", current->pid, current->comm);
/* Close client if any */
if (file->private_data)
client_close((struct tee_client *)file->private_data);
/* Requests from driver to daemon */
mutex_lock(&g_request.states_mutex);
g_request.server_state = NOT_CONNECTED;
/* A client state other than IDLE indicates that a thread is waiting */
if (g_request.client_state != IDLE) {
g_request.response.error_no = ESHUTDOWN;
complete(&g_request.server_complete);
}
mutex_unlock(&g_request.states_mutex);
mc_dev_info("admin connection closed, PID %d\n", admin_ctx.admin_tgid);
admin_ctx.admin_tgid = 0;
/*
 * The return value is quite irrelevant here, as most apps don't check
 * the return value of close() and recovery is difficult anyway
 */
return 0;
}
static int admin_open(struct inode *inode, struct file *file)
{
int ret = 0;
/* ExySp: print open process info */
mc_dev_info("opened by PID(%d), name(%s)\n", current->pid, current->comm);
/* Only one connection allowed to admin interface */
mutex_lock(&admin_ctx.admin_tgid_mutex);
if (admin_ctx.admin_tgid) {
mc_dev_err("admin connection already open, PID %d\n",
admin_ctx.admin_tgid);
ret = -EBUSY;
} else {
admin_ctx.admin_tgid = current->tgid;
}
mutex_unlock(&admin_ctx.admin_tgid_mutex);
if (ret)
return ret;
/* Any value will do */
g_request.request_id = 42;
/* Setup the usual variables */
mc_dev_devel("accept %s as TEE daemon\n", current->comm);
/*
 * The daemon is connected, so we can now safely assume
 * the secure world is loaded too
 */
if (admin_ctx.last_start_ret > 0)
admin_ctx.last_start_ret = admin_ctx.tee_start_cb();
/* Failed to start the TEE, either now or before */
if (admin_ctx.last_start_ret) {
mutex_lock(&admin_ctx.admin_tgid_mutex);
admin_ctx.admin_tgid = 0;
mutex_unlock(&admin_ctx.admin_tgid_mutex);
return admin_ctx.last_start_ret;
}
/* Requests from driver to daemon */
server_state_change(READY);
mc_dev_info("admin connection open, PID %d\n", admin_ctx.admin_tgid);
return 0;
}
/* File operations table of this device driver */
static const struct file_operations mc_admin_fops = {
.owner = THIS_MODULE,
.open = admin_open,
.release = admin_release,
.unlocked_ioctl = admin_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = admin_ioctl,
#endif
.write = admin_write,
};
bool mc_is_admin_tgid(pid_t tgid)
{
bool match;
mutex_lock(&admin_ctx.admin_tgid_mutex);
match = admin_ctx.admin_tgid == tgid;
mutex_unlock(&admin_ctx.admin_tgid_mutex);
return match;
}
int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
void (*tee_stop_cb)(void))
{
mutex_init(&admin_ctx.admin_tgid_mutex);
/* Requests from driver to daemon */
mutex_init(&g_request.mutex);
mutex_init(&g_request.states_mutex);
init_completion(&g_request.client_complete);
init_completion(&g_request.server_complete);
mcp_register_crashhandler(mc_admin_sendcrashdump);
/* Create char device */
cdev_init(cdev, &mc_admin_fops);
/* Register the call back for starting the secure world */
admin_ctx.tee_start_cb = tee_start_cb;
admin_ctx.tee_stop_cb = tee_stop_cb;
admin_ctx.last_start_ret = 1;
return 0;
}
void mc_admin_exit(void)
{
if (!admin_ctx.last_start_ret)
admin_ctx.tee_stop_cb();
}
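/*
 * Lifecycle note (from mc_admin_init()/admin_open() above): last_start_ret
 * is initialized to 1 ("TEE not started yet"), so the start callback runs
 * on the first daemon connection in admin_open(); mc_admin_exit() only
 * invokes the stop callback if that start succeeded (last_start_ret == 0).
 */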

drivers/gud/gud-exynos7570/MobiCoreDriver/admin.h Normal file

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_ADMIN_H_
#define _MC_ADMIN_H_
struct cdev;
struct mc_uuid_t;
struct tee_object;
int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
void (*tee_stop_cb)(void));
void mc_admin_exit(void);
bool mc_is_admin_tgid(pid_t tgid);
struct tee_object *tee_object_select(const struct mc_uuid_t *uuid);
struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp);
struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length);
void tee_object_free(struct tee_object *object);
#endif /* _MC_ADMIN_H_ */

drivers/gud/gud-exynos7570/MobiCoreDriver/arm.h Normal file

@@ -0,0 +1,88 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_ARM_H_
#define _MC_ARM_H_
#include "main.h"
#ifdef CONFIG_ARM64
static inline bool has_security_extensions(void)
{
return true;
}
static inline bool is_secure_mode(void)
{
return false;
}
#else
/*
 * ARM TrustZone-specific masks and modes.
 * Vanilla Linux is unaware of the TrustZone extension:
 * arch/arm/include/asm/ptrace.h does not define monitor mode,
 * and the TZ bits in the CPUID are not defined either (the ARM port
 * uses magic numbers, see arch/arm/kernel/setup.c).
 */
#define ARM_MONITOR_MODE (0x16) /*(0b10110)*/
#define ARM_SECURITY_EXTENSION_MASK (0x30)
/* check if CPU supports the ARM TrustZone Security Extensions */
static inline bool has_security_extensions(void)
{
u32 fea = 0;
asm volatile(
"mrc p15, 0, %[fea], cr0, cr1, 0" :
[fea]"=r" (fea));
mc_dev_devel("CPU Features: 0x%X\n", fea);
/*
* If the CPU features ID has 0 for security features then the CPU
* doesn't support TrustZone at all!
*/
if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
return false;
return true;
}
/* check if running in secure mode */
static inline bool is_secure_mode(void)
{
u32 cpsr = 0;
u32 nsacr = 0;
asm volatile(
"mrc p15, 0, %[nsacr], cr1, cr1, 2\n"
"mrs %[cpsr], cpsr\n" :
[nsacr]"=r" (nsacr),
[cpsr]"=r"(cpsr));
mc_dev_devel("CPRS.M = set to 0x%X\n", cpsr & MODE_MASK);
mc_dev_devel("SCR.NS = set to 0x%X\n", nsacr);
/*
 * If the NSACR contains the reset value (0) then we are most likely
 * running in secure mode.
 * If the CPSR mode is set to monitor mode then we cannot load!
 */
if (nsacr == 0 || ((cpsr & MODE_MASK) == ARM_MONITOR_MODE))
return true;
return false;
}
#endif
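/*
 * A minimal usage sketch, assuming an init-path caller such as the probe
 * code in main.c (the function name below is illustrative): the TEE can
 * only be driven from a TrustZone-capable CPU running in the normal world.
 */
#if 0
static int mc_check_arm_mode(void)
{
	if (!has_security_extensions())
		return -ENODEV;	/* No TrustZone support on this CPU */
	if (is_secure_mode())
		return -ENODEV;	/* Kernel must run in the normal world */
	return 0;
}
#endif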
#endif /* _MC_ARM_H_ */

drivers/gud/gud-exynos7570/MobiCoreDriver/build_tag.h Normal file

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MOBICORE_COMPONENT_BUILD_TAG
#define MOBICORE_COMPONENT_BUILD_TAG \
"t-base-EXYNOS64-Android-310B-V007-20160505_211700_344"
#endif

drivers/gud/gud-exynos7570/MobiCoreDriver/client.c Normal file

@@ -0,0 +1,921 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>
#include "public/mc_user.h"
#include "public/mc_admin.h"
#include "main.h"
#include "admin.h" /* tee_object* */
#include "mcp.h"
#include "mmu.h"
#include "session.h"
#include "client.h"
/* Context */
static struct client_ctx {
/* Clients list */
struct mutex clients_lock;
struct list_head clients;
/* Clients waiting for their last cbuf to be released */
struct mutex closing_clients_lock;
struct list_head closing_clients;
} client_ctx;
/*
* Contiguous buffer allocated to TLCs.
* These buffers are used as world shared memory (wsm) to share with
* secure world.
*/
struct cbuf {
/* Client this cbuf belongs to */
struct tee_client *client;
/* List element for client's list of cbuf's */
struct list_head list;
/* Number of references kept to this buffer */
struct kref kref;
/* Kernel virtual start address */
uintptr_t addr;
/* User-space virtual start address */
uintptr_t uaddr;
/* Physical start address */
phys_addr_t phys;
/* 2^order = number of pages allocated */
unsigned int order;
/* Length of memory mapped to user */
u32 len;
/* Has been freed via the API */
bool api_freed;
};
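/*
 * cbuf reference counting (from the code below):
 * - client_cbuf_create() initializes the kref (the API/user reference);
 * - each mapping of the VMA takes a reference (cbuf_vm_open) and each
 *   unmapping drops one (cbuf_vm_close);
 * - client_cbuf_free() sets api_freed and drops the API reference;
 * - the pages are only released in cbuf_release(), once the last
 *   reference is gone.
 */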
static inline void cbuf_get(struct cbuf *cbuf)
{
kref_get(&cbuf->kref);
}
static void cbuf_release(struct kref *kref)
{
struct cbuf *cbuf = container_of(kref, struct cbuf, kref);
struct tee_client *client = cbuf->client;
/* Unlist from client */
mutex_lock(&client->cbufs_lock);
list_del_init(&cbuf->list);
mutex_unlock(&client->cbufs_lock);
/* Release client token */
client_put(client);
/* Free */
free_pages(cbuf->addr, cbuf->order);
mc_dev_devel("freed cbuf %p: client %p addr %lx uaddr %lx len %u\n",
cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
kfree(cbuf);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_cbufs);
}
static inline void cbuf_put(struct cbuf *cbuf)
{
kref_put(&cbuf->kref, cbuf_release);
}
/*
* Map a kernel contiguous buffer to user space
*/
static int cbuf_map(struct vm_area_struct *vmarea, uintptr_t addr, u32 len,
uintptr_t *uaddr)
{
int ret;
if (WARN(!uaddr, "No uaddr pointer available"))
return -EINVAL;
if (WARN(!vmarea, "No vma available"))
return -EINVAL;
if (WARN(!addr, "No addr available"))
return -EINVAL;
if (len != (u32)(vmarea->vm_end - vmarea->vm_start)) {
mc_dev_err("cbuf incompatible with vma\n");
return -EINVAL;
}
vmarea->vm_flags |= VM_IO;
ret = remap_pfn_range(vmarea, vmarea->vm_start,
page_to_pfn(virt_to_page(addr)),
vmarea->vm_end - vmarea->vm_start,
vmarea->vm_page_prot);
if (ret) {
*uaddr = 0;
mc_dev_err("User mapping failed\n");
return ret;
}
*uaddr = vmarea->vm_start;
return 0;
}
/*
* Allocate and initialize a client object
*/
struct tee_client *client_create(bool is_from_kernel)
{
struct tee_client *client;
/* Allocate client structure */
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
/* Increment debug counter */
atomic_inc(&g_ctx.c_clients);
/* initialize members */
client->pid = is_from_kernel ? 0 : current->pid;
memcpy(client->comm, current->comm, sizeof(client->comm));
kref_init(&client->kref);
INIT_LIST_HEAD(&client->cbufs);
mutex_init(&client->cbufs_lock);
INIT_LIST_HEAD(&client->sessions);
INIT_LIST_HEAD(&client->closing_sessions);
mutex_init(&client->sessions_lock);
INIT_LIST_HEAD(&client->list);
/* Add client to list of clients */
mutex_lock(&client_ctx.clients_lock);
list_add_tail(&client->list, &client_ctx.clients);
mutex_unlock(&client_ctx.clients_lock);
mc_dev_devel("created client %p\n", client);
return client;
}
/*
 * Free the client object and all objects it contains.
 * Can only be called by the last user referencing the client,
 * so a mutex lock seems overkill
 */
static void client_release(struct kref *kref)
{
struct tee_client *client;
client = container_of(kref, struct tee_client, kref);
/* Client is closed, remove from closing list */
mutex_lock(&client_ctx.closing_clients_lock);
list_del(&client->list);
mutex_unlock(&client_ctx.closing_clients_lock);
mc_dev_devel("freed client %p\n", client);
kfree(client);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_clients);
}
void client_put(struct tee_client *client)
{
kref_put(&client->kref, client_release);
}
/*
* Returns true if client is a kernel object.
*/
static inline bool client_is_kernel(struct tee_client *client)
{
return !client->pid;
}
/*
 * Check whether the client still has open sessions.
 * Return: true if the client has at least one session.
 */
bool client_has_sessions(struct tee_client *client)
{
bool ret;
/* Check for sessions */
mutex_lock(&client->sessions_lock);
ret = !list_empty(&client->sessions);
mutex_unlock(&client->sessions_lock);
mc_dev_devel("client %p, exit with %d\n", client, ret);
return ret;
}
/*
* At this point, nobody has access to the client anymore, so no new sessions
* are being created.
*/
static void client_close_sessions(struct tee_client *client)
{
struct tee_session *session;
mutex_lock(&client->sessions_lock);
while (!list_empty(&client->sessions)) {
session = list_first_entry(&client->sessions,
struct tee_session, list);
/* Move session to closing sessions list */
list_move(&session->list, &client->closing_sessions);
/* Call session_close without lock */
mutex_unlock(&client->sessions_lock);
session_close(session);
mutex_lock(&client->sessions_lock);
}
mutex_unlock(&client->sessions_lock);
}
/*
* At this point, nobody has access to the client anymore, so no new contiguous
* buffers are being created.
*/
static void client_close_kernel_cbufs(struct tee_client *client)
{
/* Put buffers allocated and not freed via the kernel API */
if (!client_is_kernel(client))
return;
/* Look for cbufs that the client has not freed and put them */
while (true) {
struct cbuf *cbuf = NULL, *candidate;
mutex_lock(&client->cbufs_lock);
list_for_each_entry(candidate, &client->cbufs, list) {
if (!candidate->api_freed) {
candidate->api_freed = true;
cbuf = candidate;
break;
}
}
mutex_unlock(&client->cbufs_lock);
if (!cbuf)
break;
cbuf_put(cbuf);
}
}
/*
 * Release a client and the session+cbuf objects it contains.
 * @param client the client to close
 */
void client_close(struct tee_client *client)
{
/* Move client from active clients to closing clients for debug */
mutex_lock(&client_ctx.clients_lock);
mutex_lock(&client_ctx.closing_clients_lock);
list_move(&client->list, &client_ctx.closing_clients);
mutex_unlock(&client_ctx.closing_clients_lock);
mutex_unlock(&client_ctx.clients_lock);
client_close_kernel_cbufs(client);
/* Close all remaining sessions */
client_close_sessions(client);
client_put(client);
mc_dev_devel("client %p closed\n", client);
}
/*
* The TEE is going to die, so get rid of whatever is shared with it
*/
void clients_kill_sessions(void)
{
struct tee_client *client;
mutex_lock(&client_ctx.clients_lock);
list_for_each_entry(client, &client_ctx.clients, list) {
/*
* session_kill() will put the session which should get freed
* and free its wsms/mmus and put any cbuf concerned
*/
mutex_lock(&client->sessions_lock);
while (!list_empty(&client->sessions)) {
struct tee_session *session;
session = list_first_entry(&client->sessions,
struct tee_session, list);
list_del(&session->list);
session_kill(session);
}
mutex_unlock(&client->sessions_lock);
}
mutex_unlock(&client_ctx.clients_lock);
}
/*
 * Open a TA for the given client. The TA binary is provided by the daemon.
 * @return driver error code
 */
int client_open_session(struct tee_client *client, u32 *session_id,
const struct mc_uuid_t *uuid, uintptr_t tci,
size_t tci_len, bool is_gp_uuid,
struct mc_identity *identity)
{
int err = 0;
u32 sid = 0;
struct tee_object *obj;
/* Get secure object */
obj = tee_object_get(uuid, is_gp_uuid);
if (IS_ERR(obj)) {
/* Try to select secure object inside the SWd if not found */
if ((PTR_ERR(obj) == -ENOENT) && g_ctx.f_ta_auth)
obj = tee_object_select(uuid);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto end;
}
}
/* Open session */
err = client_add_session(client, obj, tci, tci_len, &sid, is_gp_uuid,
identity);
/* Fill in return parameter */
if (!err)
*session_id = sid;
/* Delete secure object */
tee_object_free(obj);
end:
mc_dev_devel("session %x, exit with %d\n", sid, err);
return err;
}
/*
 * Open a TA for the given client. The TA binary is provided by the client.
 * @return driver error code
 */
int client_open_trustlet(struct tee_client *client, u32 *session_id, u32 spid,
uintptr_t trustlet, size_t trustlet_len,
uintptr_t tci, size_t tci_len)
{
struct tee_object *obj;
struct mc_identity identity = {
.login_type = LOGIN_PUBLIC,
};
u32 sid = 0;
int err = 0;
/* Create secure object from user-space trustlet binary */
obj = tee_object_read(spid, trustlet, trustlet_len);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto end;
}
/* Open session */
err = client_add_session(client, obj, tci, tci_len, &sid, false,
&identity);
/* Fill in return parameter */
if (!err)
*session_id = sid;
/* Delete secure object */
tee_object_free(obj);
end:
mc_dev_devel("session %x, exit with %d\n", sid, err);
return err;
}
/*
 * Open a TA and add the corresponding session object to the given client
 * return: driver error code
 */
int client_add_session(struct tee_client *client, const struct tee_object *obj,
uintptr_t tci, size_t len, u32 *session_id, bool is_gp,
struct mc_identity *identity)
{
struct tee_session *session = NULL;
struct tee_mmu *obj_mmu = NULL;
int ret = 0;
/*
* Create session object with temp sid=0 BEFORE session is started,
* otherwise if a GP TA is started and NWd session object allocation
* fails, we cannot handle the potentially delayed GP closing.
* Adding session to list must be done AFTER it is started (once we have
* sid), therefore it cannot be done within session_create().
*/
session = session_create(client, is_gp, identity);
if (IS_ERR(session))
return PTR_ERR(session);
/* Create blob L2 table (blob is allocated by driver, so task=NULL) */
obj_mmu = tee_mmu_create(NULL, obj->data, obj->length);
if (IS_ERR(obj_mmu)) {
ret = PTR_ERR(obj_mmu);
goto err;
}
/* Open session */
ret = session_open(session, obj, obj_mmu, tci, len);
/* Blob table no more needed in any case */
tee_mmu_delete(obj_mmu);
if (ret)
goto err;
mutex_lock(&client->sessions_lock);
/* Add session to client */
list_add_tail(&session->list, &client->sessions);
/* Set sid returned by SWd */
*session_id = session->mcp_session.id;
mutex_unlock(&client->sessions_lock);
err:
/* Close or free session on error */
if (ret == -ENODEV) {
/* The session must enter the closing process... */
list_add_tail(&session->list, &client->closing_sessions);
session_close(session);
} else if (ret) {
session_put(session);
}
return ret;
}
/*
 * Remove a session object from the client and close the corresponding TA
 * Return: 0 if the session was found and closed
 */
int client_remove_session(struct tee_client *client, u32 session_id)
{
struct tee_session *session = NULL, *candidate;
/* Move session from main list to closing list */
mutex_lock(&client->sessions_lock);
list_for_each_entry(candidate, &client->sessions, list) {
if (candidate->mcp_session.id == session_id) {
session = candidate;
list_move(&session->list, &client->closing_sessions);
break;
}
}
mutex_unlock(&client->sessions_lock);
if (!session)
return -ENXIO;
/* Close session */
return session_close(session);
}
/*
* Find a session object and increment its reference counter.
* Object cannot be freed until its counter reaches 0.
* return: pointer to the object, NULL if not found.
*/
static struct tee_session *client_get_session(struct tee_client *client,
u32 session_id)
{
struct tee_session *session = NULL, *candidate;
mutex_lock(&client->sessions_lock);
list_for_each_entry(candidate, &client->sessions, list) {
if (candidate->mcp_session.id == session_id) {
session = candidate;
session_get(session);
break;
}
}
mutex_unlock(&client->sessions_lock);
if (!session)
mc_dev_err("session %x not found\n", session_id);
return session;
}
/*
* Send a notification to TA
* @return driver error code
*/
int client_notify_session(struct tee_client *client, u32 session_id)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Send command to SWd */
ret = session_notify_swd(session);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/*
* Wait for a notification from TA
* @return driver error code
*/
int client_waitnotif_session(struct tee_client *client, u32 session_id,
s32 timeout, bool silent_expiry)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
ret = session_waitnotif(session, timeout, silent_expiry);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/*
* Read session exit/termination code
*/
int client_get_session_exitcode(struct tee_client *client, u32 session_id,
s32 *exit_code)
{
struct tee_session *session;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Retrieve error */
*exit_code = session_exitcode(session);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit code %d\n", session_id, *exit_code);
return 0;
}
/* Share a buffer with given TA in SWd */
int client_map_session_wsms(struct tee_client *client, u32 session_id,
struct mc_ioctl_buffer *bufs)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Add buffer to the session */
ret = session_wsms_add(session, bufs);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/* Stop sharing a buffer with SWd */
int client_unmap_session_wsms(struct tee_client *client, u32 session_id,
const struct mc_ioctl_buffer *bufs)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Remove buffer from session */
ret = session_wsms_remove(session, bufs);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/*
* This callback is called on remap
*/
static void cbuf_vm_open(struct vm_area_struct *vmarea)
{
struct cbuf *cbuf = vmarea->vm_private_data;
cbuf_get(cbuf);
}
/*
* This callback is called on unmap
*/
static void cbuf_vm_close(struct vm_area_struct *vmarea)
{
struct cbuf *cbuf = vmarea->vm_private_data;
cbuf_put(cbuf);
}
static struct vm_operations_struct cbuf_vm_ops = {
.open = cbuf_vm_open,
.close = cbuf_vm_close,
};
/*
* Create a cbuf object and add it to client
*/
int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
struct vm_area_struct *vmarea)
{
int err = 0;
struct cbuf *cbuf = NULL;
unsigned int order;
if (WARN(!client, "No client available"))
return -EINVAL;
if (WARN(!len, "No len available"))
return -EINVAL;
order = get_order(len);
if (order >= MAX_ORDER) {
mc_dev_err("Buffer size too large\n");
return -ENOMEM;
}
/* Allocate buffer descriptor structure */
cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
if (!cbuf)
return -ENOMEM;
/* Increment debug counter */
atomic_inc(&g_ctx.c_cbufs);
/* Allocate buffer */
cbuf->addr = __get_free_pages(GFP_USER | __GFP_ZERO, order);
if (!cbuf->addr) {
kfree(cbuf);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_cbufs);
return -ENOMEM;
}
/* Map to user space if applicable */
if (!client_is_kernel(client)) {
err = cbuf_map(vmarea, cbuf->addr, len, &cbuf->uaddr);
if (err) {
free_pages(cbuf->addr, order);
kfree(cbuf);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_cbufs);
return err;
}
}
/* Init descriptor members */
cbuf->client = client;
cbuf->phys = virt_to_phys((void *)cbuf->addr);
cbuf->len = len;
cbuf->order = order;
kref_init(&cbuf->kref);
INIT_LIST_HEAD(&cbuf->list);
/* Keep cbuf in VMA private data for refcounting (user-space clients) */
if (vmarea) {
vmarea->vm_private_data = cbuf;
vmarea->vm_ops = &cbuf_vm_ops;
}
/* Fill return parameter for k-api */
if (addr)
*addr = cbuf->addr;
/* Get a token on the client */
client_get(client);
/* Add buffer to list */
mutex_lock(&client->cbufs_lock);
list_add_tail(&cbuf->list, &client->cbufs);
mutex_unlock(&client->cbufs_lock);
mc_dev_devel("created cbuf %p: client %p addr %lx uaddr %lx len %u\n",
cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
return err;
}
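/*
 * Note: for user-space clients this function is reached from the device's
 * mmap handler (not part of this file), which supplies the vmarea; kernel
 * clients (mc_malloc_wsm()) pass a NULL vmarea and use cbuf->addr directly.
 */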
/*
* Find a contiguous buffer (cbuf) in the cbuf list of given client that
* contains given address and take a reference on it.
* Return pointer to the object, or NULL if not found.
*/
static struct cbuf *cbuf_get_by_addr(struct tee_client *client, uintptr_t addr)
{
struct cbuf *cbuf = NULL, *candidate;
bool is_kernel = client_is_kernel(client);
mutex_lock(&client->cbufs_lock);
list_for_each_entry(candidate, &client->cbufs, list) {
/* Compare to kernel VA or user VA depending on client type */
uintptr_t start = is_kernel ?
candidate->addr : candidate->uaddr;
uintptr_t end = start + candidate->len;
/* Check that (user) cbuf has not been unmapped */
if (!start)
break;
if ((addr >= start) && (addr < end)) {
cbuf = candidate;
break;
}
}
if (cbuf)
cbuf_get(cbuf);
mutex_unlock(&client->cbufs_lock);
return cbuf;
}
/*
* Remove a cbuf object from client, and mark it for freeing.
* Freeing will happen once all current references are released.
*/
int client_cbuf_free(struct tee_client *client, uintptr_t addr)
{
struct cbuf *cbuf = cbuf_get_by_addr(client, addr);
if (!cbuf) {
mc_dev_err("cbuf %lu not found\n", addr);
return -EINVAL;
}
/* Two references to put: the caller's and the one we just took */
cbuf_put(cbuf);
mutex_lock(&client->cbufs_lock);
cbuf->api_freed = true;
mutex_unlock(&client->cbufs_lock);
cbuf_put(cbuf);
return 0;
}
struct tee_mmu *client_mmu_create(struct tee_client *client, uintptr_t va,
u32 len, struct cbuf **cbuf_p)
{
/* Check if buffer is contained in a cbuf */
struct cbuf *cbuf = cbuf_get_by_addr(client, va);
struct task_struct *task = NULL;
struct tee_mmu *mmu;
*cbuf_p = cbuf;
if (cbuf) {
uintptr_t offset;
if (client_is_kernel(client)) {
offset = va - cbuf->addr;
} else {
offset = va - cbuf->uaddr;
/* Update va to point to kernel address */
va = cbuf->addr + offset;
}
if ((offset + len) > cbuf->len) {
mc_dev_err("crosses cbuf boundary\n");
cbuf_put(cbuf);
return ERR_PTR(-EINVAL);
}
} else if (!client_is_kernel(client)) {
/* Provide task if buffer was allocated in user space */
task = current;
}
/* Build MMU table for buffer */
mmu = tee_mmu_create(task, (void *)va, len);
if (IS_ERR_OR_NULL(mmu) && cbuf)
cbuf_put(cbuf);
return mmu;
}
void client_mmu_free(struct tee_client *client, uintptr_t va,
struct tee_mmu *mmu, struct cbuf *cbuf)
{
tee_mmu_delete(mmu);
if (cbuf)
cbuf_put(cbuf);
}
void client_init(void)
{
INIT_LIST_HEAD(&client_ctx.clients);
mutex_init(&client_ctx.clients_lock);
INIT_LIST_HEAD(&client_ctx.closing_clients);
mutex_init(&client_ctx.closing_clients_lock);
}
static inline int cbuf_debug_structs(struct kasnprintf_buf *buf,
struct cbuf *cbuf)
{
return kasnprintf(buf, "\tcbuf %p [%d]: addr %lx uaddr %lx len %u\n",
cbuf, kref_read(&cbuf->kref), cbuf->addr,
cbuf->uaddr, cbuf->len);
}
static int client_debug_structs(struct kasnprintf_buf *buf,
struct tee_client *client, bool is_closing)
{
struct cbuf *cbuf;
struct tee_session *session;
int ret;
if (client->pid)
ret = kasnprintf(buf, "client %p [%d]: %s (%d)%s\n",
client, kref_read(&client->kref),
client->comm, client->pid,
is_closing ? " <closing>" : "");
else
ret = kasnprintf(buf, "client %p [%d]: [kernel]%s\n",
client, kref_read(&client->kref),
is_closing ? " <closing>" : "");
if (ret < 0)
return ret;
/* Buffers */
mutex_lock(&client->cbufs_lock);
if (list_empty(&client->cbufs))
goto done_cbufs;
list_for_each_entry(cbuf, &client->cbufs, list) {
ret = cbuf_debug_structs(buf, cbuf);
if (ret < 0)
goto done_cbufs;
}
done_cbufs:
mutex_unlock(&client->cbufs_lock);
if (ret < 0)
return ret;
/* Sessions */
mutex_lock(&client->sessions_lock);
list_for_each_entry(session, &client->sessions, list) {
ret = session_debug_structs(buf, session, false);
if (ret < 0)
goto done_sessions;
}
list_for_each_entry(session, &client->closing_sessions, list) {
ret = session_debug_structs(buf, session, true);
if (ret < 0)
goto done_sessions;
}
done_sessions:
mutex_unlock(&client->sessions_lock);
if (ret < 0)
return ret;
return 0;
}
int clients_debug_structs(struct kasnprintf_buf *buf)
{
struct tee_client *client;
ssize_t ret = 0;
mutex_lock(&client_ctx.clients_lock);
list_for_each_entry(client, &client_ctx.clients, list) {
ret = client_debug_structs(buf, client, false);
if (ret < 0)
break;
}
mutex_unlock(&client_ctx.clients_lock);
if (ret < 0)
return ret;
mutex_lock(&client_ctx.closing_clients_lock);
list_for_each_entry(client, &client_ctx.closing_clients, list) {
ret = client_debug_structs(buf, client, true);
if (ret < 0)
break;
}
mutex_unlock(&client_ctx.closing_clients_lock);
return ret;
}

drivers/gud/gud-exynos7570/MobiCoreDriver/client.h Normal file

@@ -0,0 +1,97 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _CLIENT_H_
#define _CLIENT_H_
#include <linux/list.h>
#include <linux/sched.h> /* TASK_COMM_LEN */
struct tee_object;
struct tee_client {
/* PID of task that opened the device, 0 if kernel */
pid_t pid;
/* Command name of the task */
char comm[TASK_COMM_LEN];
/* Number of references kept to this object */
struct kref kref;
/* List of contiguous buffers allocated by mcMallocWsm for the client */
struct list_head cbufs;
struct mutex cbufs_lock; /* lock for the cbufs list */
/* List of TA sessions opened by this client */
struct list_head sessions;
struct list_head closing_sessions;
struct mutex sessions_lock; /* sessions list + closing */
/* The list entry to attach to "ctx.clients" list */
struct list_head list;
};
/* Client */
struct tee_client *client_create(bool is_from_kernel);
static inline void client_get(struct tee_client *client)
{
kref_get(&client->kref);
}
void client_put(struct tee_client *client);
bool client_has_sessions(struct tee_client *client);
void client_close(struct tee_client *client);
/* All clients */
void clients_kill_sessions(void);
/* Session */
int client_open_session(struct tee_client *client, u32 *session_id,
const struct mc_uuid_t *uuid, uintptr_t tci,
size_t tci_len, bool is_gp_uuid,
struct mc_identity *identity);
int client_open_trustlet(struct tee_client *client, u32 *session_id, u32 spid,
uintptr_t trustlet, size_t trustlet_len,
uintptr_t tci, size_t tci_len);
int client_add_session(struct tee_client *client,
const struct tee_object *obj, uintptr_t tci, size_t len,
u32 *p_sid, bool is_gp_uuid,
struct mc_identity *identity);
int client_remove_session(struct tee_client *client, u32 session_id);
int client_notify_session(struct tee_client *client, u32 session_id);
int client_waitnotif_session(struct tee_client *client, u32 session_id,
s32 timeout, bool silent_expiry);
int client_get_session_exitcode(struct tee_client *client, u32 session_id,
s32 *exit_code);
int client_map_session_wsms(struct tee_client *client, u32 session_id,
struct mc_ioctl_buffer *bufs);
int client_unmap_session_wsms(struct tee_client *client, u32 session_id,
const struct mc_ioctl_buffer *bufs);
/* Contiguous buffer */
int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
struct vm_area_struct *vmarea);
int client_cbuf_free(struct tee_client *client, uintptr_t addr);
/* MMU */
struct cbuf;
struct tee_mmu *client_mmu_create(struct tee_client *client, uintptr_t buf,
u32 len, struct cbuf **cbuf);
void client_mmu_free(struct tee_client *client, uintptr_t buf,
struct tee_mmu *mmu, struct cbuf *cbuf);
/* Global */
void client_init(void);
/* Debug */
int clients_debug_structs(struct kasnprintf_buf *buf);
#endif /* _CLIENT_H_ */

drivers/gud/gud-exynos7570/MobiCoreDriver/clientlib.c Normal file

@@ -0,0 +1,430 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/list.h>
#include "public/mc_user.h"
#include "public/mc_admin.h"
#include "public/mobicore_driver_api.h"
#include "main.h"
#include "client.h"
enum mc_result convert(int err)
{
switch (-err) {
case 0:
return MC_DRV_OK;
case ENOMSG:
return MC_DRV_NO_NOTIFICATION;
case EBADMSG:
return MC_DRV_ERR_NOTIFICATION;
case EAGAIN:
return MC_DRV_ERR_OUT_OF_RESOURCES;
case EHOSTDOWN:
return MC_DRV_ERR_INIT;
case ENODEV:
return MC_DRV_ERR_UNKNOWN_DEVICE;
case ENXIO:
return MC_DRV_ERR_UNKNOWN_SESSION;
case EPERM:
return MC_DRV_ERR_INVALID_OPERATION;
case EBADE:
return MC_DRV_ERR_INVALID_RESPONSE;
case ETIME:
return MC_DRV_ERR_TIMEOUT;
case ENOMEM:
return MC_DRV_ERR_NO_FREE_MEMORY;
case EUCLEAN:
return MC_DRV_ERR_FREE_MEMORY_FAILED;
case ENOTEMPTY:
return MC_DRV_ERR_SESSION_PENDING;
case EHOSTUNREACH:
return MC_DRV_ERR_DAEMON_UNREACHABLE;
case ENOENT:
return MC_DRV_ERR_INVALID_DEVICE_FILE;
case EINVAL:
return MC_DRV_ERR_INVALID_PARAMETER;
case EPROTO:
return MC_DRV_ERR_KERNEL_MODULE;
case ECOMM:
return MC_DRV_INFO_NOTIFICATION;
case EUNATCH:
return MC_DRV_ERR_NQ_FAILED;
case ERESTARTSYS:
return MC_DRV_ERR_INTERRUPTED_BY_SIGNAL;
default:
mc_dev_devel("error is %d\n", err);
return MC_DRV_ERR_UNKNOWN;
}
}
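/*
 * The core client API (client.c) returns 0 or a negative errno; convert()
 * above maps the absolute value to the public enum mc_result codes.
 */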
static inline bool is_valid_device(u32 device_id)
{
return device_id == MC_DEVICE_ID_DEFAULT;
}
static struct tee_client *client;
static int open_count;
static DEFINE_MUTEX(dev_mutex); /* Lock for the device */
static bool clientlib_client_get(void)
{
bool ret = true;
mutex_lock(&dev_mutex);
if (!client)
ret = false;
else
client_get(client);
mutex_unlock(&dev_mutex);
return ret;
}
static void clientlib_client_put(void)
{
mutex_lock(&dev_mutex);
client_put(client);
mutex_unlock(&dev_mutex);
}
enum mc_result mc_open_device(u32 device_id)
{
enum mc_result mc_result = MC_DRV_OK;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
mutex_lock(&dev_mutex);
if (!open_count)
client = client_create(true);
if (client) {
open_count++;
mc_dev_devel("Successfully opened the device\n");
} else {
mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
mc_dev_devel("Could not open device\n");
}
mutex_unlock(&dev_mutex);
return mc_result;
}
EXPORT_SYMBOL(mc_open_device);
enum mc_result mc_close_device(u32 device_id)
{
enum mc_result mc_result = MC_DRV_OK;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
mutex_lock(&dev_mutex);
if (!client) {
mc_result = MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
goto end;
}
if (open_count > 1) {
open_count--;
goto end;
}
/* Check sessions and freeze client */
if (client_has_sessions(client)) {
mc_result = MC_DRV_ERR_SESSION_PENDING;
goto end;
}
/* Close the device */
client_close(client);
client = NULL;
open_count = 0;
end:
mutex_unlock(&dev_mutex);
return mc_result;
}
EXPORT_SYMBOL(mc_close_device);
enum mc_result mc_open_session(struct mc_session_handle *session,
const struct mc_uuid_t *uuid, u8 *tci, u32 len)
{
struct mc_identity identity = {
.login_type = LOGIN_PUBLIC,
};
enum mc_result ret;
/* Check parameters */
if (!session || !uuid)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_open_session(client, &session->session_id, uuid,
(uintptr_t)tci, len, false,
&identity));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_open_session);
enum mc_result mc_open_trustlet(struct mc_session_handle *session, u32 spid,
u8 *trustlet, u32 trustlet_len,
u8 *tci, u32 len)
{
enum mc_result ret;
/* Check parameters */
if (!session || !trustlet)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_open_trustlet(client, &session->session_id, spid,
(uintptr_t)trustlet, trustlet_len,
(uintptr_t)tci, len));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_open_trustlet);
enum mc_result mc_close_session(struct mc_session_handle *session)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_remove_session(client, session->session_id));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_close_session);
enum mc_result mc_notify(struct mc_session_handle *session)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_notify_session(client, session->session_id));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_notify);
enum mc_result mc_wait_notification(struct mc_session_handle *session,
s32 timeout)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_waitnotif_session(client, session->session_id,
timeout, false));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_wait_notification);
enum mc_result mc_malloc_wsm(u32 device_id, u32 align, u32 len, u8 **wsm,
u32 wsm_flags)
{
enum mc_result ret;
uintptr_t va;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!len)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!wsm)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_cbuf_create(client, len, &va, NULL));
if (ret == MC_DRV_OK)
*wsm = (u8 *)va;
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_malloc_wsm);
enum mc_result mc_free_wsm(u32 device_id, u8 *wsm)
{
enum mc_result ret;
uintptr_t va = (uintptr_t)wsm;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_cbuf_free(client, va));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_free_wsm);
enum mc_result mc_map(struct mc_session_handle *session, void *address,
u32 length, struct mc_bulk_map *map_info)
{
enum mc_result ret;
struct mc_ioctl_buffer bufs[MC_MAP_MAX];
u32 i;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!map_info)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
bufs[0].va = (uintptr_t)address;
bufs[0].len = length;
for (i = 1; i < MC_MAP_MAX; i++)
bufs[i].va = 0;
ret = convert(client_map_session_wsms(client, session->session_id,
bufs));
if (ret == MC_DRV_OK) {
map_info->secure_virt_addr = bufs[0].sva;
map_info->secure_virt_len = bufs[0].len;
}
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_map);
enum mc_result mc_unmap(struct mc_session_handle *session, void *address,
struct mc_bulk_map *map_info)
{
enum mc_result ret;
struct mc_ioctl_buffer bufs[MC_MAP_MAX];
u32 i;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!map_info)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
bufs[0].va = (uintptr_t)address;
bufs[0].len = map_info->secure_virt_len;
bufs[0].sva = map_info->secure_virt_addr;
for (i = 1; i < MC_MAP_MAX; i++)
bufs[i].va = 0;
ret = convert(client_unmap_session_wsms(client, session->session_id,
bufs));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_unmap);
enum mc_result mc_get_session_error_code(struct mc_session_handle *session,
s32 *exit_code)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!exit_code)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_get_session_exitcode(client, session->session_id,
exit_code));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_get_session_error_code);
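/*
 * Illustrative caller (a minimal sketch, not part of the driver): shows the
 * intended sequence of the exported API above. The UUID value, TCI size and
 * timeout are made-up placeholders; fill them in for a real trusted app.
 * Kept in "#if 0" so the file compiles unchanged.
 */
#if 0
static enum mc_result example_session(void)
{
	struct mc_session_handle session;
	struct mc_uuid_t uuid;
	enum mc_result ret;
	u8 *tci;

	memset(&session, 0, sizeof(session));
	memset(&uuid, 0, sizeof(uuid));		/* TA UUID goes here */
	session.device_id = MC_DEVICE_ID_DEFAULT;

	ret = mc_open_device(MC_DEVICE_ID_DEFAULT);
	if (ret != MC_DRV_OK)
		return ret;

	/* World-shared memory for the Trustlet Connector Interface */
	ret = mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, PAGE_SIZE, &tci, 0);
	if (ret != MC_DRV_OK)
		goto close_device;

	ret = mc_open_session(&session, &uuid, tci, PAGE_SIZE);
	if (ret != MC_DRV_OK)
		goto free_wsm;

	/* Fill the TCI, then ping-pong with the TA */
	ret = mc_notify(&session);
	if (ret == MC_DRV_OK)
		ret = mc_wait_notification(&session, 1000 /* ms */);

	mc_close_session(&session);
free_wsm:
	mc_free_wsm(MC_DEVICE_ID_DEFAULT, tci);
close_device:
	mc_close_device(MC_DEVICE_ID_DEFAULT);
	return ret;
}
#endif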


@@ -0,0 +1,161 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "platform.h"
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include "main.h"
#include "clock.h"
static struct clk_context {
struct clk *mc_ce_iface_clk;
struct clk *mc_ce_core_clk;
struct clk *mc_ce_bus_clk;
struct clk *mc_ce_core_src_clk;
} clk_ctx;
int mc_clock_init(void)
{
int ret = 0;
#ifdef MC_CLOCK_CORESRC_DEFAULTRATE
int core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
/* Get core clk src */
clk_ctx.mc_ce_core_src_clk = clk_get(g_ctx.mcd, "core_clk_src");
if (IS_ERR(clk_ctx.mc_ce_core_src_clk)) {
ret = PTR_ERR(clk_ctx.mc_ce_core_src_clk);
mc_dev_err("cannot get core src clock: %d\n", ret);
goto error;
}
#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
if (of_property_read_u32(g_ctx.mcd->of_node,
MC_CRYPTO_CLOCK_CORESRC_PROPNAME,
&core_src_rate)) {
core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
mc_dev_err("cannot get ce clock frequency from DT, use %d\n",
core_src_rate);
}
#endif /* MC_CRYPTO_CLOCK_CORESRC_PROPNAME */
ret = clk_set_rate(clk_ctx.mc_ce_core_src_clk, core_src_rate);
if (ret) {
clk_put(clk_ctx.mc_ce_core_src_clk);
clk_ctx.mc_ce_core_src_clk = NULL;
mc_dev_err("cannot set core clock src rate: %d\n", ret);
ret = -EIO;
goto error;
}
#endif /* MC_CLOCK_CORESRC_DEFAULTRATE */
/* Get core clk */
clk_ctx.mc_ce_core_clk = clk_get(g_ctx.mcd, "core_clk");
if (IS_ERR(clk_ctx.mc_ce_core_clk)) {
ret = PTR_ERR(clk_ctx.mc_ce_core_clk);
mc_dev_err("cannot get core clock: %d\n", ret);
goto error;
}
/* Get Interface clk */
clk_ctx.mc_ce_iface_clk = clk_get(g_ctx.mcd, "iface_clk");
if (IS_ERR(clk_ctx.mc_ce_iface_clk)) {
clk_put(clk_ctx.mc_ce_core_clk);
ret = PTR_ERR(clk_ctx.mc_ce_iface_clk);
mc_dev_err("cannot get iface clock: %d\n", ret);
goto error;
}
/* Get AXI clk */
clk_ctx.mc_ce_bus_clk = clk_get(g_ctx.mcd, "bus_clk");
if (IS_ERR(clk_ctx.mc_ce_bus_clk)) {
clk_put(clk_ctx.mc_ce_iface_clk);
clk_put(clk_ctx.mc_ce_core_clk);
ret = PTR_ERR(clk_ctx.mc_ce_bus_clk);
mc_dev_err("cannot get AXI bus clock: %d\n", ret);
goto error;
}
return ret;
error:
clk_ctx.mc_ce_core_clk = NULL;
clk_ctx.mc_ce_iface_clk = NULL;
clk_ctx.mc_ce_bus_clk = NULL;
clk_ctx.mc_ce_core_src_clk = NULL;
return ret;
}
void mc_clock_exit(void)
{
if (clk_ctx.mc_ce_iface_clk)
clk_put(clk_ctx.mc_ce_iface_clk);
if (clk_ctx.mc_ce_core_clk)
clk_put(clk_ctx.mc_ce_core_clk);
if (clk_ctx.mc_ce_bus_clk)
clk_put(clk_ctx.mc_ce_bus_clk);
if (clk_ctx.mc_ce_core_src_clk)
clk_put(clk_ctx.mc_ce_core_src_clk);
}
int mc_clock_enable(void)
{
int rc;
rc = clk_prepare_enable(clk_ctx.mc_ce_core_clk);
if (rc) {
mc_dev_err("cannot enable core clock\n");
goto err_core;
}
rc = clk_prepare_enable(clk_ctx.mc_ce_iface_clk);
if (rc) {
mc_dev_err("cannot enable interface clock\n");
goto err_iface;
}
rc = clk_prepare_enable(clk_ctx.mc_ce_bus_clk);
if (rc) {
mc_dev_err("cannot enable bus clock\n");
goto err_bus;
}
return 0;
err_bus:
clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
err_iface:
clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
err_core:
return rc;
}
void mc_clock_disable(void)
{
if (clk_ctx.mc_ce_iface_clk)
clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
if (clk_ctx.mc_ce_core_clk)
clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
if (clk_ctx.mc_ce_bus_clk)
clk_disable_unprepare(clk_ctx.mc_ce_bus_clk);
}
#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */


@@ -0,0 +1,53 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_CLOCK_H_
#define _MC_CLOCK_H_
#include "platform.h" /* MC_CRYPTO_CLOCK_MANAGEMENT */
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
/* Initialize secure crypto clocks */
int mc_clock_init(void);
/* Free secure crypto clocks */
void mc_clock_exit(void);
/* Enable secure crypto clocks */
int mc_clock_enable(void);
/* Disable secure crypto clocks */
void mc_clock_disable(void);
#else /* MC_CRYPTO_CLOCK_MANAGEMENT */
static inline int mc_clock_init(void)
{
return 0;
}
static inline void mc_clock_exit(void)
{
}
static inline int mc_clock_enable(void)
{
return 0;
}
static inline void mc_clock_disable(void)
{
}
#endif /* !MC_CRYPTO_CLOCK_MANAGEMENT */
#endif /* _MC_CLOCK_H_ */


@@ -0,0 +1,725 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include "public/mc_user.h"
#include "public/mc_linux_api.h"
#include "mci/mcifc.h"
#include "platform.h" /* MC_FASTCALL_WORKER_THREAD and more */
#include "main.h"
#include "clock.h" /* mc_clock_enable, mc_clock_disable */
#include "fastcall.h"
/* ExySp: Lock for core switch processing */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
struct mutex core_switch_lock;
uint8_t core_status = 0xFF;
static int disable_local_timer;
int mc_timer(void);
void mc_set_schedule_policy(int core);
int __mc_switch_core(int cpu);
#endif
struct fastcall_work {
#ifdef MC_FASTCALL_WORKER_THREAD
struct kthread_work work;
#else
struct work_struct work;
#endif
void *data;
};
/* generic fast call parameters */
union mc_fc_generic {
struct mc_fc_as_in {
u32 cmd;
u32 param[3];
} as_in;
struct {
u32 resp;
u32 ret;
u32 param[2];
} as_out;
};
/* fast call init */
union mc_fc_init {
union mc_fc_generic as_generic;
struct {
u32 cmd;
u32 base;
u32 nq_info;
u32 mcp_info;
} as_in;
struct {
u32 resp;
u32 ret;
u32 flags;
u32 rfu;
} as_out;
};
/* fast call info parameters */
union mc_fc_info {
union mc_fc_generic as_generic;
struct {
u32 cmd;
u32 ext_info_id;
u32 rfu[2];
} as_in;
struct {
u32 resp;
u32 ret;
u32 state;
u32 ext_info;
} as_out;
};
#ifdef TBASE_CORE_SWITCHER
/* fast call switch Core parameters */
union mc_fc_switch_core {
union mc_fc_generic as_generic;
struct {
u32 cmd;
u32 core_id;
u32 rfu[2];
} as_in;
struct {
u32 resp;
u32 ret;
u32 state;
u32 ext_info;
} as_out;
};
#endif
#ifdef MC_FASTCALL_WORKER_THREAD
static struct task_struct *fastcall_thread;
static DEFINE_KTHREAD_WORKER(fastcall_worker);
#endif
/* Structure to log SMC calls */
struct smc_log_entry {
u64 cpu_clk;
struct mc_fc_as_in as_in;
};
#define SMC_LOG_SIZE 256
static struct smc_log_entry smc_log[SMC_LOG_SIZE];
static int smc_log_index;
/*
* _smc() - fast call to MobiCore
*
* @data: pointer to fast call data
*/
static inline int _smc(union mc_fc_generic *mc_fc_generic)
{
if (!mc_fc_generic)
return -EINVAL;
/* Log SMC call */
smc_log[smc_log_index].cpu_clk = local_clock();
smc_log[smc_log_index].as_in = mc_fc_generic->as_in;
if (++smc_log_index >= SMC_LOG_SIZE)
smc_log_index = 0;
#ifdef MC_SMC_FASTCALL
return smc_fastcall(mc_fc_generic, sizeof(*mc_fc_generic));
#else /* MC_SMC_FASTCALL */
{
#ifdef CONFIG_ARM64
		/* SMC expects values in x0-x3 */
register u64 reg0 __asm__("x0") = mc_fc_generic->as_in.cmd;
register u64 reg1 __asm__("x1") = mc_fc_generic->as_in.param[0];
register u64 reg2 __asm__("x2") = mc_fc_generic->as_in.param[1];
register u64 reg3 __asm__("x3") = mc_fc_generic->as_in.param[2];
/*
* According to AARCH64 SMC Calling Convention (ARM DEN 0028A),
* section 3.1: registers x4-x17 are unpredictable/scratch
* registers. So we have to make sure that the compiler does
		 * not allocate any of those registers by letting it know that
* the asm code might clobber them.
*/
__asm__ volatile (
"smc #0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
:
: "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
"x12", "x13", "x14", "x15", "x16", "x17"
);
#else /* CONFIG_ARM64 */
		/* SMC expects values in r0-r3 */
register u32 reg0 __asm__("r0") = mc_fc_generic->as_in.cmd;
register u32 reg1 __asm__("r1") = mc_fc_generic->as_in.param[0];
register u32 reg2 __asm__("r2") = mc_fc_generic->as_in.param[1];
register u32 reg3 __asm__("r3") = mc_fc_generic->as_in.param[2];
__asm__ volatile (
#ifdef MC_ARCH_EXTENSION_SEC
/*
* This pseudo op is supported and required from
* binutils 2.21 on
*/
".arch_extension sec\n"
#endif /* MC_ARCH_EXTENSION_SEC */
"smc #0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
);
#ifdef __ARM_VE_A9X4_QEMU__
/*
* Qemu does not return to the address following the SMC
* instruction so we have to insert several nop instructions to
* workaround this Qemu bug.
*/
__asm__ volatile (
"nop\n"
"nop\n"
"nop\n"
"nop"
);
#endif /* __ARM_VE_A9X4_QEMU__ */
#endif /* !CONFIG_ARM64 */
/* set response */
mc_fc_generic->as_out.resp = reg0;
mc_fc_generic->as_out.ret = reg1;
mc_fc_generic->as_out.param[0] = reg2;
mc_fc_generic->as_out.param[1] = reg3;
}
return 0;
#endif /* !MC_SMC_FASTCALL */
}
#ifdef TBASE_CORE_SWITCHER
static int active_cpu;
#ifdef CONFIG_SECURE_OS_BOOSTER_API
/* ExySp: for sos performance */
void mc_set_schedule_policy(int core)
{
if (core == DEFAULT_BIG_CORE)
disable_local_timer = 1;
else
disable_local_timer = 0;
}
#endif
#ifdef MC_FASTCALL_WORKER_THREAD
#ifdef CONFIG_SECURE_OS_BOOSTER_API
/* ExySp: for sos performance */
static void mc_cpu_offline(int cpu)
{
int i;
mutex_lock(&core_switch_lock);
core_status &= ~(0x1 << cpu);
if (active_cpu != cpu) {
mc_dev_devel("not active CPU, no action taken\n");
mutex_unlock(&core_switch_lock);
return;
}
	/* Choose the first online CPU and switch! */
for_each_online_cpu(i) {
if (cpu != i) {
mc_dev_devel("CPU %d is dying, switching to %d\n",
cpu, i);
mc_set_schedule_policy(DEFAULT_LITTLE_CORE);
__mc_switch_core(i);
break;
}
mc_dev_devel("Skipping CPU %d\n", cpu);
}
mutex_unlock(&core_switch_lock);
}
void mc_cpu_online(int cpu)
{
mutex_lock(&core_switch_lock);
core_status |= (0x1 << cpu);
mutex_unlock(&core_switch_lock);
}
#else
static void mc_cpu_offline(int cpu)
{
int i;
if (active_cpu != cpu) {
mc_dev_devel("not active CPU, no action taken\n");
return;
}
	/* Choose the first online CPU and switch! */
for_each_online_cpu(i) {
if (cpu != i) {
mc_dev_devel("CPU %d is dying, switching to %d\n",
cpu, i);
mc_switch_core(i);
break;
}
mc_dev_devel("Skipping CPU %d\n", cpu);
}
}
#endif
static int mobicore_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int cpu = (int)(uintptr_t)hcpu;
switch (action) {
#ifdef CONFIG_SECURE_OS_BOOSTER_API
/* ExySp: for sos performance */
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
mc_cpu_online(cpu);
break;
#endif
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/* ExySp */
mc_dev_info("Cpu %d is going to hotplug out\n", cpu);
mc_cpu_offline(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
/* ExySp */
mc_dev_info("Cpu %d is hotplug out\n", cpu);
break;
}
return NOTIFY_OK;
}
static struct notifier_block mobicore_cpu_notifer = {
.notifier_call = mobicore_cpu_callback,
};
#endif /* MC_FASTCALL_WORKER_THREAD */
static cpumask_t mc_exec_core_switch(union mc_fc_generic *mc_fc_generic)
{
cpumask_t cpu;
int new_cpu;
u32 cpu_id[] = CPU_IDS;
new_cpu = mc_fc_generic->as_in.param[0];
mc_fc_generic->as_in.param[0] = cpu_id[mc_fc_generic->as_in.param[0]];
if (_smc(mc_fc_generic) != 0 || mc_fc_generic->as_out.ret != 0) {
/* ExySp: print info */
mc_dev_info("CoreSwap failed %d -> %d (cpu %d still active)\n",
raw_smp_processor_id(),
mc_fc_generic->as_in.param[0],
raw_smp_processor_id());
} else {
active_cpu = new_cpu;
/* ExySp: print info */
mc_dev_info("CoreSwap ok %d -> %d\n",
raw_smp_processor_id(), active_cpu);
}
cpumask_clear(&cpu);
cpumask_set_cpu(active_cpu, &cpu);
return cpu;
}
static ssize_t debug_coreswitch_write(struct file *file,
const char __user *buffer,
size_t buffer_len, loff_t *x)
{
int new_cpu = 0;
/* Invalid data, nothing to do */
if (buffer_len < 1)
return -EINVAL;
if (kstrtouint_from_user(buffer, buffer_len, 0, &new_cpu))
return -EINVAL;
mc_dev_devel("Set active cpu to %d\n", new_cpu);
mc_switch_core(new_cpu);
return buffer_len;
}
static const struct file_operations mc_debug_coreswitch_ops = {
.write = debug_coreswitch_write,
};
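/*
 * Illustrative use from user space, assuming debugfs is mounted at its
 * default location: move the TEE to core 2 with
 *
 *	echo 2 > /sys/kernel/debug/trustonic_tee/active_cpu
 */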
#else /* TBASE_CORE_SWITCHER */
static inline cpumask_t mc_exec_core_switch(union mc_fc_generic *mc_fc_generic)
{
return CPU_MASK_CPU0;
}
#endif /* !TBASE_CORE_SWITCHER */
#ifdef MC_FASTCALL_WORKER_THREAD
static void fastcall_work_func(struct kthread_work *work)
#else
static void fastcall_work_func(struct work_struct *work)
#endif
{
struct fastcall_work *fc_work =
container_of(work, struct fastcall_work, work);
union mc_fc_generic *mc_fc_generic = fc_work->data;
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
int irq_check_cnt = 0;
struct irq_desc *desc = irq_to_desc(MC_INTR_LOCAL_TIMER);
#endif
if (!mc_fc_generic)
return;
mc_clock_enable();
if (mc_fc_generic->as_in.cmd == MC_FC_SWAP_CPU) {
#ifdef MC_FASTCALL_WORKER_THREAD
cpumask_t new_msk = mc_exec_core_switch(mc_fc_generic);
set_cpus_allowed(fastcall_thread, new_msk);
#else
mc_exec_core_switch(mc_fc_generic);
#endif
} else {
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
if (active_cpu == DEFAULT_BIG_CORE && disable_local_timer) {
irq_check_cnt++;
disable_irq(MC_INTR_LOCAL_TIMER);
mc_timer();
}
#endif
_smc(mc_fc_generic);
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
if (irq_check_cnt) {
if (desc->depth != 0)
enable_irq(MC_INTR_LOCAL_TIMER);
}
#endif
}
mc_clock_disable();
}
static bool mc_fastcall(void *data)
{
#ifdef MC_FASTCALL_WORKER_THREAD
struct fastcall_work fc_work = {
KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
.data = data,
};
if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
return false;
/* If work is queued or executing, wait for it to finish execution */
flush_kthread_work(&fc_work.work);
#else
struct fastcall_work fc_work = {
.data = data,
};
INIT_WORK_ONSTACK(&fc_work.work, fastcall_work_func);
if (!schedule_work_on(0, &fc_work.work))
return false;
flush_work(&fc_work.work);
#endif
return true;
}
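/*
 * Note that the work item above lives on the caller's stack. This is only
 * safe because flush_kthread_work()/flush_work() guarantees the handler has
 * run to completion before mc_fastcall() returns and the frame goes away.
 */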
int mc_fastcall_init(void)
{
int ret = mc_clock_init();
if (ret)
return ret;
#ifdef MC_FASTCALL_WORKER_THREAD
fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
"mc_fastcall");
if (IS_ERR(fastcall_thread)) {
ret = PTR_ERR(fastcall_thread);
fastcall_thread = NULL;
mc_dev_err("cannot create fastcall wq: %d\n", ret);
return ret;
}
/* this thread MUST run on CPU 0 at startup */
set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
wake_up_process(fastcall_thread);
#ifdef TBASE_CORE_SWITCHER
ret = register_cpu_notifier(&mobicore_cpu_notifer);
/* Create debugfs structs entry */
debugfs_create_file("active_cpu", 0600, g_ctx.debug_dir, NULL,
&mc_debug_coreswitch_ops);
#endif
#endif /* MC_FASTCALL_WORKER_THREAD */
/* ExySp: init lock for core switch processing */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
mutex_init(&core_switch_lock);
#endif
return ret;
}
void mc_fastcall_exit(void)
{
#ifdef MC_FASTCALL_WORKER_THREAD
if (!IS_ERR_OR_NULL(fastcall_thread)) {
#ifdef TBASE_CORE_SWITCHER
unregister_cpu_notifier(&mobicore_cpu_notifer);
#endif
kthread_stop(fastcall_thread);
fastcall_thread = NULL;
}
#endif /* MC_FASTCALL_WORKER_THREAD */
mc_clock_exit();
}
/*
* convert fast call return code to linux driver module error code
*/
static int convert_fc_ret(u32 ret)
{
switch (ret) {
case MC_FC_RET_OK:
return 0;
case MC_FC_RET_ERR_INVALID:
return -EINVAL;
case MC_FC_RET_ERR_ALREADY_INITIALIZED:
return -EBUSY;
default:
return -EFAULT;
}
}
int mc_fc_init(uintptr_t base_pa, ptrdiff_t off, size_t q_len, size_t buf_len)
{
#ifdef CONFIG_ARM64
u32 base_high = (u32)(base_pa >> 32);
#else
u32 base_high = 0;
#endif
union mc_fc_init fc_init;
/* Call the INIT fastcall to setup MobiCore initialization */
memset(&fc_init, 0, sizeof(fc_init));
fc_init.as_in.cmd = MC_FC_INIT;
/* base address of mci buffer PAGE_SIZE (default is 4KB) aligned */
fc_init.as_in.base = (u32)base_pa;
/* notification buffer start/length [16:16] [start, length] */
fc_init.as_in.nq_info =
(u32)(((base_high & 0xFFFF) << 16) | (q_len & 0xFFFF));
/* mcp buffer start/length [16:16] [start, length] */
fc_init.as_in.mcp_info = (u32)((off << 16) | (buf_len & 0xFFFF));
mc_dev_devel("cmd=%d, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x\n",
fc_init.as_in.cmd, fc_init.as_in.base,
fc_init.as_in.nq_info, fc_init.as_in.mcp_info);
mc_fastcall(&fc_init.as_generic);
mc_dev_devel("out cmd=0x%08x, ret=0x%08x\n", fc_init.as_out.resp,
fc_init.as_out.ret);
if (fc_init.as_out.flags & MC_FC_INIT_FLAG_LPAE)
g_ctx.f_lpae = true;
return convert_fc_ret(fc_init.as_out.ret);
}
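/*
 * Packing example with illustrative values: base_pa = 0x100200000,
 * q_len = 0x1000, off = 0x1000, buf_len = 0x1000 yields base = 0x00200000,
 * nq_info = 0x00011000 (high PA bits 0x0001, queue length 0x1000) and
 * mcp_info = 0x10001000 (offset 0x1000, buffer length 0x1000).
 */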
int mc_fc_info(u32 ext_info_id, u32 *state, u32 *ext_info)
{
union mc_fc_info fc_info;
int ret = 0;
memset(&fc_info, 0, sizeof(fc_info));
fc_info.as_in.cmd = MC_FC_INFO;
fc_info.as_in.ext_info_id = ext_info_id;
mc_fastcall(&fc_info.as_generic);
ret = convert_fc_ret(fc_info.as_out.ret);
if (ret) {
if (state)
*state = MC_STATUS_NOT_INITIALIZED;
if (ext_info)
*ext_info = 0;
mc_dev_err("code %d for idx %d\n", ret, ext_info_id);
} else {
if (state)
*state = fc_info.as_out.state;
if (ext_info)
*ext_info = fc_info.as_out.ext_info;
}
return ret;
}
int mc_fc_mem_trace(phys_addr_t buffer, u32 size)
{
union mc_fc_generic mc_fc_generic;
memset(&mc_fc_generic, 0, sizeof(mc_fc_generic));
mc_fc_generic.as_in.cmd = MC_FC_MEM_TRACE;
mc_fc_generic.as_in.param[0] = (u32)buffer;
#ifdef CONFIG_ARM64
mc_fc_generic.as_in.param[1] = (u32)(buffer >> 32);
#endif
mc_fc_generic.as_in.param[2] = size;
mc_fastcall(&mc_fc_generic);
return convert_fc_ret(mc_fc_generic.as_out.ret);
}
int mc_fc_nsiq(void)
{
union mc_fc_generic fc;
int ret;
memset(&fc, 0, sizeof(fc));
fc.as_in.cmd = MC_SMC_N_SIQ;
mc_fastcall(&fc);
ret = convert_fc_ret(fc.as_out.ret);
if (ret)
mc_dev_err("failed: %d\n", ret);
return ret;
}
int mc_fc_yield(void)
{
union mc_fc_generic fc;
int ret;
memset(&fc, 0, sizeof(fc));
fc.as_in.cmd = MC_SMC_N_YIELD;
mc_fastcall(&fc);
ret = convert_fc_ret(fc.as_out.ret);
if (ret)
mc_dev_err("failed: %d\n", ret);
return ret;
}
static int show_smc_log_entry(struct kasnprintf_buf *buf,
struct smc_log_entry *entry)
{
return kasnprintf(buf, "%20llu %10d 0x%08x 0x%08x 0x%08x\n",
entry->cpu_clk, (s32)entry->as_in.cmd,
entry->as_in.param[0], entry->as_in.param[1],
entry->as_in.param[2]);
}
/*
 * Dump the SMC log circular buffer, starting from the oldest command. It is
 * assumed that no new entries come in at this point.
*/
int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf)
{
int i, ret = 0;
ret = kasnprintf(buf, "%20s %10s %-10s %-10s %-10s\n",
"CPU clock", "command", "param1", "param2", "param3");
if (ret < 0)
return ret;
if (smc_log[smc_log_index].cpu_clk)
/* Buffer has wrapped around, dump end (oldest records) */
for (i = smc_log_index; i < SMC_LOG_SIZE; i++) {
ret = show_smc_log_entry(buf, &smc_log[i]);
if (ret < 0)
return ret;
}
/* Dump first records */
for (i = 0; i < smc_log_index; i++) {
ret = show_smc_log_entry(buf, &smc_log[i]);
if (ret < 0)
return ret;
}
return ret;
}
#ifdef TBASE_CORE_SWITCHER
int mc_active_core(void)
{
return active_cpu;
}
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
int __mc_switch_core(int cpu)
#else
int mc_switch_core(int cpu)
#endif
{
s32 ret = 0;
	union mc_fc_switch_core fc_switch_core;
if (!cpu_online(cpu))
return 1;
memset(&fc_switch_core, 0, sizeof(fc_switch_core));
fc_switch_core.as_in.cmd = MC_FC_SWAP_CPU;
if (cpu < COUNT_OF_CPUS)
fc_switch_core.as_in.core_id = cpu;
else
fc_switch_core.as_in.core_id = 0;
mc_dev_devel("<- cmd=0x%08x, core_id=0x%08x\n",
fc_switch_core.as_in.cmd, fc_switch_core.as_in.core_id);
/* ExySp: for sos performance */
mc_dev_info("<- cpu=0x%08x, active_cpu=0x%08x\n",
cpu, active_cpu);
mc_fastcall(&fc_switch_core.as_generic);
ret = convert_fc_ret(fc_switch_core.as_out.ret);
mc_dev_devel("exit with %d/0x%08X\n", ret, ret);
return ret;
}
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
int mc_switch_core(int cpu)
{
int ret;
mutex_lock(&core_switch_lock);
if (!(core_status & (0x1 << cpu))){
mc_dev_devel("Core status... core #%d is off line\n", cpu);
mutex_unlock(&core_switch_lock);
return 1;
}
ret = __mc_switch_core(cpu);
mutex_unlock(&core_switch_lock);
return ret;
}
#endif
#endif


@@ -0,0 +1,40 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _TBASE_FASTCALL_H_
#define _TBASE_FASTCALL_H_
/* Use the arch_extension sec pseudo op before switching to secure world */
#if defined(__GNUC__) && \
defined(__GNUC_MINOR__) && \
defined(__GNUC_PATCHLEVEL__) && \
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \
>= 40502
#ifndef CONFIG_ARM64
#define MC_ARCH_EXTENSION_SEC
#endif
#endif
int mc_fc_init(uintptr_t base_pa, ptrdiff_t off, size_t q_len, size_t buf_len);
int mc_fc_info(u32 ext_info_id, u32 *state, u32 *ext_info);
int mc_fc_mem_trace(phys_addr_t buffer, u32 size);
int mc_fc_nsiq(void);
int mc_fc_yield(void);
int mc_fastcall_init(void);
void mc_fastcall_exit(void);
int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf);
#endif /* _TBASE_FASTCALL_H_ */


@@ -0,0 +1,256 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include "main.h"
#include "fastcall.h"
#include "logging.h"
/* Supported log buffer version */
#define MC_LOG_VERSION 2
/* Default length of the log ring buffer 256KiB */
#define LOG_BUF_ORDER 6
/* Max Len of a log line for printing */
#define LOG_LINE_SIZE 256
/* Definitions for log version 2 */
#define LOG_TYPE_MASK (0x0007)
#define LOG_TYPE_CHAR 0
#define LOG_TYPE_INTEGER 1
/* Field length */
#define LOG_LENGTH_MASK (0x00F8)
#define LOG_LENGTH_SHIFT 3
/* Extra attributes */
#define LOG_EOL (0x0100)
#define LOG_INTEGER_DECIMAL (0x0200)
#define LOG_INTEGER_SIGNED (0x0400)
struct mc_logmsg {
u16 ctrl; /* Type and format of data */
u16 source; /* Unique value for each event source */
u32 log_data; /* Value, if any */
};
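/*
 * Decoding example (illustrative, not a real SWd record): ctrl = 0x0141 is
 * LOG_TYPE_INTEGER | (8 << LOG_LENGTH_SHIFT) | LOG_EOL, so a log_data of
 * 0xdeadbeef prints as the 8-digit hex value "deadbeef" and the line is
 * then flushed for that source.
 */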
/* MobiCore internal trace buffer structure. */
struct mc_trace_buf {
u32 version; /* version of trace buffer */
u32 length; /* length of buff */
u32 head; /* last write position */
u8 buff[]; /* start of the log buffer */
};
static struct logging_ctx {
struct work_struct work;
union {
struct mc_trace_buf *trace_buf; /* Circular log buffer */
unsigned long trace_page;
};
bool buffer_is_shared; /* Log buffer cannot be freed */
u32 tail; /* MobiCore log read position */
u32 line_len; /* Log Line buffer current length */
int thread_err;
u16 prev_source; /* Previous Log source */
char line[LOG_LINE_SIZE]; /* Log Line buffer */
u32 enabled; /* Log can be disabled via debugfs */
bool dead;
} log_ctx;
static inline void log_eol(u16 source)
{
if (!strnlen(log_ctx.line, LOG_LINE_SIZE)) {
/* In case a TA tries to print a 0x0 */
log_ctx.line_len = 0;
return;
}
if (log_ctx.prev_source)
/* MobiCore Userspace */
dev_info(g_ctx.mcd, "%03x|%s\n", log_ctx.prev_source,
log_ctx.line);
else
/* MobiCore kernel */
dev_info(g_ctx.mcd, "%s\n", log_ctx.line);
log_ctx.line_len = 0;
log_ctx.line[0] = 0;
}
/*
* Collect chars in log_ctx.line buffer and output the buffer when it is full.
* No locking needed because only "mobicore_log" thread updates this buffer.
*/
static inline void log_char(char ch, u16 source)
{
if (ch == '\n' || ch == '\r') {
log_eol(source);
return;
}
if ((log_ctx.line_len >= (LOG_LINE_SIZE - 1)) ||
(source != log_ctx.prev_source))
log_eol(source);
log_ctx.line[log_ctx.line_len++] = ch;
log_ctx.line[log_ctx.line_len] = 0;
log_ctx.prev_source = source;
}
static inline void log_string(u32 ch, u16 source)
{
while (ch) {
log_char(ch & 0xFF, source);
ch >>= 8;
}
}
static inline void log_number(u32 format, u32 value, u16 source)
{
int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
char fmt[16];
char buffer[32];
const char *reader = buffer;
if (format & LOG_INTEGER_DECIMAL)
if (format & LOG_INTEGER_SIGNED)
snprintf(fmt, sizeof(fmt), "%%%ud", width);
else
snprintf(fmt, sizeof(fmt), "%%%uu", width);
else
snprintf(fmt, sizeof(fmt), "%%0%ux", width);
snprintf(buffer, sizeof(buffer), fmt, value);
while (*reader)
log_char(*reader++, source);
}
static inline int log_msg(void *data)
{
struct mc_logmsg *msg = (struct mc_logmsg *)data;
int log_type = msg->ctrl & LOG_TYPE_MASK;
switch (log_type) {
case LOG_TYPE_CHAR:
log_string(msg->log_data, msg->source);
break;
case LOG_TYPE_INTEGER:
log_number(msg->ctrl, msg->log_data, msg->source);
break;
}
if (msg->ctrl & LOG_EOL)
log_eol(msg->source);
return sizeof(*msg);
}
static void log_worker(struct work_struct *work)
{
static DEFINE_MUTEX(local_mutex);
mutex_lock(&local_mutex);
while (log_ctx.trace_buf->head != log_ctx.tail) {
if (log_ctx.trace_buf->version != MC_LOG_VERSION) {
mc_dev_err("Bad log data v%d (exp. v%d), stop\n",
log_ctx.trace_buf->version, MC_LOG_VERSION);
log_ctx.dead = true;
break;
}
log_ctx.tail += log_msg(&log_ctx.trace_buf->buff[log_ctx.tail]);
/* Wrap over if no space left for a complete message */
if ((log_ctx.tail + sizeof(struct mc_logmsg)) >
log_ctx.trace_buf->length)
log_ctx.tail = 0;
}
mutex_unlock(&local_mutex);
}
/*
 * Wake up the log reader thread.
 * This should be called from the places where calls into MobiCore have
 * generated some logs (e.g. yield, SIQ...).
*/
void mc_logging_run(void)
{
if (log_ctx.enabled && !log_ctx.dead &&
(log_ctx.trace_buf->head != log_ctx.tail))
schedule_work(&log_ctx.work);
}
int mc_logging_start(void)
{
int ret = mc_fc_mem_trace(virt_to_phys((void *)(log_ctx.trace_page)),
BIT(LOG_BUF_ORDER) * PAGE_SIZE);
if (ret) {
mc_dev_err("shared traces setup failed\n");
return ret;
}
log_ctx.buffer_is_shared = true;
mc_dev_devel("fc_log version %u\n", log_ctx.trace_buf->version);
mc_logging_run();
return 0;
}
void mc_logging_stop(void)
{
if (!mc_fc_mem_trace(0, 0))
log_ctx.buffer_is_shared = false;
mc_logging_run();
flush_work(&log_ctx.work);
}
/*
* Setup MobiCore kernel log. It assumes it's running on CORE 0!
 * The fastcall will complain if that is not the case!
*/
int mc_logging_init(void)
{
/*
* We are going to map this buffer into virtual address space in SWd.
* To reduce complexity there, we use a contiguous buffer.
*/
log_ctx.trace_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
LOG_BUF_ORDER);
if (!log_ctx.trace_page)
return -ENOMEM;
INIT_WORK(&log_ctx.work, log_worker);
log_ctx.enabled = true;
debugfs_create_bool("swd_debug", 0600, g_ctx.debug_dir,
&log_ctx.enabled);
return 0;
}
void mc_logging_exit(void)
{
/*
* This is not racey as the only caller for mc_logging_run is the
* scheduler which gets stopped before us, and long before we exit.
*/
if (!log_ctx.buffer_is_shared)
free_pages(log_ctx.trace_page, LOG_BUF_ORDER);
else
mc_dev_err("log buffer unregister not supported\n");
}


@@ -0,0 +1,23 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LOGGING_H_
#define _MC_LOGGING_H_
void mc_logging_run(void);
int mc_logging_init(void);
void mc_logging_exit(void);
int mc_logging_start(void);
void mc_logging_stop(void);
#endif /* _MC_LOGGING_H_ */


@@ -0,0 +1,679 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include "public/mc_user.h"
#include "public/mc_admin.h" /* MC_ADMIN_DEVNODE */
#include "platform.h" /* MC_PM_RUNTIME */
#include "main.h"
#include "fastcall.h"
#include "arm.h"
#include "mmu.h"
#include "scheduler.h"
#include "pm.h"
#include "logging.h"
#include "admin.h"
#include "user.h"
#include "mcp.h"
#include "client.h"
#include "build_tag.h"
/* Define a MobiCore device structure for use with dev_debug() etc */
static struct device_driver driver = {
.name = "Trustonic"
};
static struct device device = {
.driver = &driver
};
struct mc_device_ctx g_ctx = {
.mcd = &device
};
static struct main_ctx {
#ifdef MC_PM_RUNTIME
/* Whether hibernation succeeded */
bool did_hibernate;
/* Reboot notifications */
struct notifier_block reboot_notifier;
/* PM notifications */
struct notifier_block pm_notifier;
#endif
/* Devices */
dev_t device;
struct class *class;
/* Admin device */
struct cdev admin_cdev;
/* User device */
dev_t user_dev;
struct cdev user_cdev;
/* Debug counters */
struct mutex struct_counters_buf_mutex;
char struct_counters_buf[256];
int struct_counters_buf_len;
} main_ctx;
static int mobicore_start(void);
static void mobicore_stop(void);
int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...)
{
va_list args;
int max_size = buf->size - buf->off;
int i;
va_start(args, fmt);
i = vsnprintf(buf->buf + buf->off, max_size, fmt, args);
if (i >= max_size) {
int new_size = PAGE_ALIGN(buf->size + i + 1);
char *new_buf = krealloc(buf->buf, new_size, buf->gfp);
if (!new_buf) {
i = -ENOMEM;
} else {
buf->buf = new_buf;
buf->size = new_size;
max_size = buf->size - buf->off;
i = vsnprintf(buf->buf + buf->off, max_size, fmt, args);
}
}
if (i > 0)
buf->off += i;
va_end(args);
return i;
}
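/*
 * Typical use (a minimal sketch; nb_clients is a placeholder): start from
 * an empty buffer and let kasnprintf() grow it on demand, then free it.
 *
 *	struct kasnprintf_buf buf = { .gfp = GFP_KERNEL };
 *
 *	if (kasnprintf(&buf, "clients: %d\n", nb_clients) >= 0)
 *		pr_info("%s", buf.buf);
 *	kfree(buf.buf);
 *
 * debug_generic_read() below shows the pattern this driver actually uses.
 */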
ssize_t debug_generic_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos,
int (*function)(struct kasnprintf_buf *buf))
{
/* Add/update buffer */
if (!file->private_data || !*ppos) {
struct kasnprintf_buf *buf, *old_buf;
int ret;
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->gfp = GFP_KERNEL;
ret = function(buf);
if (ret < 0) {
kfree(buf);
return ret;
}
old_buf = file->private_data;
file->private_data = buf;
kfree(old_buf);
}
if (file->private_data) {
struct kasnprintf_buf *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos, buf->buf,
buf->off);
}
return 0;
}
int debug_generic_release(struct inode *inode, struct file *file)
{
struct kasnprintf_buf *buf = file->private_data;
kfree(buf->buf);
kfree(buf);
return 0;
}
static ssize_t debug_structs_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return debug_generic_read(file, user_buf, count, ppos,
clients_debug_structs);
}
static const struct file_operations mc_debug_structs_ops = {
.read = debug_structs_read,
.llseek = default_llseek,
.release = debug_generic_release,
};
static ssize_t debug_struct_counters_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
if (!*ppos) {
int ret;
mutex_lock(&main_ctx.struct_counters_buf_mutex);
ret = snprintf(main_ctx.struct_counters_buf,
sizeof(main_ctx.struct_counters_buf),
"clients: %d\n"
"cbufs: %d\n"
"sessions: %d\n"
"wsms: %d\n"
"mmus: %d\n"
"maps: %d\n",
atomic_read(&g_ctx.c_clients),
atomic_read(&g_ctx.c_cbufs),
atomic_read(&g_ctx.c_sessions),
atomic_read(&g_ctx.c_wsms),
atomic_read(&g_ctx.c_mmus),
atomic_read(&g_ctx.c_maps));
mutex_unlock(&main_ctx.struct_counters_buf_mutex);
if (ret > 0)
main_ctx.struct_counters_buf_len = ret;
}
return simple_read_from_buffer(user_buf, count, ppos,
main_ctx.struct_counters_buf,
main_ctx.struct_counters_buf_len);
}
static const struct file_operations mc_debug_struct_counters_ops = {
.read = debug_struct_counters_read,
.llseek = default_llseek,
};
static inline int device_user_init(void)
{
struct device *dev;
int ret = 0;
main_ctx.user_dev = MKDEV(MAJOR(main_ctx.device), 1);
/* Create the user node */
mc_user_init(&main_ctx.user_cdev);
ret = cdev_add(&main_ctx.user_cdev, main_ctx.user_dev, 1);
if (ret) {
mc_dev_err("user cdev_add failed\n");
return ret;
}
main_ctx.user_cdev.owner = THIS_MODULE;
dev = device_create(main_ctx.class, NULL, main_ctx.user_dev, NULL,
MC_USER_DEVNODE);
if (IS_ERR(dev)) {
cdev_del(&main_ctx.user_cdev);
mc_dev_err("user device_create failed\n");
return PTR_ERR(dev);
}
/* Create debugfs structs entry */
debugfs_create_file("structs", 0400, g_ctx.debug_dir, NULL,
&mc_debug_structs_ops);
return 0;
}
static inline void device_user_exit(void)
{
device_destroy(main_ctx.class, main_ctx.user_dev);
cdev_del(&main_ctx.user_cdev);
}
#ifdef MC_PM_RUNTIME
static int reboot_notifier(struct notifier_block *nb, unsigned long event,
void *dummy)
{
switch (event) {
case SYS_HALT:
case SYS_POWER_OFF:
main_ctx.did_hibernate = true;
break;
}
return 0;
}
static int suspend_notifier(struct notifier_block *nb, unsigned long event,
void *dummy)
{
int ret = 0;
main_ctx.did_hibernate = false;
switch (event) {
case PM_SUSPEND_PREPARE:
return mc_scheduler_suspend();
case PM_POST_SUSPEND:
return mc_scheduler_resume();
case PM_HIBERNATION_PREPARE:
/* Try to stop the TEE nicely (ignore failure) */
mc_scheduler_suspend();
/* Make sure the TEE cannot run anymore */
mc_scheduler_stop();
/* Flush log buffer */
mc_logging_run();
break;
case PM_POST_HIBERNATION:
if (main_ctx.did_hibernate) {
/* Really did hibernate */
clients_kill_sessions();
return mobicore_start();
}
/* Did not hibernate, just restart the TEE */
ret = mc_scheduler_start();
if (!ret)
ret = mc_scheduler_resume();
}
return ret;
}
#endif /* MC_PM_RUNTIME */
static int mobicore_start(void)
{
struct mc_version_info version_info;
bool dynamic_lpae = false;
int ret;
ret = mc_logging_start();
if (ret) {
mc_dev_err("Log start failed\n");
goto err_log;
}
ret = mcp_start();
if (ret) {
mc_dev_err("TEE start failed\n");
goto err_mcp;
}
ret = mc_scheduler_start();
if (ret) {
mc_dev_err("Scheduler start failed\n");
goto err_sched;
}
ret = mc_pm_start();
if (ret) {
mc_dev_err("Power Management start failed\n");
goto err_pm;
}
/* Must be called before creating the user device node to avoid race */
ret = mcp_get_version(&version_info);
if (ret)
goto err_mcp_cmd;
/* CMP version is meaningless in this case and is thus not printed */
mc_dev_info("\n"
" product_id = %s\n"
" version_mci = 0x%08x\n"
" version_so = 0x%08x\n"
" version_mclf = 0x%08x\n"
" version_container = 0x%08x\n"
" version_mc_config = 0x%08x\n"
" version_tl_api = 0x%08x\n"
" version_dr_api = 0x%08x\n"
" version_nwd = 0x%08x\n",
version_info.product_id,
version_info.version_mci,
version_info.version_so,
version_info.version_mclf,
version_info.version_container,
version_info.version_mc_config,
version_info.version_tl_api,
version_info.version_dr_api,
version_info.version_nwd);
if (MC_VERSION_MAJOR(version_info.version_mci) > 1) {
mc_dev_err("MCI too recent for this driver");
goto err_version;
}
if ((MC_VERSION_MAJOR(version_info.version_mci) == 0) &&
(MC_VERSION_MINOR(version_info.version_mci) < 6)) {
mc_dev_err("MCI too old for this driver");
goto err_version;
}
/* Determine which features are supported */
switch (version_info.version_mci) {
case MC_VERSION(1, 4): /* 310 */
dynamic_lpae = true;
/* Fall through */
case MC_VERSION(1, 3):
g_ctx.f_time = true;
/* Fall through */
case MC_VERSION(1, 2):
g_ctx.f_client_login = true;
/* Fall through */
case MC_VERSION(1, 1):
g_ctx.f_multimap = true;
/* Fall through */
case MC_VERSION(1, 0): /* 302 */
g_ctx.f_mem_ext = true;
g_ctx.f_ta_auth = true;
/* Fall through */
case MC_VERSION(0, 7):
g_ctx.f_timeout = true;
/* Fall through */
case MC_VERSION(0, 6): /* 301 */
break;
}
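	/*
	 * The fall-throughs above accumulate features: e.g. an MCI 1.2 SWd
	 * gets f_client_login, f_multimap, f_mem_ext, f_ta_auth and
	 * f_timeout, but neither f_time nor dynamic LPAE detection.
	 */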
#ifdef CONFIG_TRUSTONIC_TEE_LPAE
if (!dynamic_lpae)
g_ctx.f_lpae = true;
#endif
mc_dev_info("SWd uses %sLPAE MMU table format\n",
g_ctx.f_lpae ? "" : "non-");
#ifdef MC_PM_RUNTIME
main_ctx.reboot_notifier.notifier_call = reboot_notifier;
ret = register_reboot_notifier(&main_ctx.reboot_notifier);
if (ret) {
mc_dev_err("reboot notifier register failed\n");
goto err_pm_notif;
}
main_ctx.pm_notifier.notifier_call = suspend_notifier;
ret = register_pm_notifier(&main_ctx.pm_notifier);
if (ret) {
unregister_reboot_notifier(&main_ctx.reboot_notifier);
mc_dev_err("PM notifier register failed\n");
goto err_pm_notif;
}
#endif
ret = device_user_init();
if (ret)
goto err_create_dev_user;
return 0;
err_create_dev_user:
#ifdef MC_PM_RUNTIME
unregister_reboot_notifier(&main_ctx.reboot_notifier);
unregister_pm_notifier(&main_ctx.pm_notifier);
err_pm_notif:
#endif
err_version:
err_mcp_cmd:
mc_pm_stop();
err_pm:
mc_scheduler_stop();
err_sched:
mcp_stop();
err_mcp:
mc_logging_stop();
err_log:
return ret;
}
static void mobicore_stop(void)
{
device_user_exit();
#ifdef MC_PM_RUNTIME
unregister_reboot_notifier(&main_ctx.reboot_notifier);
unregister_pm_notifier(&main_ctx.pm_notifier);
#endif
mc_pm_stop();
mc_scheduler_stop();
mc_logging_stop();
mcp_stop();
}
static ssize_t debug_sessions_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return debug_generic_read(file, user_buf, count, ppos,
mcp_debug_sessions);
}
static const struct file_operations mc_debug_sessions_ops = {
.read = debug_sessions_read,
.llseek = default_llseek,
.release = debug_generic_release,
};
static ssize_t debug_mcpcmds_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return debug_generic_read(file, user_buf, count, ppos,
mcp_debug_mcpcmds);
}
static const struct file_operations mc_debug_mcpcmds_ops = {
.read = debug_mcpcmds_read,
.llseek = default_llseek,
.release = debug_generic_release,
};
static inline int device_admin_init(void)
{
struct device *dev;
int ret = 0;
ret = alloc_chrdev_region(&main_ctx.device, 0, 2, "trustonic_tee");
if (ret) {
mc_dev_err("alloc_chrdev_region failed\n");
return ret;
}
main_ctx.class = class_create(THIS_MODULE, "trustonic_tee");
if (IS_ERR(main_ctx.class)) {
mc_dev_err("class_create failed\n");
ret = PTR_ERR(main_ctx.class);
goto err_class;
}
/* Create the ADMIN node */
ret = mc_admin_init(&main_ctx.admin_cdev, mobicore_start,
mobicore_stop);
if (ret)
goto err_init;
ret = cdev_add(&main_ctx.admin_cdev, main_ctx.device, 1);
if (ret) {
mc_dev_err("admin cdev_add failed\n");
goto err_cdev;
}
main_ctx.admin_cdev.owner = THIS_MODULE;
dev = device_create(main_ctx.class, NULL, main_ctx.device, NULL,
MC_ADMIN_DEVNODE);
if (IS_ERR(dev)) {
mc_dev_err("admin device_create failed\n");
ret = PTR_ERR(dev);
goto err_device;
}
/* Create debugfs sessions and MCP commands entries */
debugfs_create_file("sessions", 0400, g_ctx.debug_dir, NULL,
&mc_debug_sessions_ops);
debugfs_create_file("last_mcp_commands", 0400, g_ctx.debug_dir, NULL,
&mc_debug_mcpcmds_ops);
return 0;
err_device:
cdev_del(&main_ctx.admin_cdev);
err_cdev:
mc_admin_exit();
err_init:
class_destroy(main_ctx.class);
err_class:
unregister_chrdev_region(main_ctx.device, 2);
return ret;
}
static inline void device_admin_exit(void)
{
device_destroy(main_ctx.class, main_ctx.device);
cdev_del(&main_ctx.admin_cdev);
mc_admin_exit();
class_destroy(main_ctx.class);
unregister_chrdev_region(main_ctx.device, 2);
}
/*
 * This function is called by the kernel during startup or by an insmod
 * command. The device is installed and registered as a cdev; then interrupt
 * and queue handling are set up.
 */
static int mobicore_probe(struct platform_device *pdev)
{
int err = 0;
if (pdev)
g_ctx.mcd->of_node = pdev->dev.of_node;
#ifdef MOBICORE_COMPONENT_BUILD_TAG
mc_dev_info("MobiCore %s\n", MOBICORE_COMPONENT_BUILD_TAG);
#endif
/* Hardware does not support ARM TrustZone -> Cannot continue! */
if (!has_security_extensions()) {
mc_dev_err("Hardware doesn't support ARM TrustZone!\n");
return -ENODEV;
}
/* Running in secure mode -> Cannot load the driver! */
if (is_secure_mode()) {
mc_dev_err("Running in secure MODE!\n");
return -ENODEV;
}
/* Make sure we can create debugfs entries */
g_ctx.debug_dir = debugfs_create_dir("trustonic_tee", NULL);
/* Initialize debug counters */
atomic_set(&g_ctx.c_clients, 0);
atomic_set(&g_ctx.c_cbufs, 0);
atomic_set(&g_ctx.c_sessions, 0);
atomic_set(&g_ctx.c_wsms, 0);
atomic_set(&g_ctx.c_mmus, 0);
atomic_set(&g_ctx.c_maps, 0);
mutex_init(&main_ctx.struct_counters_buf_mutex);
/* Create debugfs info entry */
debugfs_create_file("structs_counters", 0400, g_ctx.debug_dir, NULL,
&mc_debug_struct_counters_ops);
/* Initialize common API layer */
client_init();
/* Initialize plenty of nice features */
err = mc_fastcall_init();
if (err) {
mc_dev_err("Fastcall support init failed!\n");
goto fail_fastcall_init;
}
err = mcp_init();
if (err) {
mc_dev_err("MCP init failed!\n");
goto fail_mcp_init;
}
err = mc_logging_init();
if (err) {
mc_dev_err("Log init failed!\n");
goto fail_log_init;
}
err = mc_scheduler_init();
if (err) {
mc_dev_err("Scheduler init failed!\n");
goto fail_mc_device_sched_init;
}
/*
* Create admin dev so that daemon can already communicate with
* the driver
*/
err = device_admin_init();
if (err)
goto fail_creat_dev_admin;
return 0;
fail_creat_dev_admin:
mc_scheduler_exit();
fail_mc_device_sched_init:
mc_logging_exit();
fail_log_init:
mcp_exit();
fail_mcp_init:
mc_fastcall_exit();
fail_fastcall_init:
debugfs_remove_recursive(g_ctx.debug_dir);
return err;
}
#ifdef MC_DEVICE_PROPNAME
static const struct of_device_id of_match_table[] = {
{ .compatible = MC_DEVICE_PROPNAME },
{ }
};
static struct platform_driver mc_plat_driver = {
.probe = mobicore_probe,
.driver = {
.name = "mcd",
.owner = THIS_MODULE,
.of_match_table = of_match_table,
}
};
#endif /* MC_DEVICE_PROPNAME */
static int __init mobicore_init(void)
{
dev_set_name(g_ctx.mcd, "TEE");
/*
* Do not remove or change the following trace.
* The string "MobiCore" is used to detect if the TEE is in of the image
*/
mc_dev_info("MobiCore mcDrvModuleApi version is %d.%d\n",
MCDRVMODULEAPI_VERSION_MAJOR, MCDRVMODULEAPI_VERSION_MINOR);
#ifdef MC_DEVICE_PROPNAME
return platform_driver_register(&mc_plat_driver);
#else
return mobicore_probe(NULL);
#endif
}
static void __exit mobicore_exit(void)
{
#ifdef MC_DEVICE_PROPNAME
platform_driver_unregister(&mc_plat_driver);
#endif
device_admin_exit();
mc_scheduler_exit();
mc_logging_exit();
mcp_exit();
mc_fastcall_exit();
debugfs_remove_recursive(g_ctx.debug_dir);
}
module_init(mobicore_init);
module_exit(mobicore_exit);
MODULE_AUTHOR("Trustonic Limited");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MobiCore driver");


@@ -0,0 +1,92 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_MAIN_H_
#define _MC_MAIN_H_
#include <linux/slab.h> /* gfp_t */
#include <linux/fs.h> /* struct inode and struct file */
#define MC_VERSION(major, minor) \
(((major & 0x0000ffff) << 16) | (minor & 0x0000ffff))
#define MC_VERSION_MAJOR(x) ((x) >> 16)
#define MC_VERSION_MINOR(x) ((x) & 0xffff)
#define mc_dev_err(fmt, ...) \
dev_err(g_ctx.mcd, "%s: " fmt, __func__, ##__VA_ARGS__)
#define mc_dev_info(fmt, ...) \
dev_info(g_ctx.mcd, "%s: " fmt, __func__, ##__VA_ARGS__)
#ifdef DEBUG
#define mc_dev_devel(fmt, ...) \
dev_info(g_ctx.mcd, "%s: " fmt, __func__, ##__VA_ARGS__)
#else /* DEBUG */
#define mc_dev_devel(...) do {} while (0)
#endif /* !DEBUG */
/* MobiCore Driver Kernel Module context data. */
struct mc_device_ctx {
struct device *mcd;
/* debugfs root */
struct dentry *debug_dir;
/* Features */
/* - SWd uses LPAE MMU table format */
bool f_lpae;
/* - SWd can set a time out to get scheduled at a future time */
bool f_timeout;
/* - SWd supports memory extension which allows for bigger TAs */
bool f_mem_ext;
/* - SWd supports TA authorisation */
bool f_ta_auth;
/* - SWd can map several buffers at once */
bool f_multimap;
/* - SWd supports GP client authentication */
bool f_client_login;
/* - SWd needs time updates */
bool f_time;
/* Debug counters */
atomic_t c_clients;
atomic_t c_cbufs;
atomic_t c_sessions;
atomic_t c_wsms;
atomic_t c_mmus;
atomic_t c_maps;
};
extern struct mc_device_ctx g_ctx;
/* Debug stuff */
struct kasnprintf_buf {
gfp_t gfp;
void *buf;
int size;
int off;
};
extern __printf(2, 3)
int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...);
ssize_t debug_generic_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos,
int (*function)(struct kasnprintf_buf *buf));
int debug_generic_release(struct inode *inode, struct file *file);
static inline int kref_read(struct kref *kref)
{
return atomic_read(&kref->refcount);
}
#endif /* _MC_MAIN_H_ */


@@ -0,0 +1,151 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCIFC_H_
#define MCIFC_H_
#include "platform.h"
/** @name MobiCore FastCall Defines
* Defines for the two different FastCall's.
*/
/* --- global ---- */
#define MC_FC_INVALID ((u32)0) /**< Invalid FastCall ID */
#if defined(CONFIG_ARM64) && !defined(MC_ARMV7_FC)
/* These should be handled as 64-bit FCs; for now they are more like 32 bits... */
#define MC_FC_STD64_BASE ((u32)0xFF000000)
#define MC_FC_STD64(x) ((u32)(MC_FC_STD64_BASE + (x)))
#define MC_FC_INIT MC_FC_STD64(1) /**< Initializing FastCall. */
#define MC_FC_INFO MC_FC_STD64(2) /**< Info FastCall. */
#define MC_FC_MEM_TRACE MC_FC_STD64(10) /**< Enable SWd tracing via memory */
#define MC_FC_SWAP_CPU MC_FC_STD64(54) /**< Change new active Core */
#else
#define MC_FC_INIT ((u32)(-1)) /**< Initializing FastCall. */
#define MC_FC_INFO ((u32)(-2)) /**< Info FastCall. */
#define MC_FC_MEM_TRACE ((u32)(-31)) /**< Enable SWd tracing via memory */
#define MC_FC_SWAP_CPU ((u32)(0x84000005)) /**< Change new active Core */
#endif
/** @} */
/** @name MobiCore SMC Defines
* Defines the different secure monitor calls (SMC) for world switching.
*/
/**< Yield to switch from NWd to SWd. */
#define MC_SMC_N_YIELD 3
/**< SIQ to switch from NWd to SWd. */
#define MC_SMC_N_SIQ 4
/** @} */
/** @name MobiCore status
* MobiCore status information.
*/
/**< MobiCore is not yet initialized. FastCall FcInit() to set up MobiCore.*/
#define MC_STATUS_NOT_INITIALIZED 0
/**< Bad parameters have been passed in FcInit(). */
#define MC_STATUS_BAD_INIT 1
/**< MobiCore did initialize properly. */
#define MC_STATUS_INITIALIZED 2
/**< MobiCore kernel halted due to an unrecoverable exception. Further
* information is available extended info
*/
#define MC_STATUS_HALT 3
/** @} */
/** @name Extended Info Identifiers
* Extended info parameters for MC_FC_INFO to obtain further information
* depending on MobiCore state.
*/
/**< Version of the MobiCore Control Interface (MCI) */
#define MC_EXT_INFO_ID_MCI_VERSION 0
/**< MobiCore control flags */
#define MC_EXT_INFO_ID_FLAGS 1
/**< MobiCore halt condition code */
#define MC_EXT_INFO_ID_HALT_CODE 2
/**< MobiCore halt condition instruction pointer */
#define MC_EXT_INFO_ID_HALT_IP 3
/**< MobiCore fault counter */
#define MC_EXT_INFO_ID_FAULT_CNT 4
/**< MobiCore last fault cause */
#define MC_EXT_INFO_ID_FAULT_CAUSE 5
/**< MobiCore last fault meta */
#define MC_EXT_INFO_ID_FAULT_META 6
/**< MobiCore last fault threadid */
#define MC_EXT_INFO_ID_FAULT_THREAD 7
/**< MobiCore last fault instruction pointer */
#define MC_EXT_INFO_ID_FAULT_IP 8
/**< MobiCore last fault stack pointer */
#define MC_EXT_INFO_ID_FAULT_SP 9
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_DFSR 10
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_ADFSR 11
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_DFAR 12
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_IFSR 13
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_AIFSR 14
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_IFAR 15
/**< MobiCore configured by Daemon via fc_init flag */
#define MC_EXT_INFO_ID_MC_CONFIGURED 16
/**< MobiCore scheduling status: idle/non-idle */
#define MC_EXT_INFO_ID_MC_SCHED_STATUS 17
/**< MobiCore runtime status: initialized, halted */
#define MC_EXT_INFO_ID_MC_STATUS 18
/**< MobiCore exception handler last partner */
#define MC_EXT_INFO_ID_MC_EXC_PARTNER 19
/**< MobiCore exception handler last peer */
#define MC_EXT_INFO_ID_MC_EXC_IPCPEER 20
/**< MobiCore exception handler last IPC message */
#define MC_EXT_INFO_ID_MC_EXC_IPCMSG 21
/**< MobiCore exception handler last IPC data */
#define MC_EXT_INFO_ID_MC_EXC_IPCDATA 22
/**< MobiCore exception handler last UUID (uses 4 slots: 23 to 26) */
#define MC_EXT_INFO_ID_MC_EXC_UUID 23
#define MC_EXT_INFO_ID_MC_EXC_UUID1 24
#define MC_EXT_INFO_ID_MC_EXC_UUID2 25
#define MC_EXT_INFO_ID_MC_EXC_UUID3 26
/** @} */
/** @name FastCall return values
* Return values of the MobiCore FastCalls.
*/
/**< No error. Everything worked fine. */
#define MC_FC_RET_OK 0
/**< FastCall was not successful. */
#define MC_FC_RET_ERR_INVALID 1
/**< MobiCore has already been initialized. */
#define MC_FC_RET_ERR_ALREADY_INITIALIZED 5
/** @} */
/** @name Init FastCall flags
* Return flags of the Init FastCall.
*/
/**< SWd uses LPAE MMU table format. */
#define MC_FC_INIT_FLAG_LPAE BIT(0)
/** @} */
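/*
 * Illustrative sketch only, not part of this interface: how the
 * extended-info IDs above could be used to inspect a halted SWd. The
 * real MC_FC_INFO wrapper lives elsewhere in the driver, so it is
 * passed in here as a hypothetical callback that returns the value for
 * one info ID.
 */
static inline void mc_dump_halt_state_example(u32 (*fc_info)(u32 ext_info_id))
{
	if (fc_info(MC_EXT_INFO_ID_MC_STATUS) != MC_STATUS_HALT)
		return;
	/* On halt, the halt code and instruction pointer locate the fault */
	pr_info("SWd halted: code 0x%08x at ip 0x%08x\n",
		fc_info(MC_EXT_INFO_ID_HALT_CODE),
		fc_info(MC_EXT_INFO_ID_HALT_IP));
}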
#endif /** MCIFC_H_ */
/** @} */

View file

@ -0,0 +1,527 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCP_H_
#define MCP_H_
#include "mci/mcloadformat.h"
/** Indicates a response */
#define FLAG_RESPONSE BIT(31)
/** Maximum number of buffers that can be mapped at once */
#define MCP_MAP_MAX_BUF 4
/** MobiCore Return Code Defines.
* List of the possible MobiCore return codes.
*/
enum mcp_result {
/** Memory has successfully been mapped */
MC_MCP_RET_OK = 0,
/** The session ID is invalid */
MC_MCP_RET_ERR_INVALID_SESSION = 1,
/** The UUID of the Trustlet is unknown */
MC_MCP_RET_ERR_UNKNOWN_UUID = 2,
/** The ID of the driver is unknown */
MC_MCP_RET_ERR_UNKNOWN_DRIVER_ID = 3,
/** No more sessions are allowed */
MC_MCP_RET_ERR_NO_MORE_SESSIONS = 4,
/** The container is invalid */
MC_MCP_RET_ERR_CONTAINER_INVALID = 5,
/** The Trustlet is invalid */
MC_MCP_RET_ERR_TRUSTLET_INVALID = 6,
/** The memory block has already been mapped before */
MC_MCP_RET_ERR_ALREADY_MAPPED = 7,
/** Alignment or length error in the command parameters */
MC_MCP_RET_ERR_INVALID_PARAM = 8,
/** No space left in the virtual address space of the session */
MC_MCP_RET_ERR_OUT_OF_RESOURCES = 9,
/** WSM type unknown or broken WSM */
MC_MCP_RET_ERR_INVALID_WSM = 10,
/** unknown error */
MC_MCP_RET_ERR_UNKNOWN = 11,
/** Length of map invalid */
MC_MCP_RET_ERR_INVALID_MAPPING_LENGTH = 12,
/** Map can only be applied to Trustlet session */
MC_MCP_RET_ERR_MAPPING_TARGET = 13,
/** Couldn't open crypto session */
MC_MCP_RET_ERR_OUT_OF_CRYPTO_RESOURCES = 14,
/** System Trustlet signature verification failed */
MC_MCP_RET_ERR_SIGNATURE_VERIFICATION_FAILED = 15,
/** System Trustlet public key is wrong */
MC_MCP_RET_ERR_WRONG_PUBLIC_KEY = 16,
/** Wrong container type(s) */
MC_MCP_RET_ERR_CONTAINER_TYPE_MISMATCH = 17,
/** Container is locked (or not activated) */
MC_MCP_RET_ERR_CONTAINER_LOCKED = 18,
/** SPID is not registered with root container */
MC_MCP_RET_ERR_SP_NO_CHILD = 19,
/** UUID is not registered with sp container */
MC_MCP_RET_ERR_TL_NO_CHILD = 20,
/** Unwrapping of root container failed */
MC_MCP_RET_ERR_UNWRAP_ROOT_FAILED = 21,
/** Unwrapping of service provider container failed */
MC_MCP_RET_ERR_UNWRAP_SP_FAILED = 22,
/** Unwrapping of Trustlet container failed */
MC_MCP_RET_ERR_UNWRAP_TRUSTLET_FAILED = 23,
/** Container version mismatch */
MC_MCP_RET_ERR_CONTAINER_VERSION_MISMATCH = 24,
/** Decryption of service provider trustlet failed */
MC_MCP_RET_ERR_SP_TL_DECRYPTION_FAILED = 25,
/** Hash check of service provider trustlet failed */
MC_MCP_RET_ERR_SP_TL_HASH_CHECK_FAILED = 26,
/** Activation/starting of task failed */
MC_MCP_RET_ERR_LAUNCH_TASK_FAILED = 27,
/** Closing of task not yet possible, try again later */
MC_MCP_RET_ERR_CLOSE_TASK_FAILED = 28,
/**< Service is blocked and a session cannot be opened to it */
MC_MCP_RET_ERR_SERVICE_BLOCKED = 29,
/**< Service is locked and a session cannot be opened to it */
MC_MCP_RET_ERR_SERVICE_LOCKED = 30,
/**< Service was forcefully killed (due to an administrative command) */
MC_MCP_RET_ERR_SERVICE_KILLED = 31,
/** The command is unknown */
MC_MCP_RET_ERR_UNKNOWN_COMMAND = 50,
/** The command data is invalid */
MC_MCP_RET_ERR_INVALID_DATA = 51
};
/** Possible MCP Command IDs
* Command ID must be between 0 and 0x7FFFFFFF.
*/
enum cmd_id {
/** Invalid command ID */
MC_MCP_CMD_ID_INVALID = 0x00,
/** Open a session */
MC_MCP_CMD_OPEN_SESSION = 0x01,
/** Close an existing session */
MC_MCP_CMD_CLOSE_SESSION = 0x03,
/** Map WSM to session */
MC_MCP_CMD_MAP = 0x04,
/** Unmap WSM from session */
MC_MCP_CMD_UNMAP = 0x05,
/** Prepare for suspend */
MC_MCP_CMD_SUSPEND = 0x06,
/** Resume from suspension */
MC_MCP_CMD_RESUME = 0x07,
/** Get MobiCore version information */
MC_MCP_CMD_GET_MOBICORE_VERSION = 0x09,
/** Close MCP and unmap MCI */
MC_MCP_CMD_CLOSE_MCP = 0x0A,
/** Load token for device attestation */
MC_MCP_CMD_LOAD_TOKEN = 0x0B,
/** Check that TA can be loaded */
MC_MCP_CMD_CHECK_LOAD_TA = 0x0C,
/** Map multiple WSMs to session */
MC_MCP_CMD_MULTIMAP = 0x0D,
/** Unmap multiple WSMs from session */
MC_MCP_CMD_MULTIUNMAP = 0x0E,
};
/*
* Types of WSM known to the MobiCore.
*/
#define WSM_TYPE_MASK 0xFF
#define WSM_INVALID 0 /** Invalid memory type */
#define WSM_L2 2 /** Buffer mapping uses L2/L3 table */
#define WSM_L1 3 /** Buffer mapping uses fake L1 table */
/*
* Magic number used to identify if Open Command supports GP client
* authentication.
*/
#define MC_GP_CLIENT_AUTH_MAGIC 0x47504131 /* "GPA1" */
/*
* Initialisation values flags
*/
#define MC_IV_FLAG_IRQ BIT(0) /* Set if IRQ is present */
#define MC_IV_FLAG_TIME BIT(1) /* Set if GP TIME is supported */
struct init_values {
u32 flags;
u32 irq;
u32 time_ofs;
u32 time_len;
};
/** Command header.
* It just contains the command ID. Only values specified in cmd_id are
* allowed as command IDs. If the command ID is unspecified the MobiCore
* returns an empty response with the result set to
* MC_MCP_RET_ERR_UNKNOWN_COMMAND.
*/
struct cmd_header {
enum cmd_id cmd_id; /** Command ID of the command */
};
/** Response header.
* MobiCore will reply to every MCP command with an MCP response. Like the MCP
* command the response consists of a header followed by response data. The
* response is written to the same memory location as the MCP command.
*/
struct rsp_header {
u32 rsp_id; /** Command ID | FLAG_RESPONSE */
enum mcp_result result; /** Result of the command execution */
};
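/*
 * Illustrative sketch only: a response overwrites the command in the
 * same buffer, so a caller can match it to the command it issued by
 * checking that rsp_id equals the command ID with FLAG_RESPONSE set.
 */
static inline bool mcp_rsp_matches_example(const struct rsp_header *rsp,
					   enum cmd_id cmd_id)
{
	return rsp->rsp_id == (FLAG_RESPONSE | (u32)cmd_id);
}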
/** @defgroup CMD MCP Commands
*/
/** @defgroup ASMCMD Administrative Commands
*/
/** @defgroup MCPGETMOBICOREVERSION GET_MOBICORE_VERSION
* Get MobiCore version info.
*
*/
/** Get MobiCore Version Command */
struct cmd_get_version {
struct cmd_header cmd_header; /** Command header */
};
/** Get MobiCore Version Command Response */
struct rsp_get_version {
struct rsp_header rsp_header; /** Response header */
struct mc_version_info version_info; /** MobiCore version info */
};
/** @defgroup POWERCMD Power Management Commands
*/
/** @defgroup MCPSUSPEND SUSPEND
* Prepare MobiCore suspension.
* This command allows MobiCore and MobiCore drivers to release or clean
* resources and save device state.
*
*/
/** Suspend Command */
struct cmd_suspend {
struct cmd_header cmd_header; /** Command header */
};
/** Suspend Command Response */
struct rsp_suspend {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPRESUME RESUME
* Resume MobiCore from suspension.
* This command allows MobiCore and MobiCore drivers to reinitialize hardware
* affected by suspension.
*
*/
/** Resume Command */
struct cmd_resume {
struct cmd_header cmd_header; /** Command header */
};
/** Resume Command Response */
struct rsp_resume {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup SESSCMD Session Management Commands
*/
/** @defgroup MCPOPEN OPEN
* Load and open a session to a Trustlet.
* The OPEN command loads Trustlet data to the MobiCore context and opens a
* session to the Trustlet. If wsm_data_type is WSM_INVALID MobiCore tries to
* start a pre-installed Trustlet associated with the uuid passed. The uuid
* passed must match the uuid contained in the load data (if available).
* On success, MobiCore returns the session ID which can be used for further
* communication.
*/
/** GP client authentication data */
struct cmd_open_data {
u32 mclf_magic; /** ASCII "MCLF" on older versions */
struct identity identity; /** Login method and data */
};
/** Open Command */
struct cmd_open {
struct cmd_header cmd_header; /** Command header */
struct mc_uuid_t uuid; /** Service UUID */
u8 unused[4]; /** Padding to be 64-bit aligned */
u64 adr_tci_buffer; /** Physical address of the TCI MMU table */
u64 adr_load_data; /** Physical address of the load-data MMU table */
u32 ofs_tci_buffer; /** Offset to the data */
u32 len_tci_buffer; /** Length of the TCI */
u32 wsmtype_tci; /** Type of WSM used for the TCI */
u32 wsm_data_type; /** Type of MMU */
u32 ofs_load_data; /** Offset to the data */
u32 len_load_data; /** Length of the data to load */
union {
struct cmd_open_data cmd_open_data; /** Client login data */
union mclf_header tl_header; /** Service header */
};
u32 is_gpta; /** true if looking for an SD/GP-TA */
};
/** Open Command Response */
struct rsp_open {
struct rsp_header rsp_header; /** Response header */
u32 session_id; /** Session ID */
};
/** TA Load Check Command */
struct cmd_check_load {
struct cmd_header cmd_header; /** Command header */
struct mc_uuid_t uuid; /** Service UUID */
u8 unused[4]; /** Padding to be 64-bit aligned */
u64 adr_load_data; /** Physical address of the data */
u32 wsm_data_type; /** Type of MMU */
u32 ofs_load_data; /** Offset to the data */
u32 len_load_data; /** Length of the data to load */
union mclf_header tl_header; /** Service header */
};
/** TA Load Check Response */
struct rsp_check_load {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPCLOSE CLOSE
* Close an existing session to a Trustlet.
* The CLOSE command terminates a session and frees all resources in the
* MobiCore system which are currently occupied by the session. Before closing
* the session, the MobiCore runtime management waits until all pending
* operations, like calls to drivers, invoked by the Trustlet have been
* terminated. Mapped memory will automatically be unmapped from the MobiCore
* context. The NWd is responsible for processing the freed memory according to
* the Rich-OS needs.
*
*/
/** Close Command */
struct cmd_close {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
};
/** Close Command Response */
struct rsp_close {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPMAP MAP
* Map a portion of memory to a session.
* The MAP command provides a block of memory to the context of a service.
* The memory then becomes world-shared memory (WSM).
* The only allowed memory type here is WSM_L2.
*/
/** Map Command */
struct cmd_map {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
u32 wsm_type; /** Type of MMU */
u32 ofs_buffer; /** Offset to the payload */
u64 adr_buffer; /** Physical address of the MMU table */
u32 len_buffer; /** Length of the buffer */
u8 unused[4]; /** Padding to be 64-bit aligned */
};
#define MCP_MAP_MAX 0x100000 /** Maximum length for MCP map */
/** Map Command Response */
struct rsp_map {
struct rsp_header rsp_header; /** Response header */
/** Virtual address the WSM is mapped to, may include an offset! */
u32 secure_va;
};
/** @defgroup MCPUNMAP UNMAP
* Unmap a portion of world-shared memory from a session.
* The UNMAP command is used to unmap a previously mapped block of
* world shared memory from the context of a session.
*
* Attention: The memory block will be immediately unmapped from the specified
* session. If the service is still accessing the memory, the service will
* trigger a segmentation fault.
*/
/** Unmap Command */
struct cmd_unmap {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
u32 wsm_type; /** Type of WSM used of the memory */
/** Virtual address the WSM is mapped to, may include an offset! */
u32 secure_va;
u32 virtual_buffer_len; /** Length of virtual buffer */
};
/** Unmap Command Response */
struct rsp_unmap {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPLOADTOKEN
* Load a token from the normal world and share it with the TEE
* If something fails, the device attestation functionality will be disabled
*/
/** Load Token */
struct cmd_load_token {
struct cmd_header cmd_header; /** Command header */
u32 wsm_data_type; /** Type of MMU */
u64 adr_load_data; /** Physical address of the MMU table */
u64 ofs_load_data; /** Offset to the data */
u64 len_load_data; /** Length of the data */
};
/** Load Token Command Response */
struct rsp_load_token {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPMULTIMAP MULTIMAP
* Map up to MCP_MAP_MAX_BUF portions of memory to a session.
* The MULTIMAP command provides MCP_MAP_MAX_BUF blocks of memory to the context
* of a service.
* The memory then becomes world-shared memory (WSM).
* The only allowed memory type here is WSM_L2.
*/
/** NWd physical buffer description
*
 * Note: this information comes from the NWd kernel, so it must not be
 * trusted any more than the NWd kernel itself.
*/
struct buffer_map {
u64 adr_buffer; /**< Physical address */
u32 ofs_buffer; /**< Offset of buffer */
u32 len_buffer; /**< Length of buffer */
u32 wsm_type; /**< Type of address */
u8 unused[4]; /** Padding to be 64-bit aligned */
};
/** MultiMap Command */
struct cmd_multimap {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
struct buffer_map bufs[MC_MAP_MAX]; /** NWd buffer info */
};
/** Multimap Command Response */
struct rsp_multimap {
struct rsp_header rsp_header; /** Response header */
/** Virtual address the WSM is mapped to, may include an offset! */
u64 secure_va[MC_MAP_MAX];
};
/** @defgroup MCPMULTIUNMAP MULTIUNMAP
* Unmap up to MCP_MAP_MAX_BUF portions of world-shared memory from a session.
* The MULTIUNMAP command is used to unmap MCP_MAP_MAX_BUF previously mapped
* blocks of world shared memory from the context of a session.
*
* Attention: The memory blocks will be immediately unmapped from the specified
* session. If the service is still accessing the memory, the service will
* trigger a segmentation fault.
*/
/** NWd mapped buffer description
*
 * Note: this information comes from the NWd kernel, so it must not be
 * trusted any more than the NWd kernel itself.
*/
struct buffer_unmap {
u64 secure_va; /**< Secure virtual address */
u32 len_buffer; /**< Length of buffer */
u8 unused[4]; /** Padding to be 64-bit aligned */
};
/** Multiunmap Command */
struct cmd_multiunmap {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
struct buffer_unmap bufs[MC_MAP_MAX]; /** NWd buffer info */
};
/** Multiunmap Command Response */
struct rsp_multiunmap {
struct rsp_header rsp_header; /** Response header */
};
/** Structure of the MCP buffer */
union mcp_message {
struct init_values init_values; /** Initialisation values */
struct cmd_header cmd_header; /** Command header */
struct rsp_header rsp_header;
struct cmd_open cmd_open; /** Load and open service */
struct rsp_open rsp_open;
struct cmd_close cmd_close; /** Close command */
struct rsp_close rsp_close;
struct cmd_map cmd_map; /** Map WSM to service */
struct rsp_map rsp_map;
struct cmd_unmap cmd_unmap; /** Unmap WSM from service */
struct rsp_unmap rsp_unmap;
struct cmd_suspend cmd_suspend; /** Suspend MobiCore */
struct rsp_suspend rsp_suspend;
struct cmd_resume cmd_resume; /** Resume MobiCore */
struct rsp_resume rsp_resume;
struct cmd_get_version cmd_get_version; /** Get MobiCore Version */
struct rsp_get_version rsp_get_version;
struct cmd_load_token cmd_load_token; /** Load token */
struct rsp_load_token rsp_load_token;
struct cmd_check_load cmd_check_load; /** TA load check */
struct rsp_check_load rsp_check_load;
struct cmd_multimap cmd_multimap; /** Map multiple WSMs */
struct rsp_multimap rsp_multimap;
struct cmd_multiunmap cmd_multiunmap; /** Unmap multiple WSMs */
struct rsp_multiunmap rsp_multiunmap;
};
/** Minimum MCP buffer length (in bytes) */
#define MIN_MCP_LEN sizeof(union mcp_message)
#define MC_FLAG_NO_SLEEP_REQ 0
#define MC_FLAG_REQ_TO_SLEEP 1
#define MC_STATE_NORMAL_EXECUTION 0
#define MC_STATE_READY_TO_SLEEP 1
struct sleep_mode {
u16 sleep_req; /** Ask SWd to get ready to sleep */
u16 ready_to_sleep; /** SWd is now ready to sleep */
};
/** MobiCore status flags */
struct mcp_flags {
/** If not MC_FLAG_SCHEDULE_IDLE, MobiCore needs scheduling */
u32 schedule;
struct sleep_mode sleep_mode;
/** Secure-world sleep timeout in milliseconds */
s32 timeout_ms;
/** Reserved for future use: Must not be interpreted */
u32 RFU3;
};
/** MobiCore is idle. No scheduling required */
#define MC_FLAG_SCHEDULE_IDLE 0
/** MobiCore is non idle, scheduling is required */
#define MC_FLAG_SCHEDULE_NON_IDLE 1
/** MCP buffer structure */
struct mcp_buffer {
struct mcp_flags flags; /** MobiCore Flags */
union mcp_message message; /** MCP message buffer */
};
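/*
 * Illustrative sketch only (assumed helper, not part of this header):
 * filling a MAP command in the shared MCP buffer before notifying the
 * SWd. Locking and the notification itself are the caller's business.
 */
static inline void mcp_prepare_map_example(struct mcp_buffer *mcp_buf,
					   u32 session_id, u32 wsm_type,
					   u64 table_phys, u32 offset, u32 len)
{
	struct cmd_map *cmd = &mcp_buf->message.cmd_map;

	cmd->cmd_header.cmd_id = MC_MCP_CMD_MAP;
	cmd->session_id = session_id;
	cmd->wsm_type = wsm_type;	/* WSM_L2 is the only allowed type */
	cmd->adr_buffer = table_phys;	/* Physical address of the MMU table */
	cmd->ofs_buffer = offset;
	cmd->len_buffer = len;		/* Must not exceed MCP_MAP_MAX */
}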
#endif /* MCP_H_ */

View file

@ -0,0 +1,90 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef NQ_H_
#define NQ_H_
/** \name NQ Size Defines
* Minimum and maximum count of elements in the notification queue.
*/
#define MIN_NQ_ELEM 1 /** Minimum notification queue elements */
#define MAX_NQ_ELEM 64 /** Maximum notification queue elements */
/* Compute notification queue size in bytes from its number of elements */
#define NQ_SIZE(n) (2 * (sizeof(struct notification_queue_header) \
+ (n) * sizeof(struct notification)))
/** \name NQ Length Defines
* Note that there is one queue for NWd->SWd and one queue for SWd->NWd
*/
/** Minimum size for the notification queue data structure */
#define MIN_NQ_LEN NQ_SIZE(MIN_NQ_ELEM)
/** Maximum size for the notification queue data structure */
#define MAX_NQ_LEN NQ_SIZE(MAX_NQ_ELEM)
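/*
 * Worked example: a notification is 8 bytes (u32 + s32) and the queue
 * header is 12 bytes (3 * u32), so with the factor 2 for the two
 * directions:
 *   MIN_NQ_LEN = NQ_SIZE(1)  = 2 * (12 +  1 * 8) =   40 bytes
 *   MAX_NQ_LEN = NQ_SIZE(64) = 2 * (12 + 64 * 8) = 1048 bytes
 */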
/** \name Session ID Defines
* Standard Session IDs.
*/
/** MCP session ID, used to communicate with MobiCore (e.g. to start/stop TA) */
#define SID_MCP 0
/** Invalid session id, returned in case of error */
#define SID_INVALID 0xffffffff
/** Notification data structure */
struct notification {
u32 session_id; /** Session ID */
s32 payload; /** Additional notification info */
};
/** Notification payload codes.
 * 0 indicates a plain notification,
* a positive value is a termination reason from the task,
* a negative value is a termination reason from MobiCore.
* Possible negative values are given below.
*/
enum notification_payload {
/** task terminated, but exit code is invalid */
ERR_INVALID_EXIT_CODE = -1,
/** task terminated due to session end, no exit code available */
ERR_SESSION_CLOSE = -2,
/** task terminated due to invalid operation */
ERR_INVALID_OPERATION = -3,
/** session ID is unknown */
ERR_INVALID_SID = -4,
/** session is not active */
ERR_SID_NOT_ACTIVE = -5,
/** session was force-killed (due to an administrative command). */
ERR_SESSION_KILLED = -6,
};
/** Declaration of the notification queue header.
 * Layout as specified in the data structure specification.
*/
struct notification_queue_header {
u32 write_cnt; /** Write counter */
u32 read_cnt; /** Read counter */
u32 queue_size; /** Queue size */
};
/** Queue struct which defines a queue object.
 * The queue struct is accessed by the queue_<operation> family of
 * functions. The element count must be a power of two, and the exponent
 * must be smaller than the width of u32 (i.e. 32), since the counters
 * are free-running u32 values.
*/
struct notification_queue {
struct notification_queue_header hdr; /** Queue header */
struct notification notification[MIN_NQ_ELEM]; /** Elements */
};
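/*
 * Illustrative sketch only: consuming one pending notification. Since
 * the queue size must be a power of two (see above), the free-running
 * counters can be masked instead of taken modulo the queue size.
 */
static inline bool nq_pop_example(struct notification_queue *q,
				  struct notification *out)
{
	if (q->hdr.read_cnt == q->hdr.write_cnt)
		return false;	/* Queue is empty */
	*out = q->notification[q->hdr.read_cnt & (q->hdr.queue_size - 1)];
	q->hdr.read_cnt++;
	return true;
}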
#endif /** NQ_H_ */

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCITIME_H_
#define MCITIME_H_
/*
* Trustonic TEE RICH OS Time:
* Seconds and nanoseconds since Jan 1, 1970, UTC
*/
struct mcp_time {
u64 seconds;
u64 nsec;
};
#endif /* MCITIME_H_ */

View file

@ -0,0 +1,134 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCLOADFORMAT_H_
#define MCLOADFORMAT_H_
/** Trustlet Blob length info */
#define MC_TLBLOBLEN_MAGIC 0x7672746C /* Magic for SWd: vrtl */
#define MAX_SO_CONT_SIZE 512 /* Max size for a container */
/** MCLF flags */
/**< Loaded service cannot be unloaded from MobiCore. */
#define MC_SERVICE_HEADER_FLAGS_PERMANENT BIT(0)
/**< Service has no WSM control interface. */
#define MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE BIT(1)
/**< Service can be debugged. */
#define MC_SERVICE_HEADER_FLAGS_DEBUGGABLE BIT(2)
/**< New-layout trusted application or trusted driver. */
#define MC_SERVICE_HEADER_FLAGS_EXTENDED_LAYOUT BIT(3)
/** Service type.
* The service type defines the type of executable.
*/
enum service_type {
SERVICE_TYPE_ILLEGAL = 0,
SERVICE_TYPE_DRIVER = 1,
SERVICE_TYPE_SP_TRUSTLET = 2,
SERVICE_TYPE_SYSTEM_TRUSTLET = 3,
SERVICE_TYPE_MIDDLEWARE = 4,
SERVICE_TYPE_LAST_ENTRY = 5,
};
/**
* Descriptor for a memory segment.
*/
struct segment_descriptor {
u32 start; /**< Virtual start address */
u32 len; /**< Segment length in bytes */
};
/**
* MCLF intro for data structure identification.
* Must be the first element of a valid MCLF file.
*/
struct mclf_intro {
u32 magic; /**< Header magic value ASCII "MCLF" */
u32 version; /**< Version the MCLF header struct */
};
/**
 * @defgroup MCLF_VER_V2 MCLF Version 2
* @ingroup MCLF_VER
*
* @addtogroup MCLF_VER_V2
*/
/*
* GP TA identity.
*/
struct identity {
/**< GP TA login type */
u32 login_type;
/**< GP TA login data */
u8 login_data[16];
};
/**
* Version 2.1/2.2 MCLF header.
*/
struct mclf_header_v2 {
/**< MCLF header start with the mandatory intro */
struct mclf_intro intro;
/**< Service flags */
u32 flags;
/**< Type of memory the service must be executed from */
u32 mem_type;
/**< Type of service */
enum service_type service_type;
/**< Number of instances which can be run simultaneously */
u32 num_instances;
/**< Loadable service unique identifier (UUID) */
struct mc_uuid_t uuid;
/**< If the service_type is SERVICE_TYPE_DRIVER the Driver ID is used */
u32 driver_id;
/**<
* Number of threads (N) in a service:
* SERVICE_TYPE_SP_TRUSTLET: N = 1
* SERVICE_TYPE_SYSTEM_TRUSTLET: N = 1
* SERVICE_TYPE_DRIVER: N >= 1
*/
u32 num_threads;
/**< Virtual text segment */
struct segment_descriptor text;
/**< Virtual data segment */
struct segment_descriptor data;
/**< Length of the BSS segment in bytes. MUST be at least 8 bytes */
u32 bss_len;
/**< Virtual start address of service code */
u32 entry;
/**< Version of the interface the driver exports */
u32 service_version;
};
/**
* @addtogroup MCLF
*/
/** MCLF header */
union mclf_header {
/**< Intro for data identification */
struct mclf_intro intro;
/**< Version 2 header */
struct mclf_header_v2 mclf_header_v2;
};
struct mc_blob_len_info {
u32 magic; /**< New blob format magic number */
u32 root_size; /**< Root container size */
u32 sp_size; /**< SP container size */
u32 ta_size; /**< TA container size */
u32 reserved[4]; /**< Reserved for further Use */
};
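/*
 * Illustrative sketch only: minimal sanity check of a candidate MCLF
 * blob. The magic constant is spelled out here as an assumption for
 * the example: ASCII "MCLF" read as a little-endian u32.
 */
#define MCLF_EXAMPLE_MAGIC 0x464c434d /* "MCLF" */

static inline bool mclf_intro_looks_valid_example(const union mclf_header *hdr)
{
	return hdr->intro.magic == MCLF_EXAMPLE_MAGIC;
}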
#endif /* MCLOADFORMAT_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,137 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_MCP_H_
#define _MC_MCP_H_
#include "mci/mcloadformat.h" /* struct identity */
/* Structure to hold the TA/driver descriptor to pass to MCP */
struct tee_object {
u32 length; /* Total length */
u32 header_length; /* Length of header before payload */
u8 data[]; /* Header followed by payload */
};
/* Structure to hold all mapped buffer data to pass to MCP */
struct mcp_buffer_map {
u64 phys_addr; /** Page-aligned physical address */
u64 secure_va; /** Page-aligned secure virtual address */
u32 offset; /** Data offset inside the first page */
u32 length; /** Length of the data */
u32 type; /** Type of MMU */
};
struct mcp_session {
/* Work descriptor to handle delayed closing, set by upper layer */
struct work_struct close_work;
/* Sessions list (protected by mcp sessions_lock) */
struct list_head list;
/* Notifications list (protected by mcp notifications_mutex) */
struct list_head notifications_list;
/* Notification waiter lock */
struct mutex notif_wait_lock; /* Only one at a time */
/* Notification debug (protected by mcp notifications_mutex) */
enum mcp_notification_state {
MCP_NOTIF_IDLE, /* Nothing happened yet */
MCP_NOTIF_QUEUED, /* Notification in overflow queue */
MCP_NOTIF_SENT, /* Notification in send queue */
MCP_NOTIF_RECEIVED, /* Notification received */
MCP_NOTIF_CONSUMED, /* Notification reported to CA */
MCP_NOTIF_DEAD, /* Error reported to CA */
} notif_state;
/* Notification received */
struct completion completion;
/* Notification lock */
struct mutex exit_code_lock;
/* Last notification */
s32 exit_code;
/* Session id */
u32 id;
/* Session state (protected by mcp sessions_lock) */
enum mcp_session_state {
MCP_SESSION_RUNNING,
MCP_SESSION_CLOSE_FAILED,
MCP_SESSION_CLOSE_REQUESTED,
MCP_SESSION_CLOSE_NOTIFIED,
MCP_SESSION_CLOSING_GP,
MCP_SESSION_CLOSED,
} state;
/* This TA is of Global Platform type, set by upper layer */
bool is_gp;
/* GP TAs have login information */
struct identity identity;
};
/* Init for the mcp_session structure */
void mcp_session_init(struct mcp_session *session, bool is_gp,
const struct identity *identity);
int mcp_session_waitnotif(struct mcp_session *session, s32 timeout,
bool silent_expiry);
s32 mcp_session_exitcode(struct mcp_session *mcp_session);
/* SWd suspend/resume */
int mcp_suspend(void);
int mcp_resume(void);
bool mcp_suspended(void);
/* Callback to scheduler registration */
enum mcp_scheduler_commands {
MCP_YIELD,
MCP_NSIQ,
};
void mcp_register_scheduler(int (*scheduler_cb)(enum mcp_scheduler_commands));
bool mcp_notifications_flush(void);
void mcp_register_crashhandler(void (*crashhandler_cb)(void));
/*
* Get the requested SWd sleep timeout value (ms)
* - if the timeout is -1, wait indefinitely
* - if the timeout is 0, re-schedule immediately (timeouts in µs in the SWd)
* - otherwise sleep for the required time
* returns true if sleep is required, false otherwise
*/
bool mcp_get_idle_timeout(s32 *timeout);
void mcp_reset_idle_timeout(void);
void mcp_update_time(void);
/* MCP commands */
int mcp_get_version(struct mc_version_info *version_info);
int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *buffer_map);
int mcp_load_check(const struct tee_object *obj,
const struct mcp_buffer_map *buffer_map);
int mcp_open_session(struct mcp_session *session,
const struct tee_object *obj,
const struct mcp_buffer_map *map,
const struct mcp_buffer_map *tci_map);
int mcp_close_session(struct mcp_session *session);
void mcp_kill_session(struct mcp_session *session);
int mcp_map(u32 session_id, struct mcp_buffer_map *buffer_map);
int mcp_unmap(u32 session_id, const struct mcp_buffer_map *buffer_map);
int mcp_multimap(u32 session_id, struct mcp_buffer_map *buffer_maps);
int mcp_multiunmap(u32 session_id, const struct mcp_buffer_map *buffer_maps);
int mcp_notify(struct mcp_session *mcp_session);
/* MCP initialisation/cleanup */
int mcp_init(void);
void mcp_exit(void);
int mcp_start(void);
void mcp_stop(void);
/* MCP debug */
int mcp_debug_sessions(struct kasnprintf_buf *buf);
int mcp_debug_mcpcmds(struct kasnprintf_buf *buf);
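/*
 * Illustrative usage sketch only: the expected call sequence around a
 * non-GP session. "obj", "map" and "tci_map" are assumed to have been
 * prepared by the caller; passing a NULL identity for a non-GP session
 * is an assumption of this example, not a documented contract.
 */
static inline int mcp_session_example(struct mcp_session *session,
				      const struct tee_object *obj,
				      const struct mcp_buffer_map *map,
				      const struct mcp_buffer_map *tci_map)
{
	int ret;

	mcp_session_init(session, false, NULL);
	ret = mcp_open_session(session, obj, map, tci_map);
	if (ret)
		return ret;
	/* ... mcp_notify()/mcp_session_waitnotif() exchanges go here ... */
	return mcp_close_session(session);
}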
#endif /* _MC_MCP_H_ */

View file

@ -0,0 +1,478 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/pgtable.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include "public/mc_user.h"
#include "mci/mcimcp.h"
#include "platform.h" /* CONFIG_TRUSTONIC_TEE_LPAE */
#include "main.h"
#include "mcp.h" /* mcp_buffer_map */
#include "mmu.h"
/* Common */
#define MMU_BUFFERABLE BIT(2) /* AttrIndx[0] */
#define MMU_CACHEABLE BIT(3) /* AttrIndx[1] */
#define MMU_EXT_NG BIT(11) /* ARMv6 and higher */
/* LPAE */
#define MMU_TYPE_PAGE (3 << 0)
#define MMU_NS BIT(5)
#define MMU_AP_RW_ALL BIT(6) /* AP[2:1], RW, at any privilege level */
#define MMU_EXT_SHARED_64 (3 << 8) /* SH[1:0], inner shareable */
#define MMU_EXT_AF BIT(10) /* Access Flag */
#define MMU_EXT_XN (((u64)1) << 54) /* XN */
/* Non-LPAE */
#define MMU_TYPE_EXT (3 << 0) /* v5 */
#define MMU_TYPE_SMALL (2 << 0)
#define MMU_EXT_AP0 BIT(4)
#define MMU_EXT_AP1 (2 << 4)
#define MMU_EXT_TEX(x) ((x) << 6) /* v5 */
#define MMU_EXT_SHARED_32 BIT(10) /* ARMv6 and higher */
/*
* MobiCore specific page tables for world shared memory.
* Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
* MobiCore uses the default ARM format.
*
* Number of page table entries in one L2 MMU table. This is ARM specific, an
* MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
*/
#define L2_ENTRIES_MAX 256
/*
* Small buffers (below 1MiB) are mapped using the legacy L2 table, but bigger
* buffers now use a fake L1 table that holds 64-bit pointers to L2 tables. As
* this must be exactly one page, we can hold up to 512 entries.
*/
#define L1_ENTRIES_MAX 512
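/*
 * Worked example of the resulting limits: one L2 table maps up to
 * 256 * 4 KiB = 1 MiB; the fake L1 table can reference up to 512 L2
 * tables, i.e. 512 * 1 MiB = 512 MiB per buffer. Without the fake L1
 * table (a single L2 table), the limit is 1 MiB.
 */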
/*
* Fake L1 MMU table.
*/
union l1_table {
u64 *pages_phys; /* Array of physical page addresses */
unsigned long page;
};
/*
* L2 MMU table, which is more a L3 table in the LPAE case.
*/
union l2_table {
union { /* Array of PTEs */
u32 *ptes_32;
u64 *ptes_64;
};
unsigned long page;
};
/*
 * MMU table allocated to the Daemon or a TLC, describing a world-shared
 * buffer.
 * When a user maps a malloc()ed area into the SWd, an MMU table is
 * allocated, the area (each L2 table covering at most 1 MiB of virtual
 * address space) is mapped into it, and a handle for this table is
 * returned to the user.
*/
struct tee_mmu {
union l2_table l2_tables[L1_ENTRIES_MAX]; /* L2 tables */
size_t l2_tables_nr; /* Actual number of L2 tables */
union l1_table l1_table; /* Fake L1 table */
union l2_table l1_l2_table; /* L2 table for the L1 table */
u32 offset;
u32 length;
bool user; /* Pages are from user space */
};
/*
 * Linux uses different mappings for SMP systems (the sharing flag is set
 * for the PTE). In order not to confuse things too much in MobiCore, make
 * sure the shared buffers have the same flags. This should also be done
 * on the SWd side.
*/
static u64 pte_flags_64 = MMU_BUFFERABLE | MMU_CACHEABLE | MMU_EXT_NG |
#ifdef CONFIG_SMP
MMU_EXT_SHARED_64 |
#endif /* CONFIG_SMP */
MMU_EXT_XN | MMU_EXT_AF | MMU_AP_RW_ALL |
MMU_NS | MMU_TYPE_PAGE;
static u32 pte_flags_32 = MMU_BUFFERABLE | MMU_CACHEABLE | MMU_EXT_NG |
#ifdef CONFIG_SMP
MMU_EXT_SHARED_32 | MMU_EXT_TEX(1) |
#endif /* CONFIG_SMP */
MMU_EXT_AP1 | MMU_EXT_AP0 |
MMU_TYPE_SMALL | MMU_TYPE_EXT;
static uintptr_t mmu_table_pointer(const struct tee_mmu *mmu)
{
if (mmu->l1_table.page) {
return g_ctx.f_lpae ?
(uintptr_t)mmu->l1_l2_table.ptes_64 :
(uintptr_t)mmu->l1_l2_table.ptes_32;
} else {
return g_ctx.f_lpae ?
(uintptr_t)mmu->l2_tables[0].ptes_64 :
(uintptr_t)mmu->l2_tables[0].ptes_32;
}
}
static void free_all_pages(struct tee_mmu *mmu_table)
{
union l2_table *l2_table = &mmu_table->l2_tables[0];
size_t i;
for (i = 0; i < mmu_table->l2_tables_nr; i++, l2_table++) {
if (!l2_table->page)
break;
free_page(l2_table->page);
}
if (mmu_table->l1_l2_table.page)
free_page(mmu_table->l1_l2_table.page);
if (mmu_table->l1_table.page)
free_page(mmu_table->l1_table.page);
}
/*
 * Create an MMU table for a buffer or trustlet.
*/
static inline int map_buffer(struct task_struct *task, const void *data,
unsigned int length, struct tee_mmu *mmu_table)
{
const void *reader = (const void *)((uintptr_t)data & PAGE_MASK);
struct page **pages; /* Typed view of pages_page below */
unsigned long pages_page; /* Page to contain the page pointers */
size_t chunk;
unsigned long total_pages_nr;
int l1_entries_max;
int ret = 0;
/* Check that we have enough space to map data */
mmu_table->length = length;
mmu_table->offset = (u32)((uintptr_t)data & ~PAGE_MASK);
total_pages_nr = PAGE_ALIGN(mmu_table->offset + length) / PAGE_SIZE;
if (g_ctx.f_mem_ext)
l1_entries_max = L1_ENTRIES_MAX;
else
l1_entries_max = 1;
if (total_pages_nr > (l1_entries_max * L2_ENTRIES_MAX)) {
mc_dev_err("data mapping exceeds %d pages",
l1_entries_max * L2_ENTRIES_MAX);
return -EINVAL;
}
/* Get number of L2 tables needed */
mmu_table->l2_tables_nr = (total_pages_nr + L2_ENTRIES_MAX - 1) /
L2_ENTRIES_MAX;
mc_dev_devel("total_pages_nr %lu l2_tables_nr %zu",
total_pages_nr, mmu_table->l2_tables_nr);
/* Get a page to store page pointers */
pages_page = get_zeroed_page(GFP_KERNEL);
if (!pages_page)
return -ENOMEM;
pages = (struct page **)pages_page;
/* Allocate a page for the L1 table */
if (mmu_table->l2_tables_nr > 1) {
mmu_table->l1_table.page = get_zeroed_page(GFP_KERNEL);
mmu_table->l1_l2_table.page = get_zeroed_page(GFP_KERNEL);
if (!mmu_table->l1_table.page || !mmu_table->l1_l2_table.page) {
ret = -ENOMEM;
goto end;
}
/* Map it */
if (g_ctx.f_lpae) {
u64 *pte;
pte = &mmu_table->l1_l2_table.ptes_64[0];
*pte = virt_to_phys(mmu_table->l1_table.pages_phys);
*pte |= pte_flags_64;
} else {
u32 *pte;
pte = &mmu_table->l1_l2_table.ptes_32[0];
*pte = virt_to_phys(mmu_table->l1_table.pages_phys);
*pte |= pte_flags_32;
}
}
for (chunk = 0; chunk < mmu_table->l2_tables_nr; chunk++) {
unsigned long pages_nr, i;
struct page **page_ptr;
/* Size to map for this chunk */
if (chunk == (mmu_table->l2_tables_nr - 1))
pages_nr = ((total_pages_nr - 1) % L2_ENTRIES_MAX) + 1;
else
pages_nr = L2_ENTRIES_MAX;
/* Allocate a page for the MMU descriptor */
mmu_table->l2_tables[chunk].page = get_zeroed_page(GFP_KERNEL);
if (!mmu_table->l2_tables[chunk].page) {
ret = -ENOMEM;
goto end;
}
/* Add page address to L1 table if needed */
if (mmu_table->l1_table.page) {
void *table;
if (g_ctx.f_lpae)
table = mmu_table->l2_tables[chunk].ptes_64;
else
table = mmu_table->l2_tables[chunk].ptes_32;
mmu_table->l1_table.pages_phys[chunk] =
virt_to_phys(table);
}
/* Get pages */
if (task) {
long gup_ret;
/* ExySp: for page migration */
unsigned int foll_flags =
FOLL_TOUCH | FOLL_GET | FOLL_WRITE | FOLL_CMA;
/* Buffer was allocated in user space */
down_read(&task->mm->mmap_sem);
gup_ret = __get_user_pages(task, task->mm,
(uintptr_t)reader, pages_nr,
foll_flags, pages, 0, 0);
/* ExySp: end */
up_read(&task->mm->mmap_sem);
if (gup_ret < 0) {
ret = gup_ret;
mc_dev_err("failed to get user pages @%p: %d",
reader, ret);
goto end;
}
/* check if we could lock all pages. */
if (gup_ret != pages_nr) {
mc_dev_err("get_user_pages() failed, ret: %ld",
gup_ret);
release_pages(pages, gup_ret, 0);
ret = -ENOMEM;
goto end;
}
reader += pages_nr * PAGE_SIZE;
mmu_table->user = true;
} else if (is_vmalloc_addr(data)) {
/* Buffer vmalloc'ed in kernel space */
page_ptr = &pages[0];
for (i = 0; i < pages_nr; i++) {
struct page *page = vmalloc_to_page(reader);
if (!page) {
mc_dev_err("failed to map address");
ret = -EINVAL;
goto end;
}
*page_ptr++ = page;
reader += PAGE_SIZE;
}
} else {
/* Buffer kmalloc'ed in kernel space */
struct page *page = virt_to_page(reader);
reader += pages_nr * PAGE_SIZE;
page_ptr = &pages[0];
for (i = 0; i < pages_nr; i++)
*page_ptr++ = page++;
}
/* Create MMU Table entries */
page_ptr = &pages[0];
/*
* Create MMU table entry, see ARM MMU docu for details about
* flags stored in the lowest 12 bits. As a side reference, the
* Article "ARM's multiply-mapped memory mess" found in the
* collection at http://lwn.net/Articles/409032/ is also worth
* reading.
*/
if (g_ctx.f_lpae) {
u64 *pte = &mmu_table->l2_tables[chunk].ptes_64[0];
for (i = 0; i < pages_nr; i++, page_ptr++, pte++) {
unsigned long phys = page_to_phys(*page_ptr);
*pte = phys;
*pte |= pte_flags_64;
}
} else {
u32 *pte = &mmu_table->l2_tables[chunk].ptes_32[0];
for (i = 0; i < pages_nr; i++, page_ptr++, pte++) {
unsigned long phys = page_to_phys(*page_ptr);
#if defined CONFIG_ARM64
if (phys & 0xffffffff00000000) {
mc_dev_err("64-bit pointer: 0x%16lx",
phys);
ret = -EFAULT;
goto end;
}
#endif
*pte = (u32)phys;
*pte |= pte_flags_32;
}
}
}
end:
if (ret)
free_all_pages(mmu_table);
free_page(pages_page);
return ret;
}
static inline void unmap_buffer(struct tee_mmu *mmu_table)
{
size_t t;
mc_dev_devel("clear MMU table, virt %p", mmu_table);
if (!mmu_table->user)
goto end;
/* Release all locked user space pages */
for (t = 0; t < (size_t)mmu_table->l2_tables_nr; t++) {
if (g_ctx.f_lpae) {
u64 *pte = mmu_table->l2_tables[t].ptes_64;
int i;
for (i = 0; i < L2_ENTRIES_MAX; i++, pte++) {
/* Unused entries are 0 */
if (!*pte)
break;
/* pte_page() cannot return NULL */
page_cache_release(pte_page(*pte));
}
} else {
u32 *pte = mmu_table->l2_tables[t].ptes_32;
int i;
for (i = 0; i < L2_ENTRIES_MAX; i++, pte++) {
/* Unused entries are 0 */
if (!*pte)
break;
/* pte_page() cannot return NULL */
page_cache_release(pte_page(*pte));
}
}
}
end:
free_all_pages(mmu_table);
}
/*
 * Delete an MMU table.
*/
void tee_mmu_delete(struct tee_mmu *mmu)
{
if (WARN(!mmu, "NULL mmu pointer given"))
return;
unmap_buffer(mmu);
mc_dev_devel("freed mmu %p: %s len %u off %u L%d table %lx\n",
mmu, mmu->user ? "user" : "kernel", mmu->length,
mmu->offset, mmu->l1_table.page ? 1 : 2,
mmu_table_pointer(mmu));
kfree(mmu);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_mmus);
}
/*
* Allocate MMU table and map buffer into it.
* That is, create respective table entries.
*/
struct tee_mmu *tee_mmu_create(struct task_struct *task, const void *addr,
unsigned int length)
{
struct tee_mmu *mmu;
int ret;
/* Check input arguments */
if (WARN(!addr, "data address is NULL"))
return ERR_PTR(-EINVAL);
if (WARN(!length, "data length is 0"))
return ERR_PTR(-EINVAL);
/* Allocate the struct */
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
return ERR_PTR(-ENOMEM);
/* Increment debug counter */
atomic_inc(&g_ctx.c_mmus);
/* Create the MMU mapping for the data */
ret = map_buffer(task, addr, length, mmu);
if (ret) {
kfree(mmu);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_mmus);
return ERR_PTR(ret);
}
mc_dev_devel("created mmu %p: %s addr %p len %u off %u L%d table %lx\n",
mmu, mmu->user ? "user" : "kernel", addr, mmu->length,
mmu->offset, mmu->l1_table.page ? 1 : 2,
mmu_table_pointer(mmu));
return mmu;
}
void tee_mmu_buffer(const struct tee_mmu *mmu, struct mcp_buffer_map *map)
{
uintptr_t table = mmu_table_pointer(mmu);
map->phys_addr = virt_to_phys((void *)table);
map->secure_va = 0;
map->offset = mmu->offset;
map->length = mmu->length;
if (mmu->l1_table.page)
map->type = WSM_L1;
else
map->type = WSM_L2;
}
int tee_mmu_debug_structs(struct kasnprintf_buf *buf, const struct tee_mmu *mmu)
{
return kasnprintf(buf,
"\t\t\tmmu %p: %s len %u off %u table %lx type L%d\n",
mmu, mmu->user ? "user" : "kernel", mmu->length,
mmu->offset, mmu_table_pointer(mmu),
mmu->l1_table.page ? 1 : 2);
}

View file

@ -0,0 +1,44 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _TBASE_MEM_H_
#define _TBASE_MEM_H_
struct tee_mmu;
struct mcp_buffer_map;
/*
* Allocate MMU table and map buffer into it.
* That is, create respective table entries.
*/
struct tee_mmu *tee_mmu_create(struct task_struct *task, const void *wsm_buffer,
unsigned int wsm_len);
/*
* Delete a used MMU table.
*/
void tee_mmu_delete(struct tee_mmu *mmu);
/*
* Fill in buffer info for MMU table.
*/
void tee_mmu_buffer(const struct tee_mmu *mmu, struct mcp_buffer_map *map);
/*
* Add info to debug buffer.
*/
int tee_mmu_debug_structs(struct kasnprintf_buf *buf,
const struct tee_mmu *mmu);
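/*
 * Illustrative usage sketch only: mapping a kernel buffer. A NULL task
 * selects the kernel-memory path in tee_mmu_create(); IS_ERR()/PTR_ERR()
 * come from <linux/err.h>. A real caller keeps the MMU table alive for
 * as long as the buffer stays mapped in the SWd.
 */
static inline int tee_mmu_example(void *kbuf, unsigned int len,
				  struct mcp_buffer_map *map)
{
	struct tee_mmu *mmu = tee_mmu_create(NULL, kbuf, len);

	if (IS_ERR(mmu))
		return PTR_ERR(mmu);
	tee_mmu_buffer(mmu, map);
	/* ... hand "map" over to the MCP layer, use it, unmap ... */
	tee_mmu_delete(mmu);
	return 0;
}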
#endif /* _TBASE_MEM_H_ */

View file

@ -0,0 +1,103 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Header file of MobiCore Driver Kernel Module Platform
* specific structures
*
* Internal structures of the McDrvModule
*
 * Header file of the MobiCore Driver Kernel Module,
* its internal structures and defines.
*/
#ifndef _MC_DRV_PLATFORM_H_
#define _MC_DRV_PLATFORM_H_
#define IRQ_SPI(x) ((x) + 32)
/* MobiCore Interrupt. */
#if defined(CONFIG_SOC_EXYNOS3250) || defined(CONFIG_SOC_EXYNOS3472)
#define MC_INTR_SSIQ 254
#elif defined(CONFIG_SOC_EXYNOS3475) || defined(CONFIG_SOC_EXYNOS5430) || \
defined(CONFIG_SOC_EXYNOS5433) || defined(CONFIG_SOC_EXYNOS7870) || \
defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS7880) || \
defined(CONFIG_SOC_EXYNOS7570)
#define MC_INTR_SSIQ 255
#elif defined(CONFIG_SOC_EXYNOS7420) || defined(CONFIG_SOC_EXYNOS7580)
#define MC_INTR_SSIQ 246
#endif
/* Enable Runtime Power Management */
#if defined(CONFIG_SOC_EXYNOS3472)
#ifdef CONFIG_PM_RUNTIME
#define MC_PM_RUNTIME
#endif
#endif /* CONFIG_SOC_EXYNOS3472 */
#if !defined(CONFIG_SOC_EXYNOS3472)
#define TBASE_CORE_SWITCHER
#if defined(CONFIG_SOC_EXYNOS3250)
#define COUNT_OF_CPUS 2
#elif defined(CONFIG_SOC_EXYNOS3475) || defined(CONFIG_SOC_EXYNOS7570)
#define COUNT_OF_CPUS 4
#else
#define COUNT_OF_CPUS 8
#endif
/* Values of MPIDR regs */
#if defined(CONFIG_SOC_EXYNOS3250) || defined(CONFIG_SOC_EXYNOS3475) || defined(CONFIG_SOC_EXYNOS7570)
#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003}
#elif defined(CONFIG_SOC_EXYNOS7580) || defined(CONFIG_SOC_EXYNOS7870) || defined(CONFIG_SOC_EXYNOS7880)
#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003, 0x0100, 0x0101, 0x0102, 0x0103}
#else
#define CPU_IDS {0x0100, 0x0101, 0x0102, 0x0103, 0x0000, 0x0001, 0x0002, 0x0003}
#endif
#endif /* !CONFIG_SOC_EXYNOS3472 */
/* uidgid.h does not exist in kernels before 3.5 */
#if defined(CONFIG_SOC_EXYNOS3250) || defined(CONFIG_SOC_EXYNOS3472) || \
defined(CONFIG_SOC_EXYNOS3475)
#define MC_NO_UIDGIT_H
#endif /* CONFIG_SOC_EXYNOS3250|CONFIG_SOC_EXYNOS3472|CONFIG_SOC_EXYNOS3475 */
/* SWd LPAE */
#if defined(CONFIG_SOC_EXYNOS5433) || defined(CONFIG_SOC_EXYNOS7420) || \
defined(CONFIG_SOC_EXYNOS7580) || defined(CONFIG_SOC_EXYNOS7870) || \
defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS7880) || \
defined(CONFIG_SOC_EXYNOS7570)
#ifndef CONFIG_TRUSTONIC_TEE_LPAE
#define CONFIG_TRUSTONIC_TEE_LPAE
#endif
#endif /* SWd LPAE SoCs */
/* Enable Fastcall worker thread */
#define MC_FASTCALL_WORKER_THREAD
/* Set Parameters for Secure OS Boosting */
#define DEFAULT_LITTLE_CORE 0
#define DEFAULT_BIG_CORE 3
#define MIGRATE_TARGET_CORE DEFAULT_BIG_CORE
#define MC_INTR_LOCAL_TIMER (IRQ_SPI(106) + DEFAULT_BIG_CORE)
#define LOCAL_TIMER_PERIOD 50
#define DEFAULT_SECOS_BOOST_TIME 5000
#define MAX_SECOS_BOOST_TIME 600000 /* 600 sec */
#define DUMP_TBASE_HALT_STATUS
#endif /* _MC_DRV_PLATFORM_H_ */

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "platform.h" /* MC_BL_NOTIFIER */
#ifdef MC_BL_NOTIFIER
#include "main.h"
#include "scheduler.h" /* SWd suspend/resume commands */
#include "pm.h"
#include <asm/bL_switcher.h>
static struct pm_context {
struct notifier_block bl_switcher_notifier;
} pm_ctx;
static int bl_switcher_notifier_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned int mpidr, cpu, cluster;
int ret = 0;
asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
cpu = mpidr & 0x3;
cluster = (mpidr >> 8) & 0xf;
mc_dev_devel("%s switching!!, cpu: %u, Out=%u\n",
event == SWITCH_ENTER ? "Before" : "After", cpu, cluster);
if (cpu != 0)
return 0;
switch (event) {
case SWITCH_ENTER:
ret = mc_scheduler_suspend();
break;
case SWITCH_EXIT:
ret = mc_scheduler_resume();
break;
default:
mc_dev_devel("MobiCore: Unknown switch event!\n");
}
return 0;
}
int mc_pm_start(void)
{
pm_ctx.bl_switcher_notifier.notifier_call = bl_switcher_notifier_handler;
register_bL_swicher_notifier(&pm_ctx.bl_switcher_notifier);
return 0;
}
void mc_pm_stop(void)
{
unregister_bL_swicher_notifier(&pm_ctx.bl_switcher_notifier);
}
#endif /* MC_BL_NOTIFIER */

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_PM_H_
#define _MC_PM_H_
#include "platform.h" /* MC_BL_NOTIFIER */
#ifdef MC_BL_NOTIFIER
/* Initialize Power Management */
int mc_pm_start(void);
/* Free all Power Management resources */
void mc_pm_stop(void);
#else
static inline int mc_pm_start(void)
{
return 0;
}
static inline void mc_pm_stop(void)
{
}
#endif
#endif /* _MC_PM_H_ */

View file

@ -0,0 +1,82 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MC_ADMIN_IOCTL_H__
#define __MC_ADMIN_IOCTL_H__
#include <linux/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MC_ADMIN_DEVNODE "mobicore"
/* Driver/daemon commands */
enum {
/* Command 0 is reserved */
MC_DRV_GET_ROOT_CONTAINER = 1,
MC_DRV_GET_SP_CONTAINER = 2,
MC_DRV_GET_TRUSTLET_CONTAINER = 3,
MC_DRV_GET_TRUSTLET = 4,
MC_DRV_SIGNAL_CRASH = 5,
};
/* MobiCore IOCTL magic number */
#define MC_IOC_MAGIC 'M'
struct mc_admin_request {
__u32 request_id; /* Unique request identifier */
__u32 command; /* Command to daemon */
struct mc_uuid_t uuid; /* UUID of trustlet, if relevant */
__u32 is_gp; /* Whether trustlet is GP */
__u32 spid; /* SPID of trustlet, if relevant */
};
struct mc_admin_response {
__u32 request_id; /* Unique request identifier */
__u32 error_no; /* Errno from daemon */
__u32 spid; /* SPID of trustlet, if relevant */
__u32 service_type; /* Type of trustlet being returned */
__u32 length; /* Length of data to get */
/* Any data follows */
};
struct mc_admin_driver_info {
/* Driver version and initial command ID */
__u32 drv_version;
__u32 initial_cmd_id;
};
struct mc_admin_load_info {
__u32 spid; /* SPID of trustlet, if relevant */
__u64 address; /* Address of the data */
__u32 length; /* Length of data to get */
};
#define MC_ADMIN_IO_GET_DRIVER_REQUEST \
_IOR(MC_IOC_MAGIC, 0, struct mc_admin_request)
#define MC_ADMIN_IO_GET_INFO \
_IOR(MC_IOC_MAGIC, 1, struct mc_admin_driver_info)
#define MC_ADMIN_IO_LOAD_DRIVER \
_IOW(MC_IOC_MAGIC, 2, struct mc_admin_load_info)
#define MC_ADMIN_IO_LOAD_TOKEN \
_IOW(MC_IOC_MAGIC, 3, struct mc_admin_load_info)
#define MC_ADMIN_IO_LOAD_CHECK \
_IOW(MC_IOC_MAGIC, 4, struct mc_admin_load_info)
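/*
 * Illustrative user-space sketch only (daemon side); assumes mc_user.h
 * was included first for struct mc_uuid_t, plus <fcntl.h>, <unistd.h>
 * and <sys/ioctl.h>:
 *
 *	static int wait_driver_request(struct mc_admin_request *request)
 *	{
 *		int fd = open("/dev/" MC_ADMIN_DEVNODE, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, MC_ADMIN_IO_GET_DRIVER_REQUEST, request) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */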
#ifdef __cplusplus
}
#endif
#endif /* __MC_ADMIN_IOCTL_H__ */

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LINUX_API_H_
#define _MC_LINUX_API_H_
#include <linux/types.h>
/*
 * Switch the TEE active core to core_num, given as a Linux core id
*/
int mc_switch_core(int core_num);
/*
* Return TEE active core as Linux core id
*/
int mc_active_core(void);
#endif /* _MC_LINUX_API_H_ */

View file

@ -0,0 +1,178 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_USER_H_
#define _MC_USER_H_
#define MCDRVMODULEAPI_VERSION_MAJOR 2
#define MCDRVMODULEAPI_VERSION_MINOR 10
#include <linux/types.h>
#define MC_USER_DEVNODE "mobicore-user"
/** Maximum length of MobiCore product ID string. */
#define MC_PRODUCT_ID_LEN 64
/** Number of buffers that can be mapped at once */
#define MC_MAP_MAX 4
/** Max length for buffers */
#define BUFFER_LENGTH_MAX 0x100000
/** Flags for buffers to map (aligned on GP) */
#define MC_IO_MAP_INPUT 0x1
#define MC_IO_MAP_OUTPUT 0x2
#define MC_IO_MAP_INPUT_OUTPUT (MC_IO_MAP_INPUT | MC_IO_MAP_OUTPUT)
/*
* Universally Unique Identifier (UUID) according to ISO/IEC 11578.
*/
struct mc_uuid_t {
__u8 value[16]; /* Value of the UUID. */
};
/*
* GP TA login types.
*/
enum mc_login_type {
LOGIN_PUBLIC = 0,
LOGIN_USER,
LOGIN_GROUP,
LOGIN_APPLICATION = 4,
LOGIN_USER_APPLICATION,
LOGIN_GROUP_APPLICATION,
};
/*
* GP TA identity structure.
*/
struct mc_identity {
enum mc_login_type login_type;
union {
__u8 login_data[16];
gid_t gid; /* Requested group id */
struct {
uid_t euid;
uid_t ruid;
} uid;
};
pid_t pid; /* Client, when using proxy */
};
/*
* Data exchange structure of the MC_IO_OPEN_SESSION ioctl command.
*/
struct mc_ioctl_open_session {
struct mc_uuid_t uuid; /* trustlet uuid */
__u32 is_gp_uuid; /* uuid is for GP TA */
__u32 sid; /* session id (out) */
__u64 tci; /* tci buffer pointer */
__u32 tcilen; /* tci length */
struct mc_identity identity; /* GP TA identity */
};
/*
* Data exchange structure of the MC_IO_OPEN_TRUSTLET ioctl command.
*/
struct mc_ioctl_open_trustlet {
__u32 sid; /* session id (out) */
__u32 spid; /* trustlet spid */
__u64 buffer; /* trustlet binary pointer */
__u32 tlen; /* binary length */
__u64 tci; /* tci buffer pointer */
__u32 tcilen; /* tci length */
};
/*
* Data exchange structure of the MC_IO_WAIT ioctl command.
*/
struct mc_ioctl_wait {
__u32 sid; /* session id (in) */
__s32 timeout; /* notification timeout */
__u32 partial; /* for proxy server to retry silently */
};
/*
* Data exchange structure of the MC_IO_ALLOC ioctl command.
*/
struct mc_ioctl_alloc {
__u32 len; /* buffer length */
__u32 handle; /* user handle for the buffer (out) */
};
/*
* Buffer mapping incoming and outgoing information.
*/
struct mc_ioctl_buffer {
__u64 va; /* user space address of buffer */
__u32 len; /* buffer length */
__u64 sva; /* SWd virt address of buffer (out) */
__u32 flags; /* buffer flags */
};
/*
* Data exchange structure of the MC_IO_MAP and MC_IO_UNMAP ioctl commands.
*/
struct mc_ioctl_map {
__u32 sid; /* session id */
struct mc_ioctl_buffer bufs[MC_MAP_MAX]; /* buffers info */
};
/*
* Data exchange structure of the MC_IO_ERR ioctl command.
*/
struct mc_ioctl_geterr {
__u32 sid; /* session id */
__s32 value; /* error value (out) */
};
/*
* Global MobiCore Version Information.
*/
struct mc_version_info {
char product_id[MC_PRODUCT_ID_LEN]; /** Product ID string */
__u32 version_mci; /** Mobicore Control Interface */
__u32 version_so; /** Secure Objects */
__u32 version_mclf; /** MobiCore Load Format */
__u32 version_container; /** MobiCore Container Format */
__u32 version_mc_config; /** MobiCore Config. Block Format */
__u32 version_tl_api; /** MobiCore Trustlet API */
__u32 version_dr_api; /** MobiCore Driver API */
__u32 version_nwd; /** This Driver */
};
/*
 * Defines for the MobiCore driver module ioctl calls from user space.
*/
/* MobiCore IOCTL magic number */
#define MC_IOC_MAGIC 'M'
/*
* Implement corresponding functions from user api
*/
#define MC_IO_OPEN_SESSION \
_IOWR(MC_IOC_MAGIC, 0, struct mc_ioctl_open_session)
#define MC_IO_OPEN_TRUSTLET \
_IOWR(MC_IOC_MAGIC, 1, struct mc_ioctl_open_trustlet)
#define MC_IO_CLOSE_SESSION _IO(MC_IOC_MAGIC, 2)
#define MC_IO_NOTIFY _IO(MC_IOC_MAGIC, 3)
#define MC_IO_WAIT _IOW(MC_IOC_MAGIC, 4, struct mc_ioctl_wait)
#define MC_IO_MAP _IOWR(MC_IOC_MAGIC, 5, struct mc_ioctl_map)
#define MC_IO_UNMAP _IOW(MC_IOC_MAGIC, 6, struct mc_ioctl_map)
#define MC_IO_ERR _IOWR(MC_IOC_MAGIC, 7, struct mc_ioctl_geterr)
#define MC_IO_HAS_SESSIONS _IO(MC_IOC_MAGIC, 8)
#define MC_IO_VERSION _IOR(MC_IOC_MAGIC, 9, struct mc_version_info)
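/*
 * Illustrative user-space sketch only: opening a session through the
 * MC_USER_DEVNODE device, with error handling reduced to the minimum.
 * Assumes <stdint.h>, <sys/ioctl.h> and an already-opened "fd":
 *
 *	static int open_session_example(int fd, const struct mc_uuid_t *uuid,
 *					void *tci, __u32 tcilen, __u32 *sid)
 *	{
 *		struct mc_ioctl_open_session s = {
 *			.uuid = *uuid,
 *			.is_gp_uuid = 0,
 *			.tci = (__u64)(uintptr_t)tci,
 *			.tcilen = tcilen,
 *		};
 *
 *		if (ioctl(fd, MC_IO_OPEN_SESSION, &s) < 0)
 *			return -1;
 *		*sid = s.sid;
 *		return 0;
 *	}
 */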
#endif /* _MC_USER_H_ */

View file

@ -0,0 +1,466 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MOBICORE_DRIVER_API_H_
#define _MOBICORE_DRIVER_API_H_
#include "mc_user.h"
#define __MC_CLIENT_LIB_API
/*
* Return values of MobiCore driver functions.
*/
enum mc_result {
/* Function call succeeded. */
MC_DRV_OK = 0,
/* No notification available. */
MC_DRV_NO_NOTIFICATION = 1,
/* Error during notification on communication level. */
MC_DRV_ERR_NOTIFICATION = 2,
/* Function not implemented. */
MC_DRV_ERR_NOT_IMPLEMENTED = 3,
/* No more resources available. */
MC_DRV_ERR_OUT_OF_RESOURCES = 4,
/* Driver initialization failed. */
MC_DRV_ERR_INIT = 5,
/* Unknown error. */
MC_DRV_ERR_UNKNOWN = 6,
/* The specified device is unknown. */
MC_DRV_ERR_UNKNOWN_DEVICE = 7,
/* The specified session is unknown.*/
MC_DRV_ERR_UNKNOWN_SESSION = 8,
/* The specified operation is not allowed. */
MC_DRV_ERR_INVALID_OPERATION = 9,
/* The response header from the MC is invalid. */
MC_DRV_ERR_INVALID_RESPONSE = 10,
/* Function call timed out. */
MC_DRV_ERR_TIMEOUT = 11,
/* Can not allocate additional memory. */
MC_DRV_ERR_NO_FREE_MEMORY = 12,
/* Free memory failed. */
MC_DRV_ERR_FREE_MEMORY_FAILED = 13,
/* Still some open sessions pending. */
MC_DRV_ERR_SESSION_PENDING = 14,
/* MC daemon not reachable */
MC_DRV_ERR_DAEMON_UNREACHABLE = 15,
/* The device file of the kernel module could not be opened. */
MC_DRV_ERR_INVALID_DEVICE_FILE = 16,
/* Invalid parameter. */
MC_DRV_ERR_INVALID_PARAMETER = 17,
/* Unspecified error from Kernel Module*/
MC_DRV_ERR_KERNEL_MODULE = 18,
/* Error during mapping of additional bulk memory to session. */
MC_DRV_ERR_BULK_MAPPING = 19,
/* Error during unmapping of additional bulk memory to session. */
MC_DRV_ERR_BULK_UNMAPPING = 20,
/* Notification received, exit code available. */
MC_DRV_INFO_NOTIFICATION = 21,
/* Set up of NWd connection failed. */
MC_DRV_ERR_NQ_FAILED = 22,
/* Wrong daemon version. */
MC_DRV_ERR_DAEMON_VERSION = 23,
/* Wrong container version. */
MC_DRV_ERR_CONTAINER_VERSION = 24,
/* System Trustlet public key is wrong. */
MC_DRV_ERR_WRONG_PUBLIC_KEY = 25,
/* Wrong container type(s). */
MC_DRV_ERR_CONTAINER_TYPE_MISMATCH = 26,
/* Container is locked (or not activated). */
MC_DRV_ERR_CONTAINER_LOCKED = 27,
/* SPID is not registered with root container. */
MC_DRV_ERR_SP_NO_CHILD = 28,
/* UUID is not registered with sp container. */
MC_DRV_ERR_TL_NO_CHILD = 29,
/* Unwrapping of root container failed. */
MC_DRV_ERR_UNWRAP_ROOT_FAILED = 30,
/* Unwrapping of service provider container failed. */
MC_DRV_ERR_UNWRAP_SP_FAILED = 31,
/* Unwrapping of Trustlet container failed. */
MC_DRV_ERR_UNWRAP_TRUSTLET_FAILED = 32,
/* No device associated with connection. */
MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN = 33,
/* TA blob attestation is incorrect. */
MC_DRV_ERR_TA_ATTESTATION_ERROR = 34,
/* Interrupted system call. */
MC_DRV_ERR_INTERRUPTED_BY_SIGNAL = 35,
/* Service is blocked and opensession is thus not allowed. */
MC_DRV_ERR_SERVICE_BLOCKED = 36,
/* Service is locked and opensession is thus not allowed. */
MC_DRV_ERR_SERVICE_LOCKED = 37,
/* Service was killed by the TEE (due to an administrative command). */
MC_DRV_ERR_SERVICE_KILLED = 38,
/* All permitted instances to the service are used */
MC_DRV_ERR_NO_FREE_INSTANCES = 39,
/* TA blob header is incorrect. */
MC_DRV_ERR_TA_HEADER_ERROR = 40,
};
/*
* Structure of Session Handle, includes the Session ID and the Device ID the
* Session belongs to.
* The session handle will be used for session-based MobiCore communication.
* It will be passed to calls which address a communication end point in the
* MobiCore environment.
*/
struct mc_session_handle {
u32 session_id; /* MobiCore session ID */
u32 device_id; /* Device ID the session belongs to */
};
/*
* Information structure about additional mapped Bulk buffer between the
* Trustlet Connector (NWd) and the Trustlet (SWd). This structure is
* initialized from a Trustlet Connector by calling mc_map().
* In order to use the memory within a Trustlet the Trustlet Connector has to
* inform the Trustlet with the content of this structure via the TCI.
*/
struct mc_bulk_map {
/*
* The virtual address of the Bulk buffer regarding the address space
* of the Trustlet, already includes a possible offset!
*/
u32 secure_virt_addr;
u32 secure_virt_len; /* Length of the mapped Bulk buffer */
};
/* The default device ID */
#define MC_DEVICE_ID_DEFAULT 0
/* Wait infinite for a response of the MC. */
#define MC_INFINITE_TIMEOUT ((s32)(-1))
/* Do not wait for a response of the MC. */
#define MC_NO_TIMEOUT 0
/* TCI/DCI must not exceed 1MiB */
#define MC_MAX_TCI_LEN 0x100000
/**
* mc_open_device() - Open a new connection to a MobiCore device.
* @device_id: Identifier for the MobiCore device to be used.
* MC_DEVICE_ID_DEFAULT refers to the default device.
*
* Initializes all device specific resources required to communicate with a
* MobiCore instance located on the specified device in the system. If the
* device does not exist the function will return MC_DRV_ERR_UNKNOWN_DEVICE.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_OPERATION: device already opened
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon
* MC_DRV_ERR_UNKNOWN_DEVICE: device_id unknown
* MC_DRV_ERR_INVALID_DEVICE_FILE: kernel module under /dev/mobicore
* cannot be opened
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_device(
u32 device_id);
/**
* mc_close_device() - Close the connection to a MobiCore device.
* @device_id: Identifier for the MobiCore device.
*
* When closing a device, active sessions have to be closed beforehand.
* Resources associated with the device will be released.
* The device may be opened again after it has been closed.
*
* MC_DEVICE_ID_DEFAULT refers to the default device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_SESSION_PENDING: a session is still open
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
*/
__MC_CLIENT_LIB_API enum mc_result mc_close_device(
u32 device_id);
/**
* mc_open_session() - Open a new session to a Trustlet.
* @session: On success, the session data will be returned
* @uuid: UUID of the Trustlet to be opened
* @tci: TCI buffer for communicating with the Trustlet
* @tci_len: Length of the TCI buffer. Maximum allowed value
* is MC_MAX_TCI_LEN
*
* The Trustlet with the given UUID has to be available in the flash filesystem.
*
* Write MCP open message to buffer and notify MobiCore about the availability
* of a new command.
*
 * Waits until MobiCore responds with the new session ID (stored in the MCP
 * buffer).
*
* Note that session.device_id has to be the device id of an opened device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	session parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon socket occur
* MC_DRV_ERR_NQ_FAILED: daemon returns an error
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_session(
struct mc_session_handle *session,
const struct mc_uuid_t *uuid,
u8 *tci,
u32 tci_len);
/**
* mc_open_trustlet() - Open a new session to the provided Trustlet.
* @session: On success, the session data will be returned
 * @spid:	Service Provider ID (for SP trustlets, otherwise ignored)
 * @trustlet:	Memory buffer containing the Trusted Application binary
 * @trustlet_len: Trusted Application length
* @tci: TCI buffer for communicating with the Trustlet
* @tci_len: Length of the TCI buffer. Maximum allowed value
* is MC_MAX_TCI_LEN
*
* Write MCP open message to buffer and notify MobiCore about the availability
* of a new command.
*
 * Waits until MobiCore responds with the new session ID (stored in the MCP
 * buffer).
*
* Note that session.device_id has to be the device id of an opened device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	session parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon socket occur
* MC_DRV_ERR_NQ_FAILED: daemon returns an error
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_trustlet(
struct mc_session_handle *session,
u32 spid,
u8 *trustlet,
u32 trustlet_len,
u8 *tci,
	u32 tci_len);
/**
* mc_close_session() - Close a Trustlet session.
* @session: Session to be closed.
*
* Closes the specified MobiCore session. The call will block until the
* session has been closed.
*
* Device device_id has to be opened in advance.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	session parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
* MC_DRV_ERR_INVALID_DEVICE_FILE: daemon cannot open Trustlet file
*/
__MC_CLIENT_LIB_API enum mc_result mc_close_session(
struct mc_session_handle *session);
/**
* mc_notify() - Notify a session.
* @session: The session to be notified.
*
* Notifies the session end point about available message data.
* If the session parameter is correct, notify will always succeed.
* Corresponding errors can only be received by mc_wait_notification().
*
* A session has to be opened in advance.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	session parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_notify(
struct mc_session_handle *session);
/**
* mc_wait_notification() - Wait for a notification.
* @session: The session the notification should correspond to.
* @timeout: Time in milliseconds to wait
* (MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
* MC_INFINITE_TIMEOUT : wait infinitely)
*
* Wait for a notification issued by the MobiCore for a specific session.
* The timeout parameter specifies the number of milliseconds the call will wait
* for a notification.
*
 * If the caller passes 0 as the timeout value, the call returns immediately.
 * If the timeout value is below 0, the call blocks until a notification for
 * the session has been received.
 *
 * The caller has to trust the other side to send a notification to wake it
 * up again.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_TIMEOUT: no notification arrived in time
* MC_DRV_INFO_NOTIFICATION: a problem with the session was
* encountered. Get more details with
* mc_get_session_error_code()
* MC_DRV_ERR_NOTIFICATION: a problem with the socket occurred
 * MC_DRV_ERR_INVALID_PARAMETER:	a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
struct mc_session_handle *session,
s32 timeout);
/**
* mc_malloc_wsm() - Allocate a block of world shared memory (WSM).
* @device_id: The ID of an opened device to retrieve the WSM from.
* @align: The alignment (number of pages) of the memory block
 * (e.g. 0x00000001 for 4 KiB).
* @len: Length of the block in bytes.
* @wsm: Virtual address of the world shared memory block.
* @wsm_flags: Platform specific flags describing the memory to
* be allocated.
*
* The MC driver allocates a contiguous block of memory which can be used as
* WSM.
 * This implies that the allocated memory is aligned according to the
 * alignment parameter.
 *
 * Always returns a buffer of size WSM_SIZE aligned to 4 KiB.
 *
 * Align and wsm_flags are currently ignored.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	a parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_NO_FREE_MEMORY: no more contiguous memory is
* available in this size or for this
* process
*/
__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
u32 device_id,
u32 align,
u32 len,
u8 **wsm,
u32 wsm_flags);
/**
* mc_free_wsm() - Free a block of world shared memory (WSM).
* @device_id: The ID to which the given address belongs
* @wsm: Address of WSM block to be freed
*
* The MC driver will free a block of world shared memory (WSM) previously
* allocated with mc_malloc_wsm(). The caller has to assure that the address
* handed over to the driver is a valid WSM address.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	a parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: when device id is invalid
* MC_DRV_ERR_FREE_MEMORY_FAILED: on failure
*/
__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(
u32 device_id,
u8 *wsm);
/**
 * mc_map() - Map additional bulk buffer between a Trustlet Connector (TLC)
* and the Trustlet (TL) for a session
* @session: Session handle with information of the device_id and
* the session_id. The given buffer is mapped to the
* session specified in the sessionHandle
* @buf: Virtual address of a memory portion (relative to TLC)
* to be shared with the Trustlet, already includes a
* possible offset!
* @len: length of buffer block in bytes.
* @map_info: Information structure about the mapped Bulk buffer
* between the TLC (NWd) and the TL (SWd).
*
* Memory allocated in user space of the TLC can be mapped as additional
 * communication channel (besides TCI) to the Trustlet. Limitations of the
 * Trustlet memory structure apply: only 6 chunks can be mapped with a maximum
* chunk size of 1 MiB each.
*
* It is up to the application layer (TLC) to inform the Trustlet
* about the additional mapped bulk memory.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
 * MC_DRV_ERR_BULK_MAPPING:	buf is already used as a bulk buffer or
 *				registering the buffer failed
*/
__MC_CLIENT_LIB_API enum mc_result mc_map(
struct mc_session_handle *session,
void *buf,
u32 len,
struct mc_bulk_map *map_info);
/**
* mc_unmap() - Remove additional mapped bulk buffer between Trustlet Connector
* (TLC) and the Trustlet (TL) for a session
* @session: Session handle with information of the device_id and
* the session_id. The given buffer is unmapped from the
* session specified in the sessionHandle.
* @buf: Virtual address of a memory portion (relative to TLC)
* shared with the TL, already includes a possible offset!
* @map_info: Information structure about the mapped Bulk buffer
* between the TLC (NWd) and the TL (SWd)
*
* The bulk buffer will immediately be unmapped from the session context.
*
* The application layer (TLC) must inform the TL about unmapping of the
* additional bulk memory before calling mc_unmap!
*
* The clientlib currently ignores the len field in map_info.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
 * MC_DRV_ERR_BULK_UNMAPPING:	buf was not registered earlier or
 *				unregistering failed
*/
__MC_CLIENT_LIB_API enum mc_result mc_unmap(
struct mc_session_handle *session,
void *buf,
struct mc_bulk_map *map_info);
/*
* mc_get_session_error_code() - Get additional error information of the last
* error that occurred on a session.
* @session: Session handle with information of the device_id and
* the session_id
* @exit_code: >0 Trustlet has terminated itself with this value,
* <0 Trustlet is dead because of an error within the
* MobiCore (e.g. Kernel exception). See also MCI
* definition.
*
* After the request the stored error code will be deleted.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
 * MC_DRV_ERR_INVALID_PARAMETER:	a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
struct mc_session_handle *session,
s32 *exit_code);
#endif /* _MOBICORE_DRIVER_API_H_ */
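
/*
 * Editor's illustrative sketch (not part of the API): the typical call
 * sequence for the client library above. The UUID is a caller-supplied
 * placeholder and the 4 KiB TCI size is an arbitrary example; error
 * handling is reduced to early exits. Kept under #if 0 on purpose.
 */
#if 0
static enum mc_result run_trustlet_once(const struct mc_uuid_t *uuid)
{
	struct mc_session_handle session = { 0 };
	u8 *tci = NULL;
	enum mc_result ret;

	ret = mc_open_device(MC_DEVICE_ID_DEFAULT);
	if (ret != MC_DRV_OK)
		return ret;
	/* World shared memory used as TCI; align and flags are ignored */
	ret = mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, 4096, &tci, 0);
	if (ret != MC_DRV_OK)
		goto close_device;
	session.device_id = MC_DEVICE_ID_DEFAULT;
	ret = mc_open_session(&session, uuid, tci, 4096);
	if (ret != MC_DRV_OK)
		goto free_wsm;
	/* Write a command into the TCI here, then ping-pong with the TA */
	ret = mc_notify(&session);
	if (ret == MC_DRV_OK)
		ret = mc_wait_notification(&session, MC_INFINITE_TIMEOUT);
	mc_close_session(&session);
free_wsm:
	mc_free_wsm(MC_DEVICE_ID_DEFAULT, tci);
close_device:
	mc_close_device(MC_DEVICE_ID_DEFAULT);
	return ret;
}
#endif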


@ -0,0 +1,237 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/stringify.h>
#include <linux/version.h>
#include "public/mc_user.h"
#include "main.h"
#include "fastcall.h"
#include "logging.h"
#include "mcp.h"
#include "scheduler.h"
#define SCHEDULING_FREQ 5 /**< N-SIQ every n-th time */
#define DEFAULT_TIMEOUT_MS 60000
static struct sched_ctx {
struct task_struct *thread;
bool thread_run;
struct completion idle_complete; /* Unblock scheduler thread */
struct completion sleep_complete; /* Wait for sleep status */
struct mutex sleep_mutex; /* Protect sleep request */
struct mutex request_mutex; /* Protect all below */
/* The order of this enum matters */
enum {
NONE, /* No specific request */
YIELD, /* Run the SWd */
NSIQ, /* Schedule the SWd */
SUSPEND, /* Suspend the SWd */
RESUME, /* Resume the SWd */
} request;
bool suspended;
} sched_ctx;
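/*
 * Record a request for the scheduler thread. The request enum above is
 * ordered (NONE < YIELD < NSIQ < SUSPEND < RESUME), so a pending request
 * can only be upgraded, never downgraded; completing idle_complete wakes
 * the scheduler thread to act on it.
 */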
static int mc_scheduler_command(int command)
{
if (IS_ERR_OR_NULL(sched_ctx.thread))
return -EFAULT;
mutex_lock(&sched_ctx.request_mutex);
if (sched_ctx.request < command) {
sched_ctx.request = command;
complete(&sched_ctx.idle_complete);
}
mutex_unlock(&sched_ctx.request_mutex);
return 0;
}
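/*
 * Send a SUSPEND or RESUME request and block until the scheduler thread
 * reports the resulting state through sleep_complete. Returns 0 only if
 * the SWd actually reached the requested state.
 */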
static int mc_scheduler_pm_command(int command)
{
int ret = -EPERM;
if (IS_ERR_OR_NULL(sched_ctx.thread))
return -EFAULT;
mutex_lock(&sched_ctx.sleep_mutex);
/* Send request */
mc_scheduler_command(command);
/* Wait for scheduler to reply */
wait_for_completion(&sched_ctx.sleep_complete);
mutex_lock(&sched_ctx.request_mutex);
if (command == SUSPEND) {
if (sched_ctx.suspended)
ret = 0;
} else {
if (!sched_ctx.suspended)
ret = 0;
}
mutex_unlock(&sched_ctx.request_mutex);
mutex_unlock(&sched_ctx.sleep_mutex);
return ret;
}
static int mc_dev_command(enum mcp_scheduler_commands command)
{
switch (command) {
case MCP_YIELD:
return mc_scheduler_command(YIELD);
case MCP_NSIQ:
return mc_scheduler_command(NSIQ);
}
return -EINVAL;
}
int mc_scheduler_suspend(void)
{
return mc_scheduler_pm_command(SUSPEND);
}
int mc_scheduler_resume(void)
{
return mc_scheduler_pm_command(RESUME);
}
/*
* This thread, and only this thread, schedules the SWd. Hence, reading the idle
* status and its associated timeout is safe from race conditions.
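 *
 * Each loop iteration waits until the SWd needs scheduling (idle timeout
 * or an explicit request), applies any pending suspend/resume, then either
 * resumes the SWd where it left off (yield) or invokes the SWd scheduler
 * (N-SIQ) every SCHEDULING_FREQ-th pass, and finally flushes the SWd log
 * buffer.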
*/
static int tee_scheduler(void *arg)
{
	int timeslice = 0; /* Countdown of yields until the next N-SIQ */
int ret = 0;
while (1) {
s32 timeout_ms = -1;
bool pm_request = false;
if (sched_ctx.suspended || mcp_get_idle_timeout(&timeout_ms)) {
/* If timeout is 0 we keep scheduling the SWd */
if (!timeout_ms) {
mc_scheduler_command(NSIQ);
} else {
if (timeout_ms < 0)
timeout_ms = DEFAULT_TIMEOUT_MS;
if (!wait_for_completion_timeout(
&sched_ctx.idle_complete,
msecs_to_jiffies(timeout_ms))) {
/* Timed out, force SWd schedule */
mc_scheduler_command(NSIQ);
}
}
}
if (kthread_should_stop() || !sched_ctx.thread_run)
break;
/* Get requested command if any */
mutex_lock(&sched_ctx.request_mutex);
if (sched_ctx.request == YIELD)
/* Yield forced: increment timeslice */
timeslice++;
else if (sched_ctx.request >= NSIQ) {
/* Force N_SIQ, also to suspend/resume SWd */
timeslice = 0;
if (sched_ctx.request == SUSPEND) {
mcp_suspend();
pm_request = true;
} else if (sched_ctx.request == RESUME) {
mcp_resume();
pm_request = true;
}
}
if (g_ctx.f_time)
mcp_update_time();
sched_ctx.request = NONE;
mutex_unlock(&sched_ctx.request_mutex);
/* Reset timeout so we don't loop if SWd halted */
mcp_reset_idle_timeout();
if (timeslice--) {
/* Resume SWd from where it was */
ret = mc_fc_yield();
} else {
timeslice = SCHEDULING_FREQ;
/* Call SWd scheduler */
ret = mc_fc_nsiq();
}
/* Always flush log buffer after the SWd has run */
mc_logging_run();
if (ret)
break;
/* Should have suspended by now if requested */
mutex_lock(&sched_ctx.request_mutex);
if (pm_request) {
sched_ctx.suspended = mcp_suspended();
complete(&sched_ctx.sleep_complete);
}
mutex_unlock(&sched_ctx.request_mutex);
/* Flush pending notifications if possible */
if (mcp_notifications_flush())
complete(&sched_ctx.idle_complete);
}
mc_dev_devel("exit, ret is %d\n", ret);
return ret;
}
int mc_scheduler_start(void)
{
sched_ctx.thread_run = true;
sched_ctx.thread = kthread_run(tee_scheduler, NULL, "tee_scheduler");
if (IS_ERR(sched_ctx.thread)) {
mc_dev_err("tee_scheduler thread creation failed\n");
return PTR_ERR(sched_ctx.thread);
}
mcp_register_scheduler(mc_dev_command);
complete(&sched_ctx.idle_complete);
return 0;
}
void mc_scheduler_stop(void)
{
mcp_register_scheduler(NULL);
sched_ctx.thread_run = false;
complete(&sched_ctx.idle_complete);
kthread_stop(sched_ctx.thread);
}
int mc_scheduler_init(void)
{
init_completion(&sched_ctx.idle_complete);
init_completion(&sched_ctx.sleep_complete);
mutex_init(&sched_ctx.sleep_mutex);
mutex_init(&sched_ctx.request_mutex);
return 0;
}


@ -0,0 +1,25 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MC_SCHEDULER_H__
#define __MC_SCHEDULER_H__
int mc_scheduler_init(void);
static inline void mc_scheduler_exit(void) {}
int mc_scheduler_start(void);
void mc_scheduler_stop(void);
int mc_scheduler_suspend(void);
int mc_scheduler_resume(void);
#endif /* __MC_SCHEDULER_H__ */


@ -0,0 +1,829 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include "public/mc_user.h"
#include "public/mc_admin.h"
#include "platform.h" /* MC_NO_UIDGIT_H */
#ifndef MC_NO_UIDGIT_H
#include <linux/uidgid.h>
#else
#define kuid_t uid_t
#define kgid_t gid_t
#define KGIDT_INIT(value) ((kgid_t)value)
static inline uid_t __kuid_val(kuid_t uid)
{
return uid;
}
static inline gid_t __kgid_val(kgid_t gid)
{
return gid;
}
static inline bool gid_eq(kgid_t left, kgid_t right)
{
return __kgid_val(left) == __kgid_val(right);
}
static inline bool gid_gt(kgid_t left, kgid_t right)
{
return __kgid_val(left) > __kgid_val(right);
}
static inline bool gid_lt(kgid_t left, kgid_t right)
{
return __kgid_val(left) < __kgid_val(right);
}
#endif
#include "main.h"
#include "admin.h" /* mc_is_admin_tgid */
#include "mmu.h"
#include "mcp.h"
#include "client.h" /* *cbuf* */
#include "session.h"
#include "mci/mcimcp.h" /* WSM_INVALID */
#define SHA1_HASH_SIZE 20
struct wsm {
/* Buffer NWd addr (uva or kva, used only for lookup) */
uintptr_t va;
/* buffer length */
u32 len;
/* Buffer SWd addr */
u32 sva;
/* mmu L2 table */
struct tee_mmu *mmu;
/* possibly a pointer to a cbuf */
struct cbuf *cbuf;
/* list node */
struct list_head list;
};
/* Cleanup for GP TAs, implemented as a worker to not impact other sessions */
static void session_close_worker(struct work_struct *work)
{
struct mcp_session *mcp_session;
struct tee_session *session;
mcp_session = container_of(work, struct mcp_session, close_work);
mc_dev_devel("session %x worker", mcp_session->id);
session = container_of(mcp_session, struct tee_session, mcp_session);
if (!mcp_close_session(mcp_session))
complete(&session->close_completion);
}
static struct wsm *wsm_create(struct tee_session *session, uintptr_t va,
u32 len)
{
struct wsm *wsm;
/* Allocate structure */
wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
if (!wsm)
return ERR_PTR(-ENOMEM);
wsm->mmu = client_mmu_create(session->client, va, len, &wsm->cbuf);
if (IS_ERR(wsm->mmu)) {
int ret = PTR_ERR(wsm->mmu);
kfree(wsm);
return ERR_PTR(ret);
}
/* Increment debug counter */
atomic_inc(&g_ctx.c_wsms);
wsm->va = va;
wsm->len = len;
mc_dev_devel("created wsm %p: mmu %p cbuf %p va %lx len %u\n",
wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len);
return wsm;
}
/*
* Free a WSM object, must be called under the session's wsms_lock
*/
static void wsm_free(struct tee_session *session, struct wsm *wsm)
{
/* Free MMU table */
client_mmu_free(session->client, wsm->va, wsm->mmu, wsm->cbuf);
/* Delete wsm object */
mc_dev_devel("freed wsm %p: mmu %p cbuf %p va %lx len %u sva %x\n",
wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->sva);
kfree(wsm);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_wsms);
}
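/*
 * Compute a SHA-1 over the calling process's executable path, optionally
 * followed by caller-supplied data (the uid or gid for the *_APPLICATION
 * login types), producing the GP login identification data.
 */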
static int hash_path_and_data(struct task_struct *task, u8 *hash,
const void *data, unsigned int data_len)
{
struct mm_struct *mm = task->mm;
struct hash_desc desc;
struct scatterlist sg;
char *buf;
char *path;
unsigned int path_len;
int ret = 0;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
down_read(&mm->mmap_sem);
if (!mm->exe_file) {
ret = -ENOENT;
goto end;
}
path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
if (IS_ERR(path)) {
ret = PTR_ERR(path);
goto end;
}
mc_dev_devel("process path =\n");
{
char *c;
for (c = path; *c; c++)
mc_dev_devel("%c %d\n", *c, *c);
}
path_len = (unsigned int)strnlen(path, PAGE_SIZE);
mc_dev_devel("path_len = %u\n", path_len);
desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm)) {
ret = PTR_ERR(desc.tfm);
mc_dev_devel("could not alloc hash = %d\n", ret);
goto end;
}
desc.flags = 0;
sg_init_one(&sg, path, path_len);
crypto_hash_init(&desc);
crypto_hash_update(&desc, &sg, path_len);
if (data) {
mc_dev_devel("current process path: hashing additional data\n");
sg_init_one(&sg, data, data_len);
crypto_hash_update(&desc, &sg, data_len);
}
crypto_hash_final(&desc, hash);
crypto_free_hash(desc.tfm);
end:
up_read(&mm->mmap_sem);
free_page((unsigned long)buf);
return ret;
}
/*
* groups_search is not EXPORTed so copied from kernel/groups.c
* a simple bsearch
*/
int has_group(const struct cred *cred, gid_t id_gid)
{
const struct group_info *group_info = cred->group_info;
unsigned int left, right;
kgid_t gid = KGIDT_INIT(id_gid);
if (gid_eq(gid, cred->fsgid) || gid_eq(gid, cred->egid))
return 1;
if (!group_info)
return 0;
left = 0;
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left + right) / 2;
if (gid_gt(gid, GROUP_AT(group_info, mid)))
left = mid + 1;
else if (gid_lt(gid, GROUP_AT(group_info, mid)))
right = mid;
else
return 1;
}
return 0;
}
static int check_prepare_identity(const struct mc_identity *identity,
struct identity *mcp_identity)
{
struct mc_identity *mcp_id = (struct mc_identity *)mcp_identity;
u8 hash[SHA1_HASH_SIZE];
bool application = false;
const void *data;
unsigned int data_len;
struct task_struct *task;
	/* Only LOGIN_PUBLIC is allowed if GP client authentication is unsupported */
if (!g_ctx.f_client_login &&
(identity->login_type != LOGIN_PUBLIC)) {
mc_dev_err("Unsupported login type %d\n", identity->login_type);
return -EINVAL;
}
/* Only proxy can provide a PID */
if (identity->pid) {
if (!mc_is_admin_tgid(current->tgid)) {
mc_dev_err("Incorrect PID %d\n", current->tgid);
return -EPERM;
}
rcu_read_lock();
task = pid_task(find_vpid(identity->pid), PIDTYPE_PID);
if (!task) {
rcu_read_unlock();
mc_dev_err("No task for PID %d\n", identity->gid);
return -EINVAL;
}
} else {
rcu_read_lock();
task = current;
}
/* Copy login type */
mcp_identity->login_type = identity->login_type;
/* Fill in uid field */
if ((identity->login_type == LOGIN_USER) ||
(identity->login_type == LOGIN_USER_APPLICATION)) {
/* Set euid and ruid of the process. */
mcp_id->uid.euid = __kuid_val(task_euid(task));
mcp_id->uid.ruid = __kuid_val(task_uid(task));
}
/* Check gid field */
if ((identity->login_type == LOGIN_GROUP) ||
(identity->login_type == LOGIN_GROUP_APPLICATION)) {
const struct cred *cred = __task_cred(task);
/*
* Check if gid is one of: egid of the process, its rgid or one
* of its supplementary groups
*/
if (!has_group(cred, identity->gid)) {
rcu_read_unlock();
mc_dev_err("group %d not allowed\n", identity->gid);
return -EACCES;
}
mc_dev_devel("group %d found\n", identity->gid);
mcp_id->gid = identity->gid;
}
rcu_read_unlock();
switch (identity->login_type) {
case LOGIN_PUBLIC:
case LOGIN_USER:
case LOGIN_GROUP:
break;
case LOGIN_APPLICATION:
application = true;
data = NULL;
data_len = 0;
break;
case LOGIN_USER_APPLICATION:
application = true;
data = &mcp_id->uid;
data_len = sizeof(mcp_id->uid);
break;
case LOGIN_GROUP_APPLICATION:
application = true;
data = &identity->gid;
data_len = sizeof(identity->gid);
break;
default:
/* Any other login_type value is invalid. */
mc_dev_err("Invalid login type %d\n", identity->login_type);
return -EINVAL;
}
if (application) {
if (hash_path_and_data(task, hash, data, data_len)) {
mc_dev_devel("error in hash calculation\n");
return -EAGAIN;
}
memcpy(&mcp_id->login_data, hash, sizeof(mcp_id->login_data));
}
return 0;
}
/*
* Create a session object.
* Note: object is not attached to client yet.
*/
struct tee_session *session_create(struct tee_client *client, bool is_gp,
struct mc_identity *identity)
{
struct tee_session *session;
struct identity mcp_identity;
if (is_gp) {
/* Check identity method and data. */
int ret = check_prepare_identity(identity, &mcp_identity);
if (ret)
return ERR_PTR(ret);
}
/* Allocate session object */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return ERR_PTR(-ENOMEM);
/* Increment debug counter */
atomic_inc(&g_ctx.c_sessions);
mutex_init(&session->close_lock);
init_completion(&session->close_completion);
/* Initialise object members */
mcp_session_init(&session->mcp_session, is_gp, &mcp_identity);
INIT_WORK(&session->mcp_session.close_work, session_close_worker);
client_get(client);
session->client = client;
kref_init(&session->kref);
INIT_LIST_HEAD(&session->list);
mutex_init(&session->wsms_lock);
INIT_LIST_HEAD(&session->wsms);
mc_dev_devel("created session %p: client %p\n",
session, session->client);
return session;
}
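/*
 * Open the session in the SWd. If a TCI is given, it is wrapped in a WSM
 * object that is kept on the session's list so it is unmapped and freed
 * together with the session.
 */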
int session_open(struct tee_session *session, const struct tee_object *obj,
const struct tee_mmu *obj_mmu, uintptr_t tci, size_t len)
{
struct mcp_buffer_map map;
tee_mmu_buffer(obj_mmu, &map);
/* Create wsm object for tci */
if (tci && len) {
struct wsm *wsm;
struct mcp_buffer_map tci_map;
int ret = 0;
wsm = wsm_create(session, tci, len);
if (IS_ERR(wsm))
return PTR_ERR(wsm);
tee_mmu_buffer(wsm->mmu, &tci_map);
ret = mcp_open_session(&session->mcp_session, obj, &map,
&tci_map);
if (ret) {
wsm_free(session, wsm);
return ret;
}
mutex_lock(&session->wsms_lock);
list_add_tail(&wsm->list, &session->wsms);
mutex_unlock(&session->wsms_lock);
return 0;
}
if (tci || len) {
mc_dev_err("Tci pointer and length are incoherent\n");
return -EINVAL;
}
return mcp_open_session(&session->mcp_session, obj, &map, NULL);
}
/*
* Close TA and unreference session object.
* Object will be freed if reference reaches 0.
* Session object is assumed to have been removed from main list, which means
* that session_close cannot be called anymore.
*/
int session_close(struct tee_session *session)
{
int ret = 0;
mutex_lock(&session->close_lock);
switch (mcp_close_session(&session->mcp_session)) {
case 0:
break;
case -EAGAIN:
/*
* GP TAs need time to close. The "TA closed" notification shall
* trigger the session_close_worker which will unblock us
*/
mc_dev_devel("wait for session %x worker",
session->mcp_session.id);
wait_for_completion(&session->close_completion);
break;
default:
mc_dev_err("failed to close session %x in SWd\n",
session->mcp_session.id);
ret = -EPERM;
}
mutex_unlock(&session->close_lock);
if (ret)
return ret;
mc_dev_devel("closed session %x", session->mcp_session.id);
/* Remove session from client's closing list */
mutex_lock(&session->client->sessions_lock);
list_del(&session->list);
mutex_unlock(&session->client->sessions_lock);
/* Remove the ref we took on creation */
session_put(session);
return ret;
}
/*
* Free session object and all objects it contains (wsm).
*/
static void session_release(struct kref *kref)
{
struct tee_session *session;
struct wsm *wsm, *next;
/* Remove remaining shared buffers (unmapped in SWd by mcp_close) */
session = container_of(kref, struct tee_session, kref);
list_for_each_entry_safe(wsm, next, &session->wsms, list) {
mc_dev_devel("session %p: free wsm %p\n", session, wsm);
if (wsm->sva)
atomic_dec(&g_ctx.c_maps);
wsm_free(session, wsm);
}
mc_dev_devel("freed session %p: client %p id %x\n",
session, session->client, session->mcp_session.id);
client_put(session->client);
kfree(session);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_sessions);
}
/*
* Unreference session.
* Free session object if reference reaches 0.
*/
int session_put(struct tee_session *session)
{
return kref_put(&session->kref, session_release);
}
/*
* Session is to be removed from NWd records as SWd is dead
*/
int session_kill(struct tee_session *session)
{
mcp_kill_session(&session->mcp_session);
return session_put(session);
}
/*
* Send a notification to TA
*/
int session_notify_swd(struct tee_session *session)
{
if (!session) {
mc_dev_err("Session pointer is null\n");
return -EINVAL;
}
return mcp_notify(&session->mcp_session);
}
/*
* Read and clear last notification received from TA
*/
s32 session_exitcode(struct tee_session *session)
{
return mcp_session_exitcode(&session->mcp_session);
}
static inline int wsm_debug_structs(struct kasnprintf_buf *buf, struct wsm *wsm)
{
ssize_t ret;
ret = kasnprintf(buf,
"\t\twsm %p: mmu %p cbuf %p va %lx len %u sva %x\n",
wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len, wsm->sva);
if (ret < 0)
return ret;
if (wsm->mmu) {
ret = tee_mmu_debug_structs(buf, wsm->mmu);
if (ret < 0)
return ret;
}
return 0;
}
/*
* Share buffers with SWd and add corresponding WSM objects to session.
*/
int session_wsms_add(struct tee_session *session,
struct mc_ioctl_buffer *bufs)
{
struct wsm *wsms[MC_MAP_MAX] = { 0 };
struct mcp_buffer_map maps[MC_MAP_MAX];
int i, ret = 0;
bool at_least_one = false;
/* Check parameters */
if (!session)
return -ENXIO;
/* Create MMU and map for each buffer */
for (i = 0; i < MC_MAP_MAX; i++) {
if (!bufs[i].va) {
maps[i].type = WSM_INVALID;
continue;
}
wsms[i] = wsm_create(session, bufs[i].va, bufs[i].len);
if (IS_ERR(wsms[i])) {
ret = PTR_ERR(wsms[i]);
mc_dev_err("maps[%d] va=%llx create failed: %d\n",
i, bufs[i].va, ret);
goto err;
}
tee_mmu_buffer(wsms[i]->mmu, &maps[i]);
mc_dev_devel("maps[%d] va=%llx: t:%u a:%llx o:%u l:%u\n",
i, bufs[i].va, maps[i].type, maps[i].phys_addr,
maps[i].offset, maps[i].length);
at_least_one = true;
}
if (!at_least_one) {
mc_dev_err("no buffers to map\n");
return -EINVAL;
}
/* Map buffers */
if (g_ctx.f_multimap) {
/* Send MCP message to map buffers in SWd */
ret = mcp_multimap(session->mcp_session.id, maps);
if (ret) {
mc_dev_err("multimap failed: %d\n", ret);
goto err;
}
for (i = 0; i < MC_MAP_MAX; i++) {
if (!wsms[i])
continue;
wsms[i]->sva = maps[i].secure_va;
atomic_inc(&g_ctx.c_maps);
}
} else {
/* Map each buffer */
for (i = 0; i < MC_MAP_MAX; i++) {
if (!wsms[i])
continue;
/* Send MCP message to map buffer in SWd */
ret = mcp_map(session->mcp_session.id, &maps[i]);
if (ret) {
mc_dev_err("maps[%d] va=%llx map failed: %d\n",
i, bufs[i].va, ret);
break;
}
wsms[i]->sva = maps[i].secure_va;
atomic_inc(&g_ctx.c_maps);
}
/* Unmap what was mapped on failure */
if (ret) {
for (i = 0; i < MC_MAP_MAX; i++) {
if (!wsms[i] || !wsms[i]->sva)
continue;
if (mcp_unmap(session->mcp_session.id,
&maps[i]))
mc_dev_err("unmap failed: %d\n", ret);
else
atomic_dec(&g_ctx.c_maps);
}
goto err;
}
}
for (i = 0; i < MC_MAP_MAX; i++) {
if (!wsms[i])
continue;
/* Store WSM into session */
mutex_lock(&session->wsms_lock);
list_add_tail(&wsms[i]->list, &session->wsms);
mutex_unlock(&session->wsms_lock);
bufs[i].sva = wsms[i]->sva;
mc_dev_devel("maps[%d] va=%llx map'd len=%u sva=%llx\n",
i, bufs[i].va, bufs[i].len, bufs[i].sva);
}
return 0;
err:
for (i = 0; i < MC_MAP_MAX; i++)
if (!IS_ERR_OR_NULL(wsms[i]))
wsm_free(session, wsms[i]);
return ret;
}
static inline struct wsm *wsm_find(struct tee_session *session, uintptr_t sva)
{
struct wsm *wsm;
list_for_each_entry(wsm, &session->wsms, list)
if (wsm->sva == sva)
return wsm;
return NULL;
}
/*
 * Stop sharing buffers and delete the corresponding WSM objects.
*/
int session_wsms_remove(struct tee_session *session,
const struct mc_ioctl_buffer *bufs)
{
struct wsm *wsms[MC_MAP_MAX] = { 0 };
struct mcp_buffer_map maps[MC_MAP_MAX];
int i, ret = 0;
bool at_least_one = false;
if (!session) {
mc_dev_err("session pointer is null\n");
return -EINVAL;
}
mutex_lock(&session->wsms_lock);
/* Find, check and map buffer */
for (i = 0; i < MC_MAP_MAX; i++) {
struct wsm *wsm;
if (!bufs[i].va) {
maps[i].secure_va = 0;
continue;
}
wsm = wsm_find(session, bufs[i].sva);
if (!wsm) {
ret = -EINVAL;
mc_dev_err("maps[%d] va=%llx sva=%llx not found\n",
i, bufs[i].va, bufs[i].sva);
goto out;
}
/* Check VA */
if (wsm->va != bufs[i].va) {
ret = -EINVAL;
mc_dev_err("maps[%d] va=%llx does not match %lx\n",
i, bufs[i].va, wsm->va);
goto out;
}
/* Check length */
if (wsm->len != bufs[i].len) {
ret = -EINVAL;
mc_dev_err("maps[%d] va=%llx len mismatch: %u != %u\n",
i, bufs[i].va, wsm->len, bufs[i].len);
goto out;
}
wsms[i] = wsm;
tee_mmu_buffer(wsms[i]->mmu, &maps[i]);
maps[i].secure_va = wsms[i]->sva;
mc_dev_devel("maps[%d] va=%llx: t:%u a:%llx o:%u l:%u s:%llx\n",
i, bufs[i].va, maps[i].type, maps[i].phys_addr,
maps[i].offset, maps[i].length, maps[i].secure_va);
at_least_one = true;
}
if (!at_least_one) {
ret = -EINVAL;
mc_dev_err("no buffers to unmap\n");
goto out;
}
if (g_ctx.f_multimap) {
/* Send MCP command to unmap buffers in SWd */
ret = mcp_multiunmap(session->mcp_session.id, maps);
if (ret) {
mc_dev_err("mcp_multiunmap failed: %d\n", ret);
} else {
for (i = 0; i < MC_MAP_MAX; i++)
if (maps[i].secure_va)
atomic_dec(&g_ctx.c_maps);
}
} else {
for (i = 0; i < MC_MAP_MAX; i++) {
if (!maps[i].secure_va)
continue;
/* Send MCP command to unmap buffer in SWd */
ret = mcp_unmap(session->mcp_session.id, &maps[i]);
if (ret) {
mc_dev_err(
"maps[%d] va=%llx unmap failed: %d\n",
i, bufs[i].va, ret);
/* Keep going */
} else {
atomic_dec(&g_ctx.c_maps);
}
}
}
for (i = 0; i < MC_MAP_MAX; i++) {
if (!wsms[i])
continue;
/* Remove wsm from its parent session's list */
list_del(&wsms[i]->list);
/* Free wsm */
wsm_free(session, wsms[i]);
mc_dev_devel("maps[%d] va=%llx unmap'd len=%u sva=%llx\n",
i, bufs[i].va, bufs[i].len, bufs[i].sva);
}
out:
mutex_unlock(&session->wsms_lock);
return ret;
}
/*
* Sleep until next notification from SWd.
*/
int session_waitnotif(struct tee_session *session, s32 timeout,
bool silent_expiry)
{
return mcp_session_waitnotif(&session->mcp_session, timeout,
silent_expiry);
}
int session_debug_structs(struct kasnprintf_buf *buf,
struct tee_session *session, bool is_closing)
{
struct wsm *wsm;
s32 exit_code;
int ret;
exit_code = mcp_session_exitcode(&session->mcp_session);
ret = kasnprintf(buf, "\tsession %p [%d]: %x %s ec %d%s\n", session,
kref_read(&session->kref), session->mcp_session.id,
session->mcp_session.is_gp ? "GP" : "MC", exit_code,
is_closing ? " <closing>" : "");
if (ret < 0)
return ret;
	/* WSMs */
mutex_lock(&session->wsms_lock);
if (list_empty(&session->wsms))
goto done;
list_for_each_entry(wsm, &session->wsms, list) {
ret = wsm_debug_structs(buf, wsm);
if (ret < 0)
goto done;
}
done:
mutex_unlock(&session->wsms_lock);
if (ret < 0)
return ret;
return 0;
}


@ -0,0 +1,68 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _SESSION_H_
#define _SESSION_H_
#include <linux/list.h>
#include "mcp.h"
struct tee_object;
struct tee_mmu;
struct mc_ioctl_buffer;
struct tee_session {
/* Session closing lock, so two calls cannot be made simultaneously */
struct mutex close_lock;
/* Asynchronous session close (GP) requires a callback to unblock */
struct completion close_completion;
/* MCP session descriptor (MUST BE FIRST) */
struct mcp_session mcp_session;
/* Owner */
struct tee_client *client;
/* Number of references kept to this object */
struct kref kref;
/* The list entry to attach to session list of owner */
struct list_head list;
/* Session WSMs lock */
struct mutex wsms_lock;
/* List of WSMs for a session */
struct list_head wsms;
};
struct tee_session *session_create(struct tee_client *client, bool is_gp,
struct mc_identity *identity);
int session_open(struct tee_session *session, const struct tee_object *obj,
const struct tee_mmu *obj_mmu, uintptr_t tci, size_t len);
int session_close(struct tee_session *session);
static inline void session_get(struct tee_session *session)
{
kref_get(&session->kref);
}
int session_put(struct tee_session *session);
int session_kill(struct tee_session *session);
int session_wsms_add(struct tee_session *session,
struct mc_ioctl_buffer *bufs);
int session_wsms_remove(struct tee_session *session,
const struct mc_ioctl_buffer *bufs);
s32 session_exitcode(struct tee_session *session);
int session_notify_swd(struct tee_session *session);
int session_waitnotif(struct tee_session *session, s32 timeout,
bool silent_expiry);
int session_debug_structs(struct kasnprintf_buf *buf,
struct tee_session *session, bool is_closing);
#endif /* _SESSION_H_ */


@ -0,0 +1,304 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/uaccess.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include "public/mc_user.h"
#include "main.h"
#include "user.h"
#include "client.h"
#include "mcp.h" /* mcp_get_version */
/*
* Get client object from file pointer
*/
static inline struct tee_client *get_client(struct file *file)
{
return (struct tee_client *)file->private_data;
}
/*
* Callback for system open()
* A set of internal client data are created and initialized.
*
* @inode
* @file
* Returns 0 if OK or -ENOMEM if no allocation was possible.
*/
static int user_open(struct inode *inode, struct file *file)
{
struct tee_client *client;
/* Create client */
mc_dev_devel("from %s (%d)\n", current->comm, current->pid);
client = client_create(false);
if (!client)
return -ENOMEM;
/* Store client in user file */
file->private_data = client;
return 0;
}
/*
* Callback for system close()
* The client object is freed.
* @inode
* @file
* Returns 0
*/
static int user_release(struct inode *inode, struct file *file)
{
struct tee_client *client = get_client(file);
/* Close client */
mc_dev_devel("from %s (%d)\n", current->comm, current->pid);
if (WARN(!client, "No client data available"))
return -EPROTO;
/* Detach client from user file */
file->private_data = NULL;
/* Destroy client, including remaining sessions */
client_close(client);
return 0;
}
/*
* Check r/w access to referenced memory
*/
static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg)
{
int err = 0;
if (_IOC_DIR(cmd) & _IOC_READ)
err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
if (err)
return -EFAULT;
return 0;
}
/*
* Callback for system ioctl()
* Implement most of ClientLib API functions
* @file pointer to file
 * @id	ioctl command
* @arg arguments
*
* Returns 0 for OK and an errno in case of error
*/
static long user_ioctl(struct file *file, unsigned int id, unsigned long arg)
{
struct tee_client *client = get_client(file);
int __user *uarg = (int __user *)arg;
int ret = -EINVAL;
mc_dev_devel("%u from %s\n", _IOC_NR(id), current->comm);
if (WARN(!client, "No client data available"))
return -EPROTO;
if (ioctl_check_pointer(id, uarg))
return -EFAULT;
switch (id) {
case MC_IO_HAS_SESSIONS:
/* Freeze the client */
if (client_has_sessions(client))
ret = -ENOTEMPTY;
else
ret = 0;
break;
case MC_IO_OPEN_SESSION: {
struct mc_ioctl_open_session session;
if (copy_from_user(&session, uarg, sizeof(session))) {
ret = -EFAULT;
break;
}
ret = client_open_session(client, &session.sid, &session.uuid,
session.tci, session.tcilen,
session.is_gp_uuid,
&session.identity);
if (ret)
break;
if (copy_to_user(uarg, &session, sizeof(session))) {
ret = -EFAULT;
client_remove_session(client, session.sid);
break;
}
break;
}
case MC_IO_OPEN_TRUSTLET: {
struct mc_ioctl_open_trustlet trustlet;
if (copy_from_user(&trustlet, uarg, sizeof(trustlet))) {
ret = -EFAULT;
break;
}
/* Call internal api */
ret = client_open_trustlet(client, &trustlet.sid, trustlet.spid,
trustlet.buffer, trustlet.tlen,
trustlet.tci, trustlet.tcilen);
if (ret)
break;
if (copy_to_user(uarg, &trustlet, sizeof(trustlet))) {
ret = -EFAULT;
client_remove_session(client, trustlet.sid);
break;
}
break;
}
case MC_IO_CLOSE_SESSION: {
u32 sid = (u32)arg;
ret = client_remove_session(client, sid);
break;
}
case MC_IO_NOTIFY: {
u32 sid = (u32)arg;
ret = client_notify_session(client, sid);
break;
}
case MC_IO_WAIT: {
struct mc_ioctl_wait wait;
if (copy_from_user(&wait, uarg, sizeof(wait))) {
ret = -EFAULT;
break;
}
ret = client_waitnotif_session(client, wait.sid, wait.timeout,
wait.partial);
break;
}
case MC_IO_MAP: {
struct mc_ioctl_map map;
if (copy_from_user(&map, uarg, sizeof(map))) {
ret = -EFAULT;
break;
}
ret = client_map_session_wsms(client, map.sid, map.bufs);
if (ret)
break;
/* Fill in return struct */
if (copy_to_user(uarg, &map, sizeof(map))) {
ret = -EFAULT;
client_unmap_session_wsms(client, map.sid, map.bufs);
break;
}
break;
}
case MC_IO_UNMAP: {
struct mc_ioctl_map map;
if (copy_from_user(&map, uarg, sizeof(map))) {
ret = -EFAULT;
break;
}
ret = client_unmap_session_wsms(client, map.sid, map.bufs);
break;
}
case MC_IO_ERR: {
struct mc_ioctl_geterr *uerr = (struct mc_ioctl_geterr *)uarg;
u32 sid;
s32 exit_code;
if (get_user(sid, &uerr->sid)) {
ret = -EFAULT;
break;
}
ret = client_get_session_exitcode(client, sid, &exit_code);
if (ret)
break;
/* Fill in return struct */
if (put_user(exit_code, &uerr->value)) {
ret = -EFAULT;
break;
}
break;
}
case MC_IO_VERSION: {
struct mc_version_info version_info;
ret = mcp_get_version(&version_info);
if (ret)
break;
if (copy_to_user(uarg, &version_info, sizeof(version_info)))
ret = -EFAULT;
break;
}
default:
mc_dev_err("unsupported cmd=0x%x\n", id);
ret = -ENOIOCTLCMD;
}
return ret;
}
/*
* Callback for system mmap()
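 * A successful mapping allocates a contiguous world-shared buffer (cbuf)
 * of the requested size for the calling client; requests larger than
 * BUFFER_LENGTH_MAX are rejected.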
*/
static int user_mmap(struct file *file, struct vm_area_struct *vmarea)
{
struct tee_client *client = get_client(file);
if ((vmarea->vm_end - vmarea->vm_start) > BUFFER_LENGTH_MAX)
return -EINVAL;
/* Alloc contiguous buffer for this client */
return client_cbuf_create(client,
(u32)(vmarea->vm_end - vmarea->vm_start),
NULL, vmarea);
}
static const struct file_operations mc_user_fops = {
.owner = THIS_MODULE,
.open = user_open,
.release = user_release,
.unlocked_ioctl = user_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = user_ioctl,
#endif
.mmap = user_mmap,
};
int mc_user_init(struct cdev *cdev)
{
cdev_init(cdev, &mc_user_fops);
return 0;
}


@ -0,0 +1,25 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _USER_H_
#define _USER_H_
struct cdev;
int mc_user_init(struct cdev *cdev);
static inline void mc_user_exit(void)
{
}
#endif /* _USER_H_ */


@ -0,0 +1,23 @@
#
# Makefile for the <t-base trusted UI driver
#
GUD_ROOT_FOLDER := drivers/gud/
# add our modules to kernel.
obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui.o
TlcTui-y := main.o tlcTui.o trustedui.o tui-hal.o
# Release mode by default
ccflags-y += -DNDEBUG
ccflags-y += -Wno-declaration-after-statement
ccflags-$(CONFIG_TRUSTONIC_TEE_DEBUG) += -DDEBUG
# MobiCore Driver includes
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
# MobiCore TlcTui required includes
ccflags-y += -I$(GUD_ROOT_FOLDER)/TlcTui/inc \
-I$(GUD_ROOT_FOLDER)/TlcTui/public


@ -0,0 +1,17 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MOBICORE_COMPONENT_BUILD_TAG
#define MOBICORE_COMPONENT_BUILD_TAG \
"t-base-EXYNOS64-Android-310B-V005-20151218_124504_60"
#endif


@ -0,0 +1,106 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DCITUI_H__
#define __DCITUI_H__
/**< Responses have bit 31 set */
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmd_id) (((uint32_t)(cmd_id)) | RSP_ID_MASK)
#define IS_CMD(cmd_id) ((((uint32_t)(cmd_id)) & RSP_ID_MASK) == 0)
#define IS_RSP(cmd_id) ((((uint32_t)(cmd_id)) & RSP_ID_MASK) == RSP_ID_MASK)
#define CMD_ID_FROM_RSP(rsp_id) (rsp_id & (~RSP_ID_MASK))
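/*
 * Example: for command 2 (CMD_TUI_SW_CLOSE_SESSION), RSP_ID(2) yields
 * 0x80000002, IS_RSP(0x80000002) is true and CMD_ID_FROM_RSP(0x80000002)
 * recovers 2.
 */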
/**
* Return codes of driver commands.
*/
#define TUI_DCI_OK 0x00030000
#define TUI_DCI_ERR_UNKNOWN_CMD 0x00030001
#define TUI_DCI_ERR_NOT_SUPPORTED 0x00030002
#define TUI_DCI_ERR_INTERNAL_ERROR 0x00030003
#define TUI_DCI_ERR_NO_RESPONSE 0x00030004
#define TUI_DCI_ERR_BAD_PARAMETERS 0x00030005
#define TUI_DCI_ERR_NO_EVENT 0x00030006
#define TUI_DCI_ERR_OUT_OF_DISPLAY 0x00030007
/* ... add more error codes when needed */
/**
* Notification ID's for communication Trustlet Connector -> Driver.
*/
#define NOT_TUI_NONE 0
/* NWd system event that closes the current TUI session*/
#define NOT_TUI_CANCEL_EVENT 1
/**
* Command ID's for communication Driver -> Trustlet Connector.
*/
#define CMD_TUI_SW_NONE 0
/* SWd request to NWd to start the TUI session */
#define CMD_TUI_SW_OPEN_SESSION 1
/* SWd request to NWd to close the TUI session */
#define CMD_TUI_SW_CLOSE_SESSION 2
/* SWd request to NWd stop accessing display controller */
#define CMD_TUI_SW_STOP_DISPLAY 3
/**
* Maximum data length.
*/
#define MAX_DCI_DATA_LEN (1024*100)
/* Command payload */
struct tui_alloc_data_t {
uint32_t alloc_size;
uint32_t num_of_buff;
};
union dci_cmd_payload_t {
struct tui_alloc_data_t alloc_data;
};
/* Command */
struct dci_command_t {
uint32_t id;
union dci_cmd_payload_t payload;
};
/* TUI frame buffer (output from NWd) */
struct tui_alloc_buffer_t {
uint64_t pa;
};
#define MAX_DCI_BUFFER_NUMBER 4
/* Response */
struct dci_response_t {
uint32_t id; /* must be command ID | RSP_ID_MASK */
uint32_t return_code;
struct tui_alloc_buffer_t alloc_buffer[MAX_DCI_BUFFER_NUMBER];
};
/* DCI buffer */
struct tui_dci_msg_t {
uint32_t nwd_notif; /* Notification from TlcTui to DrTui */
struct dci_command_t cmd_nwd; /* Command from DrTui to TlcTui */
struct dci_response_t nwd_rsp; /* Response from TlcTui to DrTui */
};
/**
* Driver UUID. Update accordingly after reserving UUID
*/
#define DR_TUI_UUID { { 7, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
#endif /* __DCITUI_H__ */
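
/*
 * Editor's illustrative sketch (not part of the protocol header): how a
 * NWd handler might answer a command arriving through the shared DCI
 * buffer. The dci pointer is assumed to address the shared tui_dci_msg_t;
 * a notification to the SWd must follow for the response to be seen.
 */
#if 0
static void answer_dci_command(struct tui_dci_msg_t *dci, uint32_t ret_code)
{
	/* Echo the command id with the response bit set, plus a status */
	dci->nwd_rsp.id = RSP_ID(dci->cmd_nwd.id);
	dci->nwd_rsp.return_code = ret_code;
}
#endif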


@ -0,0 +1,38 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __TBASE_TUI_H__
#define __TBASE_TUI_H__
#define TRUSTEDUI_MODE_OFF 0x00
#define TRUSTEDUI_MODE_ALL 0xff
#define TRUSTEDUI_MODE_TUI_SESSION 0x01
#define TRUSTEDUI_MODE_VIDEO_SECURED 0x02
#define TRUSTEDUI_MODE_INPUT_SECURED 0x04
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI
int trustedui_blank_inc(void);
int trustedui_blank_dec(void);
int trustedui_blank_get_counter(void);
void trustedui_blank_set_counter(int counter);
int trustedui_get_current_mode(void);
void trustedui_set_mode(int mode);
int trustedui_set_mask(int mask);
int trustedui_clear_mask(int mask);
#endif /* CONFIG_TRUSTONIC_TRUSTED_UI */
#endif /* __TBASE_TUI_H__ */


@ -0,0 +1,190 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "tui_ioctl.h"
#include "tlcTui.h"
#include "mobicore_driver_api.h"
#include "dciTui.h"
#include "tui-hal.h"
#include "build_tag.h"
/*static int tui_dev_major_number = 122; */
/*module_param(tui_dev_major_number, int, 0000); */
/*MODULE_PARM_DESC(major, */
/* "The device major number used to register a unique char device driver"); */
/* Static variables */
static struct cdev tui_cdev;
static long tui_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
int ret = -ENOTTY;
int __user *uarg = (int __user *)arg;
if (_IOC_TYPE(cmd) != TUI_IO_MAGIC)
return -EINVAL;
pr_info("t-base-tui module: ioctl 0x%x ", cmd);
switch (cmd) {
case TUI_IO_NOTIFY:
pr_info("TUI_IO_NOTIFY\n");
if (tlc_notify_event(arg))
ret = 0;
else
ret = -EFAULT;
break;
case TUI_IO_WAITCMD: {
uint32_t cmd_id;
pr_info("TUI_IO_WAITCMD\n");
ret = tlc_wait_cmd(&cmd_id);
if (ret) {
pr_debug("ERROR %s:%d tlc_wait_cmd returned (0x%08X)\n",
__func__, __LINE__, ret);
return ret;
}
/* Write command id to user */
pr_debug("IOCTL: sending command %d to user.\n", cmd_id);
if (copy_to_user(uarg, &cmd_id, sizeof(cmd_id)))
ret = -EFAULT;
else
ret = 0;
break;
}
	case TUI_IO_ACK: {
		struct tlc_tui_response_t rsp_id;
		pr_info("TUI_IO_ACK\n");
		/* Read user response; do not ack with a garbage id on failure */
		if (copy_from_user(&rsp_id, uarg, sizeof(rsp_id)))
			return -EFAULT;
		pr_debug("IOCTL: User completed command %d.\n", rsp_id.id);
		ret = tlc_ack_cmd(&rsp_id);
		if (ret)
			return ret;
		break;
	}
default:
pr_info("undefined!\n");
return -ENOTTY;
}
return ret;
}
atomic_t fileopened;
static int tui_open(struct inode *inode, struct file *file)
{
pr_info("TUI file opened\n");
atomic_inc(&fileopened);
return 0;
}
static int tui_release(struct inode *inode, struct file *file)
{
pr_info("TUI file closed\n");
atomic_dec(&fileopened);
return 0;
}
static const struct file_operations tui_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tui_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = tui_ioctl,
#endif
.open = tui_open,
.release = tui_release
};
/*--------------------------------------------------------------------------- */
static int __init tlc_tui_init(void)
{
dev_t devno;
int err;
static struct class *tui_class;
pr_info("Loading t-base-tui module.\n");
pr_debug("\n=============== Running TUI Kernel TLC ===============\n");
pr_info("%s\n", MOBICORE_COMPONENT_BUILD_TAG);
atomic_set(&fileopened, 0);
err = alloc_chrdev_region(&devno, 0, 1, TUI_DEV_NAME);
if (err) {
pr_err("Unable to allocate Trusted UI device number\n");
return err;
}
cdev_init(&tui_cdev, &tui_fops);
tui_cdev.owner = THIS_MODULE;
/* tui_cdev.ops = &tui_fops; */
err = cdev_add(&tui_cdev, devno, 1);
if (err) {
pr_err("Unable to add Trusted UI char device\n");
unregister_chrdev_region(devno, 1);
return err;
}
tui_class = class_create(THIS_MODULE, "tui_cls");
if (IS_ERR(tui_class)) {
pr_err("Unable to create TUI device class\n");
cdev_del(&tui_cdev);
unregister_chrdev_region(devno, 1);
return PTR_ERR(tui_class);
}
device_create(tui_class, NULL, devno, NULL, TUI_DEV_NAME);
if (!hal_tui_init())
return -EPERM;
return 0;
}
static void __exit tlc_tui_exit(void)
{
pr_info("Unloading t-base-tui module.\n");
unregister_chrdev_region(tui_cdev.dev, 1);
cdev_del(&tui_cdev);
hal_tui_exit();
}
module_init(tlc_tui_init);
module_exit(tlc_tui_exit);
MODULE_AUTHOR("Trustonic Limited");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("<t-base TUI");

View file

@ -0,0 +1,295 @@
/*
* Copyright (c) 2014-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <video/s3c-fb.h>
#include <t-base-tui.h>
#include "tui_ioctl.h"
#include "dciTui.h"
#include "tlcTui.h"
#include "tui-hal.h"
#define TUI_MEMPOOL_SIZE 0
struct tui_mempool {
void *va;
unsigned long pa;
size_t size;
};
static struct tui_mempool g_tui_mem_pool;
static bool allocate_tui_memory_pool(struct tui_mempool *pool, size_t size)
{
bool ret = false;
void *tui_mem_pool = NULL;
pr_info("%s %s:%d\n", __func__, __FILE__, __LINE__);
if (!size) {
pr_debug("TUI frame buffer: nothing to allocate.");
return true;
}
tui_mem_pool = kmalloc(size, GFP_KERNEL);
if (!tui_mem_pool) {
pr_debug("ERROR Could not allocate TUI memory pool");
} else if (ksize(tui_mem_pool) < size) {
pr_err("TUI mem pool size too small: req'd=%d alloc'd=%d", size,
ksize(tui_mem_pool));
kfree(tui_mem_pool);
} else {
pool->va = tui_mem_pool;
pool->pa = virt_to_phys(tui_mem_pool);
pool->size = ksize(tui_mem_pool);
ret = true;
}
return ret;
}
static void free_tui_memory_pool(struct tui_mempool *pool)
{
kfree(pool->va);
memset(pool, 0, sizeof(*pool));
}
static int is_device_ok(struct device *fbdev, void *p)
{
return 1;
}
static struct device *get_fb_dev(void)
{
struct device *fbdev = NULL;
/* get the first framebuffer device */
/* [TODO] Handle properly when there are more than one framebuffer */
fbdev = class_find_device(fb_class, NULL, NULL, is_device_ok);
if (NULL == fbdev) {
pr_debug("ERROR cannot get framebuffer device\n");
return NULL;
}
return fbdev;
}
static struct fb_info *get_fb_info(struct device *fbdev)
{
struct fb_info *fb_info;
if (!fbdev->p) {
pr_debug("ERROR framebuffer device has no private data\n");
return NULL;
}
fb_info = (struct fb_info *)dev_get_drvdata(fbdev);
if (!fb_info) {
pr_debug("ERROR framebuffer device has no fb_info\n");
return NULL;
}
return fb_info;
}
static void blank_framebuffer(int getref)
{
struct device *fbdev = NULL;
struct fb_info *fb_info;
struct s3c_fb_win *win;
struct s3c_fb *sfb;
fbdev = get_fb_dev();
if (!fbdev)
return;
fb_info = get_fb_info(fbdev);
if (!fb_info)
return;
/*
* hold a reference to the dsim device, to prevent it from going into
* power management during tui session
*/
win = fb_info->par;
sfb = win->parent;
if (getref)
pm_runtime_get_sync(sfb->dev);
/* blank the framebuffer */
lock_fb_info(fb_info);
console_lock();
fb_info->flags |= FBINFO_MISC_USEREVENT;
pr_info("%s call fb_blank\n", __func__);
fb_blank(fb_info, FB_BLANK_POWERDOWN);
fb_info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
unlock_fb_info(fb_info);
pr_info("%s call s3c_fb_deactivate_vsync\n", __func__);
s3c_fb_deactivate_vsync(sfb);
}
static void unblank_framebuffer(int releaseref)
{
struct device *fbdev = NULL;
struct fb_info *fb_info;
struct s3c_fb_win *win;
struct s3c_fb *sfb;
fbdev = get_fb_dev();
if (!fbdev)
return;
fb_info = get_fb_info(fbdev);
if (!fb_info)
return;
/*
* Release the reference we took at the beginning of the TUI session
*/
win = fb_info->par;
sfb = win->parent;
pr_info("%s call s3c_fb_activate_vsync\n", __func__);
s3c_fb_activate_vsync(sfb);
/*
* Unblank the framebuffer
*/
console_lock();
fb_info->flags |= FBINFO_MISC_USEREVENT;
fb_blank(fb_info, FB_BLANK_UNBLANK);
fb_info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
if (releaseref)
pm_runtime_put_sync(sfb->dev);
}
uint32_t hal_tui_init(void)
{
/* Allocate memory pool for the framebuffer
*/
if (!allocate_tui_memory_pool(&g_tui_mem_pool, TUI_MEMPOOL_SIZE))
return TUI_DCI_ERR_INTERNAL_ERROR;
return TUI_DCI_OK;
}
void hal_tui_exit(void)
{
/* delete memory pool if any */
if (g_tui_mem_pool.va)
free_tui_memory_pool(&g_tui_mem_pool);
}
uint32_t hal_tui_alloc(
struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
size_t allocsize, uint32_t number)
{
uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
if (!allocbuffer) {
pr_debug("%s(%d): allocbuffer is null\n", __func__, __LINE__);
return TUI_DCI_ERR_INTERNAL_ERROR;
}
pr_debug("%s(%d): Requested size=0x%x x %u chunks\n", __func__,
__LINE__, allocsize, number);
if ((size_t)allocsize == 0) {
pr_debug("%s(%d): Nothing to allocate\n", __func__, __LINE__);
return TUI_DCI_OK;
}
if (number != 3) {
pr_debug("%s(%d): Unexpected number of buffers requested\n",
__func__, __LINE__);
return TUI_DCI_ERR_INTERNAL_ERROR;
}
if (allocsize * number <= g_tui_mem_pool.size) {
/* requested buffer fits in the memory pool */
unsigned int i;
for (i = 0; i < number; i++) {
pr_info("%s(%d): allocbuffer + %d = 0x%p\n", __func__,
__LINE__, i, allocbuffer+i);
allocbuffer[i].pa =
(uint64_t) (g_tui_mem_pool.pa + i * allocsize);
pr_info("%s(%d): allocated at %llx\n", __func__,
__LINE__, allocbuffer[i].pa);
}
ret = TUI_DCI_OK;
} else {
/* requested buffer is bigger than the memory pool, return an
error */
pr_debug("%s(%d): Memory pool too small\n", __func__, __LINE__);
ret = TUI_DCI_ERR_INTERNAL_ERROR;
}
return ret;
}
void hal_tui_free(void)
{
}
uint32_t hal_tui_deactivate(void)
{
/* Set linux TUI flag */
trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
trustedui_blank_set_counter(0);
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI_FB_BLANK
blank_framebuffer(1);
/* TODO-[2014-03-19]-julare01: disabled for Arndale board but this
* should be re enabled and put into a HAL */
/* disable_irq(gpio_to_irq(190)); */
#endif
trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
TRUSTEDUI_MODE_INPUT_SECURED);
return TUI_DCI_OK;
}
uint32_t hal_tui_activate(void)
{
/* Protect NWd */
trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
TRUSTEDUI_MODE_INPUT_SECURED);
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI_FB_BLANK
pr_info("Unblanking\n");
/* TODO-[2014-03-19]-julare01: disabled for Arndale board but this
* should be re enabled and put into a HAL */
/* enable_irq(gpio_to_irq(190));*/
unblank_framebuffer(1);
#endif
/* Clear linux TUI flag */
trustedui_set_mode(TRUSTEDUI_MODE_OFF);
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI_FB_BLANK
pr_info("Unsetting TUI flag (blank counter=%d)",
trustedui_blank_get_counter());
if (0 < trustedui_blank_get_counter())
blank_framebuffer(0);
#endif
return TUI_DCI_OK;
}

View file

@ -0,0 +1,52 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef TUI_IOCTL_H_
#define TUI_IOCTL_H_
/* Response header */
struct tlc_tui_response_t {
uint32_t id;
uint32_t return_code;
};
/* Command IDs */
#define TLC_TUI_CMD_NONE 0
#define TLC_TUI_CMD_START_ACTIVITY 1
#define TLC_TUI_CMD_STOP_ACTIVITY 2
/* Return codes */
#define TLC_TUI_OK 0
#define TLC_TUI_ERROR 1
#define TLC_TUI_ERR_UNKNOWN_CMD 2
/*
* defines for the ioctl TUI driver module function call from user space.
*/
#define TUI_DEV_NAME "t-base-tui"
#define TUI_IO_MAGIC 't'
#define TUI_IO_NOTIFY _IOW(TUI_IO_MAGIC, 1, uint32_t)
#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, uint32_t)
#define TUI_IO_ACK _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
#ifdef INIT_COMPLETION
#define reinit_completion(x) INIT_COMPLETION(*(x))
#endif
#endif /* TUI_IOCTL_H_ */
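A minimal user-space sketch of the wait/ack loop a client such as TuiService could run over these ioctls; the device path, the error handling, and tui_daemon_loop() itself are assumptions for illustration.

#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "tui_ioctl.h"

/* Hypothetical daemon loop: block for a command, act on it, ack it. */
int tui_daemon_loop(void)
{
	uint32_t cmd_id;
	struct tlc_tui_response_t rsp;
	int fd = open("/dev/" TUI_DEV_NAME, O_RDWR);

	if (fd < 0)
		return -1;
	for (;;) {
		if (ioctl(fd, TUI_IO_WAITCMD, &cmd_id) < 0)
			break;	/* interrupted or driver gone */
		/* ... start/stop the TUI activity for cmd_id here ... */
		rsp.id = cmd_id;
		rsp.return_code = TLC_TUI_OK;
		if (ioctl(fd, TUI_IO_ACK, &rsp) < 0)
			break;
	}
	close(fd);
	return 0;
}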

View file

@ -0,0 +1,405 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include "mobicore_driver_api.h"
#include "tui_ioctl.h"
#include "tlcTui.h"
#include "dciTui.h"
#include "tui-hal.h"
/* ------------------------------------------------------------- */
/* Globals */
struct tui_dci_msg_t *dci;
DECLARE_COMPLETION(dci_comp);
DECLARE_COMPLETION(io_comp);
/* ------------------------------------------------------------- */
/* Static */
static const uint32_t DEVICE_ID = MC_DEVICE_ID_DEFAULT;
static struct task_struct *thread_id;
static uint32_t g_cmd_id = TLC_TUI_CMD_NONE;
static struct mc_session_handle dr_session_handle = {0, 0};
static struct tlc_tui_response_t g_user_rsp = {
TLC_TUI_CMD_NONE, TLC_TUI_ERR_UNKNOWN_CMD};
/* Functions */
/* ------------------------------------------------------------- */
static bool tlc_open_driver(void)
{
bool ret = false;
enum mc_result mc_ret;
struct mc_uuid_t dr_uuid = DR_TUI_UUID;
/* Allocate WSM buffer for the DCI */
mc_ret = mc_malloc_wsm(DEVICE_ID, 0, sizeof(struct tui_dci_msg_t),
(uint8_t **)&dci, 0);
if (MC_DRV_OK != mc_ret) {
pr_debug("ERROR %s: Allocation of DCI WSM failed: %d\n",
__func__, mc_ret);
return false;
}
/* Clear the session handle */
memset(&dr_session_handle, 0, sizeof(dr_session_handle));
/* The device ID (default device) is used */
dr_session_handle.device_id = DEVICE_ID;
/* Open session with the Driver */
mc_ret = mc_open_session(&dr_session_handle, &dr_uuid, (uint8_t *)dci,
(uint32_t)sizeof(struct tui_dci_msg_t));
if (MC_DRV_OK != mc_ret) {
pr_debug("ERROR %s: Open driver session failed: %d\n",
__func__, mc_ret);
ret = false;
} else {
ret = true;
}
return ret;
}
/* ------------------------------------------------------------- */
static bool tlc_open(void)
{
bool ret = false;
enum mc_result mc_ret;
/* Open the tbase device */
pr_debug("%s: Opening tbase device\n", __func__);
mc_ret = mc_open_device(DEVICE_ID);
/* In case the device is already open, mc_open_device will return an
* error (MC_DRV_ERR_INVALID_OPERATION). But in this case, we can
* continue, even though mc_open_device returned an error. Stop in all
* other error cases.
*/
if (MC_DRV_OK != mc_ret && MC_DRV_ERR_INVALID_OPERATION != mc_ret) {
pr_debug("ERROR %s: Error %d opening device\n", __func__,
mc_ret);
return false;
}
pr_debug("%s: Opening driver session\n", __func__);
ret = tlc_open_driver();
return ret;
}
/* ------------------------------------------------------------- */
static void tlc_wait_cmd_from_driver(void)
{
uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
/* Wait for a command from secure driver */
ret = mc_wait_notification(&dr_session_handle, -1);
if (MC_DRV_OK == ret)
pr_debug("tlc_wait_cmd_from_driver: Got a command\n");
else
pr_debug("ERROR %s: mc_wait_notification() failed: %d\n",
__func__, ret);
}
static uint32_t send_cmd_to_user(uint32_t command_id)
{
uint32_t ret = TUI_DCI_ERR_NO_RESPONSE;
/* Init shared variables */
g_cmd_id = command_id;
g_user_rsp.id = TLC_TUI_CMD_NONE;
g_user_rsp.return_code = TLC_TUI_ERR_UNKNOWN_CMD;
/* Check that the client (TuiService) is still present before
* returning the command. */
if (atomic_read(&fileopened)) {
/* Unlock the ioctl thread (IOCTL_WAIT) in order to let the
* client know that there is a command to process. */
pr_info("%s: give way to ioctl thread\n", __func__);
complete(&dci_comp);
pr_info("TUI TLC is running, waiting for the userland response\n");
/* Wait for the client acknowledge (IOCTL_ACK). */
unsigned long completed = wait_for_completion_timeout(&io_comp,
msecs_to_jiffies(5000));
if (!completed) {
pr_debug("%s:%d No acknowledge from client, timeout!\n",
__func__, __LINE__);
}
} else {
/* There is no client, do nothing except reporting an error to
* SWd. */
pr_info("TUI TLC seems dead. Not waiting for userland answer\n");
ret = TUI_DCI_ERR_INTERNAL_ERROR;
goto end;
}
pr_debug("send_cmd_to_user: Got an answer from ioctl thread.\n");
reinit_completion(&io_comp);
/* Check id of the cmd processed by ioctl thread (paranoia) */
if (g_user_rsp.id != command_id) {
pr_debug("ERROR %s: Wrong response id 0x%08x instead of 0x%08x\n",
__func__, g_user_rsp.id, command_id);
ret = TUI_DCI_ERR_INTERNAL_ERROR;
} else {
/* retrieve return code */
switch (g_user_rsp.return_code) {
case TLC_TUI_OK:
ret = TUI_DCI_OK;
break;
case TLC_TUI_ERROR:
ret = TUI_DCI_ERR_INTERNAL_ERROR;
break;
case TLC_TUI_ERR_UNKNOWN_CMD:
ret = TUI_DCI_ERR_UNKNOWN_CMD;
break;
}
}
end:
/* In any case, reset the value of the command, to ensure that commands
* sent due to interrupted wait_for_completion are TLC_TUI_CMD_NONE. */
reset_global_command_id();
return ret;
}
/* ------------------------------------------------------------- */
static void tlc_process_cmd(void)
{
uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
uint32_t command_id = CMD_TUI_SW_NONE;
if (NULL == dci) {
pr_debug("ERROR %s: DCI has not been set up properly - exiting\n",
__func__);
return;
} else {
command_id = dci->cmd_nwd.id;
}
/* Warn if previous response was not acknowledged */
if (CMD_TUI_SW_NONE == command_id) {
pr_debug("ERROR %s: Notified without command\n", __func__);
return;
} else {
if (dci->nwd_rsp.id != CMD_TUI_SW_NONE)
pr_debug("%s: Warning, previous response not ack\n",
__func__);
}
/* Handle command */
switch (command_id) {
case CMD_TUI_SW_OPEN_SESSION:
pr_debug("%s: CMD_TUI_SW_OPEN_SESSION.\n", __func__);
/* Start android TUI activity */
ret = send_cmd_to_user(TLC_TUI_CMD_START_ACTIVITY);
if (TUI_DCI_OK != ret)
break;
/* allocate TUI frame buffer */
ret = hal_tui_alloc(dci->nwd_rsp.alloc_buffer,
dci->cmd_nwd.payload.alloc_data.alloc_size,
dci->cmd_nwd.payload.alloc_data.num_of_buff);
if (TUI_DCI_OK != ret)
break;
/* Deactivate linux UI drivers */
ret = hal_tui_deactivate();
if (TUI_DCI_OK != ret) {
hal_tui_free();
send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY);
break;
}
break;
case CMD_TUI_SW_CLOSE_SESSION:
pr_debug("%s: CMD_TUI_SW_CLOSE_SESSION.\n", __func__);
/* Activate linux UI drivers */
ret = hal_tui_activate();
hal_tui_free();
/* Stop android TUI activity */
/* Ignore return code, because an error means the TLC has been
* killed, which implies that the activity is stopped already.
*/
send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY);
ret = TUI_DCI_OK;
break;
default:
pr_debug("ERROR %s: Unknown command %d\n",
__func__, command_id);
break;
}
/* Fill in response to SWd, fill ID LAST */
pr_debug("%s: return 0x%08x to cmd 0x%08x\n",
__func__, ret, command_id);
dci->nwd_rsp.return_code = ret;
dci->nwd_rsp.id = RSP_ID(command_id);
/* Acknowledge command */
dci->cmd_nwd.id = CMD_TUI_SW_NONE;
/* Notify SWd */
pr_debug("DCI RSP NOTIFY CORE\n");
ret = mc_notify(&dr_session_handle);
if (MC_DRV_OK != ret)
pr_debug("ERROR %s: Notify failed: %d\n", __func__, ret);
}
/* ------------------------------------------------------------- */
static void tlc_close_driver(void)
{
enum mc_result ret;
/* Close session with the Driver */
ret = mc_close_session(&dr_session_handle);
if (MC_DRV_OK != ret) {
pr_debug("ERROR %s: Closing driver session failed: %d\n",
__func__, ret);
}
}
/* ------------------------------------------------------------- */
static void tlc_close(void)
{
enum mc_result ret;
pr_debug("%s: Closing driver session\n", __func__);
tlc_close_driver();
pr_debug("%s: Closing tbase\n", __func__);
/* Close the tbase device */
ret = mc_close_device(DEVICE_ID);
if (MC_DRV_OK != ret) {
pr_debug("ERROR %s: Closing tbase device failed: %d\n",
__func__, ret);
}
}
void reset_global_command_id(void)
{
g_cmd_id = TLC_TUI_CMD_NONE;
}
/* ------------------------------------------------------------- */
bool tlc_notify_event(uint32_t event_type)
{
bool ret = false;
enum mc_result result;
if (NULL == dci) {
pr_debug("ERROR tlc_notify_event: DCI has not been set up properly - exiting\n");
return false;
}
/* Prepare notification message in DCI */
pr_debug("tlc_notify_event: event_type = %d\n", event_type);
dci->nwd_notif = event_type;
/* Signal the Driver */
pr_debug("DCI EVENT NOTIFY CORE\n");
result = mc_notify(&dr_session_handle);
if (MC_DRV_OK != result) {
pr_debug("ERROR tlc_notify_event: mc_notify failed: %d\n",
result);
ret = false;
} else {
ret = true;
}
return ret;
}
/* ------------------------------------------------------------- */
/* TlcTui main thread: opens the driver session, then services DCI
* commands from the secure driver in an endless loop. */
int main_thread(void *uarg)
{
pr_debug("main_thread: TlcTui start!\n");
/* Open session on the driver */
if (!tlc_open()) {
pr_debug("ERROR main_thread: open driver failed!\n");
return 1;
}
/* TlcTui main thread loop */
for (;;) {
/* Wait for a command from the DrTui on DCI*/
tlc_wait_cmd_from_driver();
/* Something has been received, process it. */
tlc_process_cmd();
}
/* Close tlc. Note that this frees the DCI pointer.
* Do not use this pointer after tlc_close().*/
tlc_close();
return 0;
}
int tlc_wait_cmd(uint32_t *cmd_id)
{
/* Create the TlcTui Main thread and start secure driver (only
1st time) */
if (dr_session_handle.session_id == 0) {
thread_id = kthread_run(main_thread, NULL, "dci_thread");
if (IS_ERR(thread_id)) {
pr_err("Unable to start Trusted UI main thread\n");
return PTR_ERR(thread_id);
}
}
/* Wait for signal from DCI handler */
/* In case of an interrupted sys call, return with -EINTR */
if (wait_for_completion_interruptible(&dci_comp)) {
pr_debug("interrupted by system\n");
return -ERESTARTSYS;
}
reinit_completion(&dci_comp);
*cmd_id = g_cmd_id;
return 0;
}
int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id)
{
g_user_rsp = *rsp_id;
/* Send signal to DCI */
complete(&io_comp);
return 0;
}
/** @} */

View file

@ -0,0 +1,24 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef TLCTUI_H_
#define TLCTUI_H_
void reset_global_command_id(void);
int tlc_wait_cmd(uint32_t *cmd_id);
int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id);
bool tlc_notify_event(uint32_t event_type);
extern atomic_t fileopened;
#endif /* TLCTUI_H_ */

View file

@ -0,0 +1,130 @@
/*
* Copyright (c) 2013 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/**
* File : trustedui.c
* Created : 26-02-2010
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/t-base-tui.h>
static int trustedui_mode = TRUSTEDUI_MODE_OFF;
static int trustedui_blank_counter;
static DEFINE_SPINLOCK(trustedui_lock);
int trustedui_blank_inc(void)
{
unsigned long flags;
int newvalue;
spin_lock_irqsave(&trustedui_lock, flags);
newvalue = ++trustedui_blank_counter;
spin_unlock_irqrestore(&trustedui_lock, flags);
return newvalue;
}
EXPORT_SYMBOL(trustedui_blank_inc);
int trustedui_blank_dec(void)
{
unsigned long flags;
int newvalue;
spin_lock_irqsave(&trustedui_lock, flags);
newvalue = --trustedui_blank_counter;
spin_unlock_irqrestore(&trustedui_lock, flags);
return newvalue;
}
EXPORT_SYMBOL(trustedui_blank_dec);
int trustedui_blank_get_counter(void)
{
unsigned long flags;
int newvalue;
spin_lock_irqsave(&trustedui_lock, flags);
newvalue = trustedui_blank_counter;
spin_unlock_irqrestore(&trustedui_lock, flags);
return newvalue;
}
EXPORT_SYMBOL(trustedui_blank_get_counter);
void trustedui_blank_set_counter(int counter)
{
unsigned long flags;
spin_lock_irqsave(&trustedui_lock, flags);
trustedui_blank_counter = counter;
spin_unlock_irqrestore(&trustedui_lock, flags);
}
EXPORT_SYMBOL(trustedui_blank_set_counter);
int trustedui_get_current_mode(void)
{
unsigned long flags;
int mode;
spin_lock_irqsave(&trustedui_lock, flags);
mode = trustedui_mode;
spin_unlock_irqrestore(&trustedui_lock, flags);
return mode;
}
EXPORT_SYMBOL(trustedui_get_current_mode);
void trustedui_set_mode(int mode)
{
unsigned long flags;
spin_lock_irqsave(&trustedui_lock, flags);
trustedui_mode = mode;
spin_unlock_irqrestore(&trustedui_lock, flags);
}
EXPORT_SYMBOL(trustedui_set_mode);
int trustedui_set_mask(int mask)
{
unsigned long flags;
int mode;
spin_lock_irqsave(&trustedui_lock, flags);
mode = trustedui_mode |= mask;
spin_unlock_irqrestore(&trustedui_lock, flags);
return mode;
}
EXPORT_SYMBOL(trustedui_set_mask);
int trustedui_clear_mask(int mask)
{
unsigned long flags;
int mode;
spin_lock_irqsave(&trustedui_lock, flags);
mode = trustedui_mode &= ~mask;
spin_unlock_irqrestore(&trustedui_lock, flags);
return mode;
}
EXPORT_SYMBOL(trustedui_clear_mask);
MODULE_AUTHOR("Trustonic Limited");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("<t-base TUI");

View file

@ -0,0 +1,295 @@
/*
* Copyright (c) 2014-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <video/s3c-fb.h>
#include <t-base-tui.h>
#include "tui_ioctl.h"
#include "dciTui.h"
#include "tlcTui.h"
#include "tui-hal.h"
#define TUI_MEMPOOL_SIZE 0
struct tui_mempool {
void *va;
unsigned long pa;
size_t size;
};
static struct tui_mempool g_tui_mem_pool;
static bool allocate_tui_memory_pool(struct tui_mempool *pool, size_t size)
{
bool ret = false;
void *tui_mem_pool = NULL;
pr_info("%s %s:%d\n", __func__, __FILE__, __LINE__);
if (!size) {
pr_debug("TUI frame buffer: nothing to allocate.");
return true;
}
tui_mem_pool = kmalloc(size, GFP_KERNEL);
if (!tui_mem_pool) {
pr_debug("ERROR Could not allocate TUI memory pool");
} else if (ksize(tui_mem_pool) < size) {
pr_err("TUI mem pool size too small: req'd=%d alloc'd=%d", size,
ksize(tui_mem_pool));
kfree(tui_mem_pool);
} else {
pool->va = tui_mem_pool;
pool->pa = virt_to_phys(tui_mem_pool);
pool->size = ksize(tui_mem_pool);
ret = true;
}
return ret;
}
static void free_tui_memory_pool(struct tui_mempool *pool)
{
kfree(pool->va);
memset(pool, 0, sizeof(*pool));
}
static int is_device_ok(struct device *fbdev, void *p)
{
return 1;
}
static struct device *get_fb_dev(void)
{
struct device *fbdev = NULL;
/* get the first framebuffer device */
/* [TODO] Handle properly when there are more than one framebuffer */
fbdev = class_find_device(fb_class, NULL, NULL, is_device_ok);
if (NULL == fbdev) {
pr_debug("ERROR cannot get framebuffer device\n");
return NULL;
}
return fbdev;
}
static struct fb_info *get_fb_info(struct device *fbdev)
{
struct fb_info *fb_info;
if (!fbdev->p) {
pr_debug("ERROR framebuffer device has no private data\n");
return NULL;
}
fb_info = (struct fb_info *)dev_get_drvdata(fbdev);
if (!fb_info) {
pr_debug("ERROR framebuffer device has no fb_info\n");
return NULL;
}
return fb_info;
}
static void blank_framebuffer(int getref)
{
struct device *fbdev = NULL;
struct fb_info *fb_info;
struct s3c_fb_win *win;
struct s3c_fb *sfb;
fbdev = get_fb_dev();
if (!fbdev)
return;
fb_info = get_fb_info(fbdev);
if (!fb_info)
return;
/*
* hold a reference to the dsim device, to prevent it from going into
* power management during tui session
*/
win = fb_info->par;
sfb = win->parent;
if (getref)
pm_runtime_get_sync(sfb->dev);
/* blank the framebuffer */
lock_fb_info(fb_info);
console_lock();
fb_info->flags |= FBINFO_MISC_USEREVENT;
pr_info("%s call fb_blank\n", __func__);
fb_blank(fb_info, FB_BLANK_POWERDOWN);
fb_info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
unlock_fb_info(fb_info);
pr_info("%s call s3c_fb_deactivate_vsync\n", __func__);
s3c_fb_deactivate_vsync(sfb);
}
static void unblank_framebuffer(int releaseref)
{
struct device *fbdev = NULL;
struct fb_info *fb_info;
struct s3c_fb_win *win;
struct s3c_fb *sfb;
fbdev = get_fb_dev();
if (!fbdev)
return;
fb_info = get_fb_info(fbdev);
if (!fb_info)
return;
/*
* Release the reference we took at the beginning of the TUI session
*/
win = fb_info->par;
sfb = win->parent;
pr_info("%s call s3c_fb_activate_vsync\n", __func__);
s3c_fb_activate_vsync(sfb);
/*
* Unblank the framebuffer
*/
console_lock();
fb_info->flags |= FBINFO_MISC_USEREVENT;
fb_blank(fb_info, FB_BLANK_UNBLANK);
fb_info->flags &= ~FBINFO_MISC_USEREVENT;
console_unlock();
if (releaseref)
pm_runtime_put_sync(sfb->dev);
}
uint32_t hal_tui_init(void)
{
/* Allocate memory pool for the framebuffer
*/
if (!allocate_tui_memory_pool(&g_tui_mem_pool, TUI_MEMPOOL_SIZE))
return TUI_DCI_ERR_INTERNAL_ERROR;
return TUI_DCI_OK;
}
void hal_tui_exit(void)
{
/* delete memory pool if any */
if (g_tui_mem_pool.va)
free_tui_memory_pool(&g_tui_mem_pool);
}
uint32_t hal_tui_alloc(
struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
size_t allocsize, uint32_t number)
{
uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
if (!allocbuffer) {
pr_debug("%s(%d): allocbuffer is null\n", __func__, __LINE__);
return TUI_DCI_ERR_INTERNAL_ERROR;
}
pr_debug("%s(%d): Requested size=0x%x x %u chunks\n", __func__,
__LINE__, allocsize, number);
if ((size_t)allocsize == 0) {
pr_debug("%s(%d): Nothing to allocate\n", __func__, __LINE__);
return TUI_DCI_OK;
}
if (number != 3) {
pr_debug("%s(%d): Unexpected number of buffers requested\n",
__func__, __LINE__);
return TUI_DCI_ERR_INTERNAL_ERROR;
}
if (allocsize * number <= g_tui_mem_pool.size) {
/* requested buffer fits in the memory pool */
unsigned int i;
for (i = 0; i < number; i++) {
pr_info("%s(%d): allocbuffer + %d = 0x%p\n", __func__,
__LINE__, i, allocbuffer+i);
allocbuffer[i].pa =
(uint64_t) (g_tui_mem_pool.pa + i * allocsize);
pr_info("%s(%d): allocated at %llx\n", __func__,
__LINE__, allocbuffer[i].pa);
}
ret = TUI_DCI_OK;
} else {
/* requested buffer is bigger than the memory pool, return an
error */
pr_debug("%s(%d): Memory pool too small\n", __func__, __LINE__);
ret = TUI_DCI_ERR_INTERNAL_ERROR;
}
return ret;
}
void hal_tui_free(void)
{
}
uint32_t hal_tui_deactivate(void)
{
/* Set linux TUI flag */
trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
trustedui_blank_set_counter(0);
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI_FB_BLANK
blank_framebuffer(1);
/* TODO-[2014-03-19]-julare01: disabled for Arndale board but this
* should be re enabled and put into a HAL */
/* disable_irq(gpio_to_irq(190)); */
#endif
trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
TRUSTEDUI_MODE_INPUT_SECURED);
return TUI_DCI_OK;
}
uint32_t hal_tui_activate(void)
{
/* Protect NWd */
trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
TRUSTEDUI_MODE_INPUT_SECURED);
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI_FB_BLANK
pr_info("Unblanking\n");
/* TODO-[2014-03-19]-julare01: disabled for Arndale board but this
* should be re enabled and put into a HAL */
/* enable_irq(gpio_to_irq(190));*/
unblank_framebuffer(1);
#endif
/* Clear linux TUI flag */
trustedui_set_mode(TRUSTEDUI_MODE_OFF);
#ifdef CONFIG_TRUSTONIC_TRUSTED_UI_FB_BLANK
pr_info("Unsetting TUI flag (blank counter=%d)",
trustedui_blank_get_counter());
if (0 < trustedui_blank_get_counter())
blank_framebuffer(0);
#endif
return TUI_DCI_OK;
}

View file

@ -0,0 +1,29 @@
/*
* Copyright (c) 2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _TUI_HAL_H_
#define _TUI_HAL_H_
#include <linux/types.h>
uint32_t hal_tui_init(void);
void hal_tui_exit(void);
uint32_t hal_tui_alloc(
struct tui_alloc_buffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
size_t allocsize, uint32_t number);
void hal_tui_free(void);
uint32_t hal_tui_deactivate(void);
uint32_t hal_tui_activate(void);
#endif

View file

@ -0,0 +1,10 @@
#
# Secure OS control configuration
#
config SECURE_OS_BOOSTER_API
bool "Secure OS booster API"
depends on TRUSTONIC_TEE
---help---
The secure OS booster API is used for secure OS performance
enhancement. It can migrate the secure OS to a faster core
and lock the CPU frequency.

View file

@ -0,0 +1,12 @@
#
# Makefile for Secure OS booster API
#
obj-$(CONFIG_SECURE_OS_BOOSTER_API) += sec_os_booster.o
# MobiCore kernel driver path
GUD_ROOT_FOLDER := drivers/gud/gud-exynos7570
ccflags-y += -Iinclude/soc/samsung/
# Includes MobiCore kernel driver
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public

View file

@ -0,0 +1,324 @@
/* drivers/gud/sec-os-ctrl/secos_booster.c
*
* Secure OS booster driver for Samsung Exynos
*
* Copyright (c) 2014 Samsung Electronics
* http://www.samsungsemi.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/of_gpio.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/pm_qos.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <secos_booster.h>
#include <cpufreq.h>
#include "platform.h"
#define BOOST_POLICY_OFFSET 0
#define BOOST_TIME_OFFSET 16
#define NS_DIV_MS (1000ull * 1000ull)
#define WAIT_TIME (10ull * NS_DIV_MS)
int mc_switch_core(uint32_t core_num);
void mc_set_schedule_policy(int core);
uint32_t mc_active_core(void);
int mc_boost_usage_count;
struct mutex boost_lock;
unsigned int current_core;
unsigned int is_suspend_prepared;
struct timer_work {
struct kthread_work work;
};
static struct pm_qos_request secos_booster_cluster1_qos;
static struct hrtimer timer;
static int max_cpu_freq;
static struct task_struct *mc_timer_thread; /* Timer Thread task structure */
static DEFINE_KTHREAD_WORKER(mc_timer_worker);
static struct hrtimer mc_hrtimer;
static enum hrtimer_restart mc_hrtimer_func(struct hrtimer *timer)
{
struct irq_desc *desc = irq_to_desc(MC_INTR_LOCAL_TIMER);
if (desc->depth != 0)
enable_irq(MC_INTR_LOCAL_TIMER);
return HRTIMER_NORESTART;
}
static void mc_timer_work_func(struct kthread_work *work)
{
hrtimer_start(&mc_hrtimer, ns_to_ktime((u64)LOCAL_TIMER_PERIOD * NSEC_PER_MSEC), HRTIMER_MODE_REL);
}
int secos_booster_request_pm_qos(struct pm_qos_request *req, s32 freq)
{
static ktime_t recent_qos_req_time;
ktime_t current_time;
unsigned long long ns;
current_time = ktime_get();
ns = ktime_to_ns(ktime_sub(current_time, recent_qos_req_time));
if (ns > 0 && WAIT_TIME > ns) {
pr_info("%s: recalling time is too short. wait %lldms\n", __func__, (WAIT_TIME - ns) / NS_DIV_MS + 1);
msleep((WAIT_TIME - ns) / NS_DIV_MS + 1);
}
pm_qos_update_request(req, freq);
recent_qos_req_time = ktime_get();
return 0;
}
int mc_timer(void)
{
struct timer_work t_work = {
KTHREAD_WORK_INIT(t_work.work, mc_timer_work_func),
};
if (!queue_kthread_work(&mc_timer_worker, &t_work.work))
return false;
flush_kthread_work(&t_work.work);
return true;
}
static int mc_timer_init(void)
{
cpumask_t cpu;
mc_timer_thread = kthread_create(kthread_worker_fn, &mc_timer_worker, "mc_timer");
if (IS_ERR(mc_timer_thread)) {
mc_timer_thread = NULL;
pr_err("%s: timer thread creation failed!", __func__);
return -EFAULT;
}
wake_up_process(mc_timer_thread);
cpumask_setall(&cpu);
cpumask_clear_cpu(MIGRATE_TARGET_CORE, &cpu);
set_cpus_allowed(mc_timer_thread, cpu);
hrtimer_init(&mc_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
mc_hrtimer.function = mc_hrtimer_func;
return 0;
}
static void stop_wq(struct work_struct *work)
{
int ret;
ret = secos_booster_stop();
if (ret)
pr_err("%s: secos_booster_stop failed. err:%d\n", __func__, ret);
return;
}
static DECLARE_WORK(stopwq, stop_wq);
static enum hrtimer_restart secos_booster_hrtimer_fn(struct hrtimer *timer)
{
schedule_work_on(0, &stopwq);
return HRTIMER_NORESTART;
}
int secos_booster_start(enum secos_boost_policy policy)
{
int ret = 0;
int freq;
uint32_t boost_time; /* milli second */
enum secos_boost_policy boost_policy;
mutex_lock(&boost_lock);
mc_boost_usage_count++;
if (mc_boost_usage_count > 1) {
goto out;
} else if (mc_boost_usage_count <= 0) {
pr_err("boost usage count sync error. count : %d\n", mc_boost_usage_count);
mc_boost_usage_count = 0;
ret = -EINVAL;
goto error;
}
current_core = mc_active_core();
boost_time = (((uint32_t)policy) >> BOOST_TIME_OFFSET) & 0xFFFF;
boost_policy = (((uint32_t)policy) >> BOOST_POLICY_OFFSET) & 0xFFFF;
/* migrate to big Core */
if (boost_policy >= PERFORMANCE_MAX_CNT || boost_policy < 0) {
pr_err("%s: wrong secos boost policy:%d\n", __func__, boost_policy);
ret = -EINVAL;
goto error;
}
/* cpufreq configuration */
if (boost_policy == MAX_PERFORMANCE)
freq = max_cpu_freq;
else if (boost_policy == MID_PERFORMANCE)
freq = max_cpu_freq;
else if (boost_policy == STB_PERFORMANCE)
freq = max_cpu_freq;
else
freq = 0;
if (!cpu_online(MIGRATE_TARGET_CORE)) {
pr_debug("%s: %d core is offline\n", __func__, MIGRATE_TARGET_CORE);
udelay(100);
if (!cpu_online(MIGRATE_TARGET_CORE)) {
pr_debug("%s: %d core is offline\n", __func__, MIGRATE_TARGET_CORE);
ret = -EPERM;
goto error;
}
pr_debug("%s: %d core is online\n", __func__, MIGRATE_TARGET_CORE);
}
if (secos_booster_request_pm_qos(&secos_booster_cluster1_qos, freq)) { /* KHz */
ret = -EPERM;
goto error;
}
ret = mc_switch_core(MIGRATE_TARGET_CORE);
if (ret) {
pr_err("%s: mc switch failed : err:%d\n", __func__, ret);
secos_booster_request_pm_qos(&secos_booster_cluster1_qos, 0);
ret = -EPERM;
goto error;
}
if (boost_policy == STB_PERFORMANCE) {
/* Restore origin performance policy after spend default boost time */
if (boost_time == 0)
boost_time = DEFAULT_SECOS_BOOST_TIME;
hrtimer_cancel(&timer);
hrtimer_start(&timer, ns_to_ktime((u64)boost_time * NSEC_PER_MSEC),
HRTIMER_MODE_REL);
} else {
/* Change schedule policy */
mc_set_schedule_policy(MIGRATE_TARGET_CORE);
}
out:
mutex_unlock(&boost_lock);
return ret;
error:
mc_boost_usage_count--;
mutex_unlock(&boost_lock);
return ret;
}
int secos_booster_stop(void)
{
int ret = 0;
mutex_lock(&boost_lock);
mc_boost_usage_count--;
mc_set_schedule_policy(DEFAULT_LITTLE_CORE);
if (mc_boost_usage_count > 0) {
goto out;
} else if (mc_boost_usage_count == 0) {
hrtimer_cancel(&timer);
pr_debug("%s: mc switch to little core\n", __func__);
ret = mc_switch_core(current_core);
if (ret)
pr_err("%s: mc switch core failed. err:%d\n", __func__, ret);
secos_booster_request_pm_qos(&secos_booster_cluster1_qos, 0);
} else {
/* mismatched usage count */
pr_warn("boost usage count sync mismatched. count : %d\n", mc_boost_usage_count);
mc_boost_usage_count = 0;
}
out:
mutex_unlock(&boost_lock);
return ret;
}
static int secos_booster_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *dummy)
{
mutex_lock(&boost_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
is_suspend_prepared = true;
break;
case PM_POST_SUSPEND:
is_suspend_prepared = false;
break;
}
mutex_unlock(&boost_lock);
return NOTIFY_OK;
}
static struct notifier_block secos_booster_pm_notifier_block = {
.notifier_call = secos_booster_pm_notifier,
};
static int __init secos_booster_init(void)
{
int ret;
mutex_init(&boost_lock);
ret = mc_timer_init();
if (ret) {
pr_err("%s: mc timer init error :%d\n", __func__, ret);
return ret;
}
hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer.function = secos_booster_hrtimer_fn;
max_cpu_freq = cpufreq_quick_get_max(MIGRATE_TARGET_CORE);
pm_qos_add_request(&secos_booster_cluster1_qos, PM_QOS_CLUSTER0_FREQ_MIN, 0);
register_pm_notifier(&secos_booster_pm_notifier_block);
return ret;
}
late_initcall(secos_booster_init);

View file

@ -0,0 +1,31 @@
/* linux/arch/arm/mach-exynos/include/mach/secos_booster.h
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Header file for secure OS booster API
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __LINUX_SECOS_BOOST_H__
#define __LINUX_SECOS_BOOST_H__
/*
* Secure OS Boost Policy
*/
enum secos_boost_policy {
MAX_PERFORMANCE,
MID_PERFORMANCE,
MIN_PERFORMANCE,
STB_PERFORMANCE,
PERFORMANCE_MAX_CNT,
};
int secos_booster_start(enum secos_boost_policy policy);
int secos_booster_stop(void);
#endif
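A hedged usage sketch: a kernel caller brackets a latency-critical TEE workload with start/stop, and for STB_PERFORMANCE a boost duration in milliseconds can be packed into the upper 16 bits of the policy word, mirroring the BOOST_TIME_OFFSET decoding in secos_booster.c. Both functions below are illustrative assumptions.

#include <secos_booster.h>

/* Hypothetical caller of the booster API declared above. */
static int example_tee_heavy_op(void)
{
	int ret;

	/* Migrate the secure OS and raise the CPU frequency floor */
	ret = secos_booster_start(MAX_PERFORMANCE);
	if (ret)
		return ret;
	/* ... issue the expensive TEE commands here ... */
	return secos_booster_stop();
}

/* Self-terminating ~500 ms boost; the literal 16 matches
 * BOOST_TIME_OFFSET in secos_booster.c. */
static int example_stb_boost(void)
{
	return secos_booster_start((enum secos_boost_policy)
				   (STB_PERFORMANCE | (500 << 16)));
}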

View file

@ -0,0 +1,10 @@
#
# Secure OS control configuration
#
config SECURE_OS_CONTROL
bool "Secure OS control"
depends on TRUSTONIC_TEE
---help---
Enable the Secure OS control sysfs interface.
It can migrate the secure OS to another core
and report which core the secure OS currently runs on.

View file

@ -0,0 +1,11 @@
#
# Makefile for Secure OS control sysfs
#
obj-$(CONFIG_SECURE_OS_CONTROL) += sec_os_ctrl.o
# MobiCore kernel driver path
GUD_ROOT_FOLDER := drivers/gud/gud-exynos7570/
# Includes MobiCore kernel driver
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public

View file

@ -0,0 +1,119 @@
/* drivers/gud/sec-os-ctrl/sec_os_ctrl.c
*
* Secure OS control driver for Samsung Exynos
*
* Copyright (c) 2014 Samsung Electronics
* http://www.samsungsemi.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/mutex.h>
#define DEFAULT_LITTLE_CORE 1
#define DEFAULT_BIG_CORE 4
#define ASCII_TO_DIGIT_NUM(ascii) (ascii - '0')
static unsigned int current_core, new_core;
static DEFINE_MUTEX(sec_os_ctrl_lock);
int mc_switch_core(uint32_t core_num);
uint32_t mc_active_core(void);
static struct bus_type sec_os_ctrl_subsys = {
.name = "sec_os_ctrl",
.dev_name = "sec_os_ctrl",
};
/* Migrate Secure OS */
static ssize_t migrate_os_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = 0;
unsigned int core_num = 0;
/* Select only big or LITTLE */
if ((buf[0] != 'L') && (buf[0] != 'b')) {
pr_err("Invalid core number\n");
return count;
}
/* Derive core number */
core_num = ASCII_TO_DIGIT_NUM(buf[1]);
if (buf[0] == 'L') {
if ((buf[1] == 0xA) || (buf[1] == 0x0)) { /* if LF(Line Feed, 0xA) or NULL(0x0) */
new_core = DEFAULT_LITTLE_CORE;
} else if (core_num < 4) { /* From core 0 to core 3 */
new_core = core_num;
} else {
pr_err("[LITTLE] Enter correct core number(0~3)\n");
return count;
}
} else if (buf[0] == 'b') {
if ((buf[1] == 0xA) || (buf[1] == 0x0)) { /* if LF(Line Feed, 0xA) or NULL(0x0) */
new_core = DEFAULT_BIG_CORE;
} else if (core_num < 4) { /* From core 0 to core 3 */
new_core = core_num + 4;
} else {
pr_err("[big] Enter correct core number(0~3)\n");
return count;
}
}
pr_info("Secure OS will be migrated into core [%d]\n", new_core);
if (mutex_lock_interruptible(&sec_os_ctrl_lock)) {
pr_err("Fail to get lock\n");
return count;
}
ret = mc_switch_core(new_core);
mutex_unlock(&sec_os_ctrl_lock);
if (ret != 0) {
pr_err("Secure OS migration is failed!\n");
pr_err("Return value = %d\n", ret);
return count;
}
return count;
}
/* The current core where Secure OS is on */
static ssize_t current_core_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
current_core = mc_active_core();
return sprintf(buf, "Secure OS is on core [%c%d]\n",
(current_core < 4) ? 'L' : 'b', (current_core & 3));
}
static struct kobj_attribute migrate_os_attr =
__ATTR(migrate_os, 0600, NULL, migrate_os_store);
static struct kobj_attribute current_core_attr =
__ATTR(current_core, 0600, current_core_show, NULL);
static struct attribute *sec_os_ctrl_sysfs_attrs[] = {
&migrate_os_attr.attr,
&current_core_attr.attr,
NULL,
};
static struct attribute_group sec_os_ctrl_sysfs_group = {
.attrs = sec_os_ctrl_sysfs_attrs,
};
static const struct attribute_group *sec_os_ctrl_sysfs_groups[] = {
&sec_os_ctrl_sysfs_group,
NULL,
};
static int __init sec_os_ctrl_init(void)
{
return subsys_system_register(&sec_os_ctrl_subsys, sec_os_ctrl_sysfs_groups);
}
late_initcall(sec_os_ctrl_init);
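For illustration, a hypothetical user-space snippet driving the nodes registered above; the /sys path follows the subsys_system_register() convention but is an assumption, as is migrate_secure_os() itself.

#include <stdio.h>

int migrate_secure_os(void)
{
	/* Assumed path: subsystem "sec_os_ctrl" under /sys/devices/system */
	FILE *f = fopen("/sys/devices/system/sec_os_ctrl/migrate_os", "w");

	if (!f)
		return -1;
	/* "b\n" selects the default big core; "b2" would pick big core 2,
	 * i.e. physical core 6 after the +4 offset in migrate_os_store(). */
	fputs("b\n", f);
	fclose(f);
	return 0;
}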

View file

@ -0,0 +1,40 @@
#
# MobiCore configuration
#
config TRUSTONIC_TEE
tristate "Trustonic TEE Driver"
depends on ARM || ARM64
default y
---help---
Enable Trustonic TEE support
config TRUSTONIC_TEE_LPAE
bool "Trustonic TEE uses LPAE"
depends on TRUSTONIC_TEE
default y if ARM64
default n if ARM
---help---
Enable Trustonic TEE 64-bit physical addresses support
config MOBICORE_DEBUG
bool "MobiCore Module debug mode"
depends on TRUSTONIC_TEE
default n
---help---
Enable debug mode in the MobiCore driver.
It enables printing information about MobiCore operations.
config TRUSTONIC_TRUSTED_UI
tristate "<t-base TUI"
depends on TRUSTONIC_TEE
---help---
Enable <t-base Trusted User Interface
config TRUSTONIC_TRUSTED_UI_FB_BLANK
bool "<t-base TUI with fb_blank"
depends on TRUSTONIC_TRUSTED_UI
---help---
Blank the framebuffer before starting a TUI session
source "drivers/gud/gud-exynos7870/sec-os-ctrl/Kconfig"
source "drivers/gud/gud-exynos7870/sec-os-booster/Kconfig"

View file

@ -0,0 +1,9 @@
#
# Makefile for the <t-base core and trusted UI drivers
#
obj-$(CONFIG_TRUSTONIC_TEE) := MobiCoreDriver/
obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui/
obj-$(CONFIG_SECURE_OS_CONTROL) += sec-os-ctrl/
obj-$(CONFIG_SECURE_OS_BOOSTER_API) += sec-os-booster/

View file

@ -0,0 +1,32 @@
#
# Makefile for the <t-base core driver
#
GUD_ROOT_FOLDER := drivers/gud/gud-exynos7870
# add our modules to kernel.
obj-$(CONFIG_TRUSTONIC_TEE) += mcDrvModule.o
mcDrvModule-y := \
admin.o \
client.o \
clientlib.o \
clock.o \
fastcall.o \
logging.o \
main.o \
mcp.o \
mmu.o \
pm.o \
scheduler.o \
session.o \
user.o
# Release mode by default
ccflags-y += -DNDEBUG
ccflags-y += -Wno-declaration-after-statement
ccflags-$(CONFIG_MOBICORE_DEBUG) += -DDEBUG
# MobiCore Driver includes
ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver

View file

@ -0,0 +1,934 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/delay.h>
#include "public/mc_linux.h"
#include "public/mc_admin.h"
#include "mci/mcloadformat.h"
#include "main.h"
#include "mmu.h" /* For load_check and load_token */
#include "mcp.h"
#include "client.h"
#include "admin.h"
static struct admin_ctx {
atomic_t daemon_counter;
int (*tee_start_cb)(void);
void (*tee_stop_cb)(void);
int last_start_ret;
} admin_ctx;
static struct mc_admin_driver_request {
/* Global */
struct mutex mutex; /* Protects access to this struct */
struct mutex states_mutex; /* Protect access to the states */
enum client_state {
IDLE,
REQUEST_SENT,
BUFFERS_READY,
} client_state;
enum server_state {
NOT_CONNECTED, /* Device not open */
READY, /* Waiting for requests */
REQUEST_RECEIVED, /* Got a request, is working */
RESPONSE_SENT, /* Has sent a response header */
DATA_SENT, /* Blocked until data is consumed */
} server_state;
/* Request */
u32 request_id;
struct mc_admin_request request;
struct completion client_complete;
/* Response */
struct mc_admin_response response;
struct completion server_complete;
void *buffer; /* Reception buffer (pre-allocated) */
size_t size; /* Size of the reception buffer */
} g_request;
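/*
 * Handshake sketch, reconstructed from the state enums above and the
 * write() handler below; the daemon's read() side is not shown here, so
 * its role is an assumption:
 *
 *   kernel client                        daemon (server)
 *   request_send(): fill g_request --->  picks up the request (read)
 *   wait for server_complete             writes the response header
 *                                        (REQUEST_RECEIVED -> RESPONSE_SENT)
 *   request_receive(): arm buffer --->   writes the payload
 *   wait for server_complete             (DATA_SENT -> READY)
 *   back to IDLE
 *
 * request_cancel() aborts a pending exchange on any client-side error.
 */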
static struct tee_object *tee_object_alloc(bool is_sp_trustlet, size_t length)
{
struct tee_object *obj;
size_t size = sizeof(*obj) + length;
size_t header_length = 0;
/* Determine required size */
if (is_sp_trustlet) {
/* Need space for lengths info and containers */
header_length = sizeof(struct mc_blob_len_info);
size += header_length + 3 * MAX_SO_CONT_SIZE;
}
/* Allocate memory */
obj = vzalloc(size);
if (!obj)
return NULL;
/* A non-zero header_length indicates that we have a SP trustlet */
obj->header_length = header_length;
obj->length = length;
return obj;
}
void tee_object_free(struct tee_object *robj)
{
vfree(robj);
}
static inline void client_state_change(enum client_state state)
{
mutex_lock(&g_request.states_mutex);
g_request.client_state = state;
mutex_unlock(&g_request.states_mutex);
}
static inline bool client_state_is(enum client_state state)
{
bool is;
mutex_lock(&g_request.states_mutex);
is = g_request.client_state == state;
mutex_unlock(&g_request.states_mutex);
return is;
}
static inline void server_state_change(enum server_state state)
{
mutex_lock(&g_request.states_mutex);
g_request.server_state = state;
mutex_unlock(&g_request.states_mutex);
}
static inline bool server_state_is(enum server_state state)
{
bool is;
mutex_lock(&g_request.states_mutex);
is = g_request.server_state == state;
mutex_unlock(&g_request.states_mutex);
return is;
}
static void request_cancel(void);
static int request_send(u32 command, const struct mc_uuid_t *uuid, bool is_gp,
u32 spid)
{
int counter = 10;
int ret;
/* Prepare request */
mutex_lock(&g_request.states_mutex);
/* Wait a little for daemon to connect */
while ((g_request.server_state == NOT_CONNECTED) && counter--) {
mutex_unlock(&g_request.states_mutex);
ssleep(1);
mutex_lock(&g_request.states_mutex);
}
BUG_ON(g_request.client_state != IDLE);
if (g_request.server_state != READY) {
mutex_unlock(&g_request.states_mutex);
if (g_request.server_state != NOT_CONNECTED) {
mc_dev_err("invalid daemon state %d\n",
g_request.server_state);
ret = -EPROTO;
goto end;
} else {
mc_dev_err("daemon not connected\n");
ret = -ENOTCONN;
goto end;
}
}
memset(&g_request.request, 0, sizeof(g_request.request));
memset(&g_request.response, 0, sizeof(g_request.response));
g_request.request.request_id = g_request.request_id++;
g_request.request.command = command;
if (uuid)
memcpy(&g_request.request.uuid, uuid, sizeof(*uuid));
else
memset(&g_request.request.uuid, 0, sizeof(*uuid));
g_request.request.is_gp = is_gp;
g_request.request.spid = spid;
g_request.client_state = REQUEST_SENT;
mutex_unlock(&g_request.states_mutex);
/* Send request */
complete(&g_request.client_complete);
/* Wait for header (could be interruptible, but then needs more work) */
wait_for_completion(&g_request.server_complete);
/* Server should be waiting with some data for us */
mutex_lock(&g_request.states_mutex);
switch (g_request.server_state) {
case NOT_CONNECTED:
/* Daemon gone */
ret = -EPIPE;
break;
case READY:
/* No data to come, likely an error */
ret = -g_request.response.error_no;
break;
case RESPONSE_SENT:
case DATA_SENT:
/* Normal case, data to come */
ret = 0;
break;
default:
/* Should not happen as complete means the state changed */
mc_dev_err("daemon is in a bad state: %d\n",
g_request.server_state);
ret = -EPIPE;
break;
}
mutex_unlock(&g_request.states_mutex);
end:
if (ret)
request_cancel();
return ret;
}
static int request_receive(void *address, u32 size)
{
/*
* At this point we have received the header and prepared some buffers
* to receive data that we know are coming from the server.
*/
/* Check server state */
bool server_ok;
mutex_lock(&g_request.states_mutex);
server_ok = (g_request.server_state == RESPONSE_SENT) ||
(g_request.server_state == DATA_SENT);
mutex_unlock(&g_request.states_mutex);
if (!server_ok) {
request_cancel();
return -EPIPE;
}
/* Setup reception buffer */
g_request.buffer = address;
g_request.size = size;
client_state_change(BUFFERS_READY);
/* Unlock write of data */
complete(&g_request.client_complete);
/* Wait for data (far too late to be interruptible) */
wait_for_completion(&g_request.server_complete);
/* Reset reception buffer */
g_request.buffer = NULL;
g_request.size = 0;
/* Return to idle state */
client_state_change(IDLE);
return 0;
}
/* Must be called instead of request_receive() to cancel a pending request */
static void request_cancel(void)
{
/* Unlock write of data */
mutex_lock(&g_request.states_mutex);
if (g_request.server_state == DATA_SENT)
complete(&g_request.client_complete);
/* Return to idle state */
g_request.client_state = IDLE;
mutex_unlock(&g_request.states_mutex);
}
static int admin_get_root_container(void *address)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_ROOT_CONTAINER, NULL, false, 0);
if (ret)
goto end;
/* Check length against max */
if (g_request.response.length >= MAX_SO_CONT_SIZE) {
request_cancel();
mc_dev_err("response length exceeds maximum\n");
ret = -EREMOTEIO;
goto end;
}
/* Get data */
ret = request_receive(address, g_request.response.length);
if (!ret)
ret = g_request.response.length;
end:
mutex_unlock(&g_request.mutex);
return ret;
}
static int admin_get_sp_container(void *address, u32 spid)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_SP_CONTAINER, NULL, false, spid);
if (ret)
goto end;
/* Check length against max */
if (g_request.response.length >= MAX_SO_CONT_SIZE) {
request_cancel();
mc_dev_err("response length exceeds maximum\n");
ret = -EREMOTEIO;
goto end;
}
/* Get data */
ret = request_receive(address, g_request.response.length);
if (!ret)
ret = g_request.response.length;
end:
mutex_unlock(&g_request.mutex);
return ret;
}
static int admin_get_trustlet_container(void *address,
const struct mc_uuid_t *uuid, u32 spid)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_TRUSTLET_CONTAINER, uuid, false, spid);
if (ret)
goto end;
/* Check length against max */
if (g_request.response.length >= MAX_SO_CONT_SIZE) {
request_cancel();
mc_dev_err("response length exceeds maximum\n");
ret = -EREMOTEIO;
goto end;
}
/* Get data */
ret = request_receive(address, g_request.response.length);
if (!ret)
ret = g_request.response.length;
end:
mutex_unlock(&g_request.mutex);
return ret;
}
static struct tee_object *admin_get_trustlet(const struct mc_uuid_t *uuid,
bool is_gp, u32 *spid)
{
struct tee_object *obj = NULL;
bool is_sp_tl;
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_GET_TRUSTLET, uuid, is_gp, 0);
if (ret)
goto end;
/* Allocate memory */
is_sp_tl = g_request.response.service_type == SERVICE_TYPE_SP_TRUSTLET;
obj = tee_object_alloc(is_sp_tl, g_request.response.length);
if (!obj) {
request_cancel();
ret = -ENOMEM;
goto end;
}
/* Get data */
ret = request_receive(&obj->data[obj->header_length], obj->length);
*spid = g_request.response.spid;
end:
mutex_unlock(&g_request.mutex);
if (ret)
return ERR_PTR(ret);
return obj;
}
static void mc_admin_sendcrashdump(void)
{
int ret = 0;
/* Lock communication channel */
mutex_lock(&g_request.mutex);
/* Send request and wait for header */
ret = request_send(MC_DRV_SIGNAL_CRASH, NULL, false, 0);
if (ret)
goto end;
/* Done */
request_cancel();
end:
mutex_unlock(&g_request.mutex);
}
static int tee_object_make(u32 spid, struct tee_object *obj)
{
struct mc_blob_len_info *l_info = (struct mc_blob_len_info *)obj->data;
u8 *address = &obj->data[obj->header_length + obj->length];
struct mclf_header_v2 *thdr;
int ret;
/* Get root container */
ret = admin_get_root_container(address);
if (ret < 0)
goto err;
l_info->root_size = ret;
address += ret;
/* Get SP container */
ret = admin_get_sp_container(address, spid);
if (ret < 0)
goto err;
l_info->sp_size = ret;
address += ret;
/* Get trustlet container */
thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
ret = admin_get_trustlet_container(address, &thdr->uuid, spid);
if (ret < 0)
goto err;
l_info->ta_size = ret;
address += ret;
/* Setup lengths information */
l_info->magic = MC_TLBLOBLEN_MAGIC;
obj->length += sizeof(*l_info);
obj->length += l_info->root_size + l_info->sp_size + l_info->ta_size;
ret = 0;
err:
return ret;
}
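/*
 * Sketch of the blob assembled above, inferred from this function and
 * tee_object_alloc(); treat the exact on-wire expectations of the SWd
 * as an assumption:
 *
 *   obj->data: | mc_blob_len_info | trustlet binary | root container |
 *              | SP container | TA container |
 *
 * l_info->{root,sp,ta}_size let the secure world locate the containers
 * appended behind the trustlet image.
 */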
struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length)
{
char __user *addr = (char __user *)address;
struct tee_object *obj;
u8 *data;
struct mclf_header_v2 thdr;
int ret;
/* Check length */
if (length < sizeof(thdr)) {
mc_dev_err("buffer shorter than header size\n");
return ERR_PTR(-EFAULT);
}
/* Read header */
if (copy_from_user(&thdr, addr, sizeof(thdr))) {
mc_dev_err("header: copy_from_user failed\n");
return ERR_PTR(-EFAULT);
}
/* Allocate memory */
obj = tee_object_alloc(thdr.service_type == SERVICE_TYPE_SP_TRUSTLET,
length);
if (!obj)
return ERR_PTR(-ENOMEM);
/* Copy header */
data = &obj->data[obj->header_length];
memcpy(data, &thdr, sizeof(thdr));
/* Copy the rest of the data */
data += sizeof(thdr);
if (copy_from_user(data, &addr[sizeof(thdr)], length - sizeof(thdr))) {
mc_dev_err("data: copy_from_user failed\n");
vfree(obj);
return ERR_PTR(-EFAULT);
}
if (obj->header_length) {
ret = tee_object_make(spid, obj);
if (ret) {
vfree(obj);
return ERR_PTR(ret);
}
}
return obj;
}
struct tee_object *tee_object_select(const struct mc_uuid_t *uuid)
{
struct tee_object *obj;
struct mclf_header_v2 *thdr;
obj = tee_object_alloc(false, sizeof(*thdr));
if (!obj)
return ERR_PTR(-ENOMEM);
thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
memcpy(&thdr->uuid, uuid, sizeof(thdr->uuid));
return obj;
}
struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp)
{
struct tee_object *obj;
u32 spid = 0;
/* admin_get_trustlet creates the right object based on service type */
obj = admin_get_trustlet(uuid, is_gp, &spid);
if (IS_ERR(obj))
return obj;
/* SP trustlet: create full secure object with all containers */
if (obj->header_length) {
int ret;
/* Do not return EINVAL in this case as SPID was not found */
if (!spid) {
vfree(obj);
return ERR_PTR(-ENOENT);
}
ret = tee_object_make(spid, obj);
if (ret) {
vfree(obj);
return ERR_PTR(ret);
}
}
return obj;
}
static inline int load_driver(struct tee_client *client,
struct mc_admin_load_info *info)
{
struct tee_object *obj;
struct mclf_header_v2 *thdr;
struct mc_identity identity = {
.login_type = TEEC_LOGIN_PUBLIC,
};
uintptr_t dci = 0;
u32 dci_len = 0;
u32 sid;
int ret;
obj = tee_object_read(info->spid, info->address, info->length);
if (IS_ERR(obj))
return PTR_ERR(obj);
thdr = (struct mclf_header_v2 *)&obj->data[obj->header_length];
if (!(thdr->flags & MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE)) {
/*
* The driver requires a DCI, although we won't be able to use
* it to communicate.
*/
dci_len = PAGE_SIZE;
ret = client_cbuf_create(client, dci_len, &dci, NULL);
if (ret)
goto end;
}
/* Open session */
ret = client_add_session(client, obj, dci, dci_len, &sid, false,
&identity);
if (!ret)
mc_dev_devel("driver loaded with sid %x", sid);
/*
* Always 'free' the buffer here: it stays allocated as long as it is
* in use, and is never freed otherwise
*/
client_cbuf_free(client, dci);
end:
vfree(obj);
return ret;
}
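/* Wrap the user-space token buffer in an MMU table and hand it to mcp_load_token() */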
static inline int load_token(struct mc_admin_load_info *token)
{
struct tee_mmu *mmu;
struct mcp_buffer_map map;
int ret;
mmu = tee_mmu_create(current, (void *)(uintptr_t)token->address,
token->length);
if (IS_ERR(mmu))
return PTR_ERR(mmu);
tee_mmu_buffer(mmu, &map);
ret = mcp_load_token(token->address, &map);
tee_mmu_delete(mmu);
return ret;
}
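/* Have the TEE verify a TA binary via mcp_load_check(), without opening a session */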
static inline int load_check(struct mc_admin_load_info *info)
{
struct tee_object *obj;
struct tee_mmu *mmu;
struct mcp_buffer_map map;
int ret;
obj = tee_object_read(info->spid, info->address, info->length);
if (IS_ERR(obj))
return PTR_ERR(obj);
mmu = tee_mmu_create(NULL, obj->data, obj->length);
if (IS_ERR(mmu))
return PTR_ERR(mmu);
tee_mmu_buffer(mmu, &map);
ret = mcp_load_check(obj, &map);
tee_mmu_delete(mmu);
return ret;
}
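/*
* Daemon side of the driver-to-daemon request protocol. A sketch of
* the write() state machine as implemented below:
*
*   REQUEST_RECEIVED -- write(response header) --> RESPONSE_SENT (data follows)
*                                              \-> READY         (no data)
*   RESPONSE_SENT    -- write(response data) ----> READY
*
* Any protocol violation reports EPIPE to the requester and resets the
* server state to READY.
*/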
static ssize_t admin_write(struct file *file, const char __user *user,
size_t len, loff_t *off)
{
int ret;
/* No offset allowed [yet] */
if (*off) {
g_request.response.error_no = EPIPE;
ret = -ECOMM;
goto err;
}
if (server_state_is(REQUEST_RECEIVED)) {
/* Check client state */
if (!client_state_is(REQUEST_SENT)) {
g_request.response.error_no = EPIPE;
ret = -EPIPE;
goto err;
}
/* Receive response header */
if (copy_from_user(&g_request.response, user,
sizeof(g_request.response))) {
g_request.response.error_no = EPIPE;
ret = -ECOMM;
goto err;
}
/* Check request ID */
if (g_request.request.request_id !=
g_request.response.request_id) {
g_request.response.error_no = EPIPE;
ret = -EBADE;
goto err;
}
/* Response header is acceptable */
ret = sizeof(g_request.response);
if (g_request.response.length)
server_state_change(RESPONSE_SENT);
else
server_state_change(READY);
goto end;
} else if (server_state_is(RESPONSE_SENT)) {
/* Server is waiting */
server_state_change(DATA_SENT);
/* Get data */
ret = wait_for_completion_interruptible(
&g_request.client_complete);
/* Server received a signal, let's see if it tries again */
if (ret) {
server_state_change(RESPONSE_SENT);
return ret;
}
/* Check client state */
if (!client_state_is(BUFFERS_READY)) {
g_request.response.error_no = EPIPE;
ret = -EPIPE;
goto err;
}
/* We do not deal with several writes */
if (len != g_request.size)
len = g_request.size;
ret = copy_from_user(g_request.buffer, user, len);
if (ret) {
g_request.response.error_no = EPIPE;
ret = -ECOMM;
goto err;
}
ret = len;
server_state_change(READY);
goto end;
} else {
ret = -ECOMM;
goto err;
}
err:
server_state_change(READY);
end:
complete(&g_request.server_complete);
return ret;
}
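/*
* MC_ADMIN_IO_GET_DRIVER_REQUEST blocks the daemon until the driver
* posts a request; the daemon then answers through admin_write() above.
*/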
static long admin_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *uarg = (void __user *)arg;
int ret = -EINVAL;
mc_dev_devel("%u from %s\n", _IOC_NR(cmd), current->comm);
switch (cmd) {
case MC_ADMIN_IO_GET_DRIVER_REQUEST: {
/* Block until a request is available */
ret = wait_for_completion_interruptible(
&g_request.client_complete);
if (ret)
/* Interrupted by signal */
break;
/* Check client state */
if (!client_state_is(REQUEST_SENT)) {
g_request.response.error_no = EPIPE;
complete(&g_request.server_complete);
ret = -EPIPE;
break;
}
/* Send request (the driver request mutex is held) */
ret = copy_to_user(uarg, &g_request.request,
sizeof(g_request.request));
if (ret) {
server_state_change(READY);
complete(&g_request.server_complete);
ret = -EPROTO;
break;
}
server_state_change(REQUEST_RECEIVED);
break;
}
case MC_ADMIN_IO_GET_INFO: {
struct mc_admin_driver_info info;
info.drv_version = MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
MCDRVMODULEAPI_VERSION_MINOR);
info.initial_cmd_id = g_request.request_id;
ret = copy_to_user(uarg, &info, sizeof(info));
break;
}
case MC_ADMIN_IO_LOAD_DRIVER: {
struct tee_client *client = file->private_data;
struct mc_admin_load_info info;
if (copy_from_user(&info, uarg, sizeof(info))) {
ret = -EFAULT;
break;
}
/* Make sure we have a local client */
if (!client) {
client = client_create(true);
/* Store client for future use/close */
file->private_data = client;
}
if (!client) {
ret = -ENOMEM;
break;
}
ret = load_driver(client, &info);
break;
}
case MC_ADMIN_IO_LOAD_TOKEN: {
struct mc_admin_load_info info;
if (copy_from_user(&info, uarg, sizeof(info))) {
ret = -EFAULT;
break;
}
ret = load_token(&info);
break;
}
case MC_ADMIN_IO_LOAD_CHECK: {
struct mc_admin_load_info info;
if (copy_from_user(&info, uarg, sizeof(info))) {
ret = -EFAULT;
break;
}
ret = load_check(&info);
break;
}
default:
ret = -ENOIOCTLCMD;
}
return ret;
}
/*
* admin_release() - called from user space as close(...)
* The client data is freed and the associated memory pages are unreserved.
*
* @inode
* @file
*
* Returns 0
*/
static int admin_release(struct inode *inode, struct file *file)
{
/* Close client if any */
if (file->private_data)
client_close((struct tee_client *)file->private_data);
/* Requests from driver to daemon */
mutex_lock(&g_request.states_mutex);
mc_dev_info("daemon disconnected\n");
g_request.server_state = NOT_CONNECTED;
/* A non-idle client state indicates that a thread is waiting */
if (g_request.client_state != IDLE) {
g_request.response.error_no = ESHUTDOWN;
complete(&g_request.server_complete);
}
mutex_unlock(&g_request.states_mutex);
atomic_set(&admin_ctx.daemon_counter, 0);
/*
* The return value is irrelevant here: most apps ignore the return
* value of close(), and there is little we could do to recover anyway
*/
return 0;
}
static int admin_open(struct inode *inode, struct file *file)
{
/*
* If the daemon is already set we can't allow anybody else to open
* the admin interface.
*/
if (atomic_cmpxchg(&admin_ctx.daemon_counter, 0, 1) != 0) {
mc_dev_err("Daemon is already connected\n");
return -EPROTO;
}
/* Any value will do */
g_request.request_id = 42;
/* Setup the usual variables */
mc_dev_devel("accept %s as TEE daemon\n", current->comm);
/*
* daemon is connected so now we can safely suppose
* the secure world is loaded too
*/
if (admin_ctx.last_start_ret > 0)
admin_ctx.last_start_ret = admin_ctx.tee_start_cb();
/* Failed to start the TEE, either now or before */
if (admin_ctx.last_start_ret) {
atomic_set(&admin_ctx.daemon_counter, 0);
return admin_ctx.last_start_ret;
}
/* Requests from driver to daemon */
server_state_change(READY);
mc_dev_info("daemon connected\n");
return 0;
}
/* File operations of the admin device */
static const struct file_operations mc_admin_fops = {
.owner = THIS_MODULE,
.open = admin_open,
.release = admin_release,
.unlocked_ioctl = admin_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = admin_ioctl,
#endif
.write = admin_write,
};
int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
void (*tee_stop_cb)(void))
{
atomic_set(&admin_ctx.daemon_counter, 0);
/* Requests from driver to daemon */
mutex_init(&g_request.mutex);
mutex_init(&g_request.states_mutex);
init_completion(&g_request.client_complete);
init_completion(&g_request.server_complete);
mcp_register_crashhandler(mc_admin_sendcrashdump);
/* Create char device */
cdev_init(cdev, &mc_admin_fops);
/* Register the call back for starting the secure world */
admin_ctx.tee_start_cb = tee_start_cb;
admin_ctx.tee_stop_cb = tee_stop_cb;
admin_ctx.last_start_ret = 1;
return 0;
}
void mc_admin_exit(void)
{
if (!admin_ctx.last_start_ret)
admin_ctx.tee_stop_cb();
}
View file
@ -0,0 +1,31 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_ADMIN_H_
#define _MC_ADMIN_H_
struct cdev;
struct mc_uuid_t;
struct tee_object;
int mc_admin_init(struct cdev *cdev, int (*tee_start_cb)(void),
void (*tee_stop_cb)(void));
void mc_admin_exit(void);
struct tee_object *tee_object_select(const struct mc_uuid_t *uuid);
struct tee_object *tee_object_get(const struct mc_uuid_t *uuid, bool is_gp);
struct tee_object *tee_object_read(u32 spid, uintptr_t address, size_t length);
void tee_object_free(struct tee_object *object);
#endif /* _MC_ADMIN_H_ */
View file
@ -0,0 +1,88 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_ARM_H_
#define _MC_ARM_H_
#include "main.h"
#ifdef CONFIG_ARM64
static inline bool has_security_extensions(void)
{
return true;
}
static inline bool is_secure_mode(void)
{
return false;
}
#else
/*
* ARM TrustZone specific masks and modes
* Vanilla Linux is unaware of the TrustZone extension, i.e.
* arch/arm/include/asm/ptrace.h does not define monitor mode.
* The TZ bits in the CPUID are not defined either; the ARM port uses
* magic numbers, see arch/arm/kernel/setup.c
*/
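/*
* ARM_SECURITY_EXTENSION_MASK below selects the Security field of the
* CP15 feature register read in has_security_extensions() (ID_PFR1,
* to the best of our knowledge).
*/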
#define ARM_MONITOR_MODE (0x16) /*(0b10110)*/
#define ARM_SECURITY_EXTENSION_MASK (0x30)
/* check if CPU supports the ARM TrustZone Security Extensions */
static inline bool has_security_extensions(void)
{
u32 fea = 0;
asm volatile(
"mrc p15, 0, %[fea], cr0, cr1, 0" :
[fea]"=r" (fea));
mc_dev_devel("CPU Features: 0x%X\n", fea);
/*
* If the CPU features ID has 0 for security features then the CPU
* doesn't support TrustZone at all!
*/
if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
return false;
return true;
}
/* check if running in secure mode */
static inline bool is_secure_mode(void)
{
u32 cpsr = 0;
u32 nsacr = 0;
asm volatile(
"mrc p15, 0, %[nsacr], cr1, cr1, 2\n"
"mrs %[cpsr], cpsr\n" :
[nsacr]"=r" (nsacr),
[cpsr]"=r"(cpsr));
mc_dev_devel("CPRS.M = set to 0x%X\n", cpsr & MODE_MASK);
mc_dev_devel("SCR.NS = set to 0x%X\n", nsacr);
/*
* If NSACR contains its reset value (0), then most likely we are
* running in secure mode.
* If the CPSR mode is set to monitor mode, then we cannot load!
*/
if (nsacr == 0 || ((cpsr & MODE_MASK) == ARM_MONITOR_MODE))
return true;
return false;
}
#endif
#endif /* _MC_ARM_H_ */
View file
@ -0,0 +1,15 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
"t-base-EXYNOS64-Android-310A-V002-20150826_162546_62"
View file
@ -0,0 +1,928 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>
#include "public/mc_linux.h"
#include "public/mc_admin.h"
#include "main.h"
#include "admin.h" /* tee_object* */
#include "mcp.h"
#include "mmu.h"
#include "session.h"
#include "client.h"
/* Context */
static struct client_ctx {
/* Clients list */
struct mutex clients_lock;
struct list_head clients;
/* Clients waiting for their last cbuf to be released */
struct mutex closing_clients_lock;
struct list_head closing_clients;
} client_ctx;
/*
* Contiguous buffer allocated to TLCs.
* These buffers are used as world shared memory (wsm) to share with
* secure world.
*/
struct cbuf {
/* Client this cbuf belongs to */
struct tee_client *client;
/* List element for client's list of cbuf's */
struct list_head list;
/* Number of references kept to this buffer */
struct kref kref;
/* virtual Kernel start address */
uintptr_t addr;
/* virtual Userspace start address */
uintptr_t uaddr;
/* physical start address */
phys_addr_t phys;
/* 2^order = number of pages allocated */
unsigned int order;
/* Length of memory mapped to user */
u32 len;
/* Has been freed via the API */
bool api_freed;
};
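/*
* cbuf reference scheme, as used below: the creator holds one
* reference until client_cbuf_free(), each user-space mapping holds
* one (cbuf_vm_open/cbuf_vm_close), and cbuf_get_by_addr() takes one
* per lookup. The pages are only released in cbuf_release(), once the
* last reference is put.
*/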
static inline void cbuf_get(struct cbuf *cbuf)
{
kref_get(&cbuf->kref);
}
static void cbuf_release(struct kref *kref)
{
struct cbuf *cbuf = container_of(kref, struct cbuf, kref);
struct tee_client *client = cbuf->client;
/* Unlist from client */
mutex_lock(&client->cbufs_lock);
list_del_init(&cbuf->list);
mutex_unlock(&client->cbufs_lock);
/* Release client token */
client_put(client);
/* Free */
free_pages(cbuf->addr, cbuf->order);
mc_dev_devel("freed cbuf %p: client %p addr %lx uaddr %lx len %u\n",
cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
kfree(cbuf);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_cbufs);
}
static inline void cbuf_put(struct cbuf *cbuf)
{
kref_put(&cbuf->kref, cbuf_release);
}
/*
* Map a kernel contiguous buffer to user space
*/
static int cbuf_map(struct vm_area_struct *vmarea, uintptr_t addr, u32 len,
uintptr_t *uaddr)
{
int ret;
if (WARN(!uaddr, "No uaddr pointer available"))
return -EINVAL;
if (WARN(!vmarea, "No vma available"))
return -EINVAL;
if (WARN(!addr, "No addr available"))
return -EINVAL;
if (len != (u32)(vmarea->vm_end - vmarea->vm_start)) {
mc_dev_err("cbuf incompatible with vma\n");
return -EINVAL;
}
vmarea->vm_flags |= VM_IO;
ret = remap_pfn_range(vmarea, vmarea->vm_start,
page_to_pfn(virt_to_page(addr)),
vmarea->vm_end - vmarea->vm_start,
vmarea->vm_page_prot);
if (ret) {
*uaddr = 0;
mc_dev_err("User mapping failed\n");
return ret;
}
*uaddr = vmarea->vm_start;
return 0;
}
/*
* Allocate and initialize a client object
*/
struct tee_client *client_create(bool is_from_kernel)
{
struct tee_client *client;
/* Allocate client structure */
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
/* Increment debug counter */
atomic_inc(&g_ctx.c_clients);
/* initialize members */
client->pid = is_from_kernel ? 0 : current->pid;
memcpy(client->comm, current->comm, sizeof(client->comm));
kref_init(&client->kref);
INIT_LIST_HEAD(&client->cbufs);
mutex_init(&client->cbufs_lock);
INIT_LIST_HEAD(&client->sessions);
INIT_LIST_HEAD(&client->closing_sessions);
mutex_init(&client->sessions_lock);
INIT_LIST_HEAD(&client->list);
/* Add client to list of clients */
mutex_lock(&client_ctx.clients_lock);
list_add_tail(&client->list, &client_ctx.clients);
mutex_unlock(&client_ctx.clients_lock);
mc_dev_devel("created client %p\n", client);
return client;
}
/*
* Free client object + all objects it contains.
* Can be called only by last user referencing the client,
* therefore mutex lock seems overkill
*/
static void client_release(struct kref *kref)
{
struct tee_client *client;
client = container_of(kref, struct tee_client, kref);
/* Client is closed, remove from closing list */
mutex_lock(&client_ctx.closing_clients_lock);
list_del(&client->list);
mutex_unlock(&client_ctx.closing_clients_lock);
mc_dev_devel("freed client %p\n", client);
kfree(client);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_clients);
}
void client_put(struct tee_client *client)
{
kref_put(&client->kref, client_release);
}
/*
* Returns true if client is a kernel object.
*/
static inline bool client_is_kernel(struct tee_client *client)
{
return !client->pid;
}
/*
* Set client "closing" state, only if it contains no session.
* Once in "closing" state, system "close" can be called.
* Return: 0 if this state could be set.
*/
int client_freeze(struct tee_client *client)
{
int ret;
/* Check for sessions */
mutex_lock(&client->sessions_lock);
ret = list_empty(&client->sessions) ? 0 : -ENOTEMPTY;
client->closing = ret == 0;
mutex_unlock(&client->sessions_lock);
mc_dev_devel("client %p, exit with %d\n", client, ret);
return ret;
}
/*
* At this point, nobody has access to the client anymore, so no new sessions
* are being created.
*/
static void client_close_sessions(struct tee_client *client)
{
struct tee_session *session;
mutex_lock(&client->sessions_lock);
while (!list_empty(&client->sessions)) {
session = list_first_entry(&client->sessions,
struct tee_session, list);
/* Move session to closing sessions list */
list_move(&session->list, &client->closing_sessions);
/* Call session_close without lock */
mutex_unlock(&client->sessions_lock);
session_close(session);
mutex_lock(&client->sessions_lock);
}
mutex_unlock(&client->sessions_lock);
}
/*
* At this point, nobody has access to the client anymore, so no new contiguous
* buffers are being created.
*/
static void client_close_kernel_cbufs(struct tee_client *client)
{
/* Put buffers allocated and not freed via the kernel API */
if (!client_is_kernel(client))
return;
/* Look for cbufs that the client has not freed and put them */
while (true) {
struct cbuf *cbuf = NULL, *candidate;
mutex_lock(&client->cbufs_lock);
list_for_each_entry(candidate, &client->cbufs, list) {
if (!candidate->api_freed) {
candidate->api_freed = true;
cbuf = candidate;
break;
}
}
mutex_unlock(&client->cbufs_lock);
if (!cbuf)
break;
cbuf_put(cbuf);
}
}
/*
* Release a client and the session and cbuf objects it contains.
* @param client the client to close
*/
void client_close(struct tee_client *client)
{
/* Move client from active clients to closing clients for debug */
mutex_lock(&client_ctx.clients_lock);
mutex_lock(&client_ctx.closing_clients_lock);
list_move(&client->list, &client_ctx.closing_clients);
mutex_unlock(&client_ctx.closing_clients_lock);
mutex_unlock(&client_ctx.clients_lock);
client_close_kernel_cbufs(client);
/* Close all remaining sessions */
client_close_sessions(client);
client_put(client);
mc_dev_devel("client %p closed\n", client);
}
/*
* The TEE is going to die, so get rid of whatever is shared with it
*/
void clients_kill_sessions(void)
{
struct tee_client *client;
mutex_lock(&client_ctx.clients_lock);
list_for_each_entry(client, &client_ctx.clients, list) {
/*
* session_kill() will put the session which should get freed
* and free its wsms/mmus and put any cbuf concerned
*/
mutex_lock(&client->sessions_lock);
while (!list_empty(&client->sessions)) {
struct tee_session *session;
session = list_first_entry(&client->sessions,
struct tee_session, list);
list_del(&session->list);
session_kill(session);
}
mutex_unlock(&client->sessions_lock);
}
mutex_unlock(&client_ctx.clients_lock);
}
/*
* Open a TA for the given client. The TA binary is provided by the daemon.
* @return driver error code
*/
int client_open_session(struct tee_client *client, u32 *session_id,
const struct mc_uuid_t *uuid, uintptr_t tci,
size_t tci_len, bool is_gp_uuid,
struct mc_identity *identity)
{
int err = 0;
u32 sid = 0;
struct tee_object *obj;
/* Get secure object */
obj = tee_object_get(uuid, is_gp_uuid);
if (IS_ERR(obj)) {
/* Try to select secure object inside the SWd if not found */
if ((PTR_ERR(obj) == -ENOENT) && g_ctx.f_ta_auth)
obj = tee_object_select(uuid);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto end;
}
}
/* Open session */
err = client_add_session(client, obj, tci, tci_len, &sid, is_gp_uuid,
identity);
/* Fill in return parameter */
if (!err)
*session_id = sid;
/* Delete secure object */
tee_object_free(obj);
end:
mc_dev_devel("session %x, exit with %d\n", sid, err);
return err;
}
/*
* Open a TA for the given client. The TA binary is provided by the client.
* @return driver error code
*/
int client_open_trustlet(struct tee_client *client, u32 *session_id, u32 spid,
uintptr_t trustlet, size_t trustlet_len,
uintptr_t tci, size_t tci_len)
{
struct tee_object *obj;
struct mc_identity identity = {
.login_type = TEEC_LOGIN_PUBLIC,
};
u32 sid = 0;
int err = 0;
/* Create secure object from user-space trustlet binary */
obj = tee_object_read(spid, trustlet, trustlet_len);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto end;
}
/* Open session */
err = client_add_session(client, obj, tci, tci_len, &sid, false,
&identity);
/* Fill in return parameter */
if (!err)
*session_id = sid;
/* Delete secure object */
tee_object_free(obj);
end:
mc_dev_devel("session %x, exit with %d\n", sid, err);
return err;
}
/*
* Open a TA and add the corresponding session object to the given client
* return: driver error code
*/
int client_add_session(struct tee_client *client, const struct tee_object *obj,
uintptr_t tci, size_t len, u32 *session_id, bool is_gp,
struct mc_identity *identity)
{
struct tee_session *session = NULL;
struct tee_mmu *obj_mmu = NULL;
int ret = 0;
/*
* Create session object with temp sid=0 BEFORE session is started,
* otherwise if a GP TA is started and NWd session object allocation
* fails, we cannot handle the potentially delayed GP closing.
* Adding session to list must be done AFTER it is started (once we have
* sid), therefore it cannot be done within session_create().
*/
session = session_create(client, is_gp, identity);
if (IS_ERR(session))
return PTR_ERR(session);
/* Create blob L2 table (blob is allocated by driver, so task=NULL) */
obj_mmu = tee_mmu_create(NULL, obj->data, obj->length);
if (IS_ERR(obj_mmu)) {
ret = PTR_ERR(obj_mmu);
goto err;
}
/* Open session */
ret = session_open(session, obj, obj_mmu, tci, len);
/* Blob table no more needed in any case */
tee_mmu_delete(obj_mmu);
if (ret)
goto err;
mutex_lock(&client->sessions_lock);
if (unlikely(client->closing)) {
/* Client has been frozen, no more sessions allowed */
ret = -ENODEV;
} else {
/* Add session to client */
list_add_tail(&session->list, &client->sessions);
/* Set sid returned by SWd */
*session_id = session->mcp_session.id;
}
mutex_unlock(&client->sessions_lock);
err:
/* Close or free session on error */
if (ret == -ENODEV) {
/* The session must enter the closing process... */
list_add_tail(&session->list, &client->closing_sessions);
session_close(session);
} else if (ret) {
session_put(session);
}
return ret;
}
/*
* Remove a session object from the client and close the corresponding TA
* Return: 0 if the session was found and closed
*/
int client_remove_session(struct tee_client *client, u32 session_id)
{
struct tee_session *session = NULL, *candidate;
/* Move session from main list to closing list */
mutex_lock(&client->sessions_lock);
list_for_each_entry(candidate, &client->sessions, list) {
if (candidate->mcp_session.id == session_id) {
session = candidate;
list_move(&session->list, &client->closing_sessions);
break;
}
}
mutex_unlock(&client->sessions_lock);
if (!session)
return -ENXIO;
/* Close session */
return session_close(session);
}
/*
* Find a session object and increment its reference counter.
* Object cannot be freed until its counter reaches 0.
* return: pointer to the object, NULL if not found.
*/
static struct tee_session *client_get_session(struct tee_client *client,
u32 session_id)
{
struct tee_session *session = NULL, *candidate;
mutex_lock(&client->sessions_lock);
list_for_each_entry(candidate, &client->sessions, list) {
if (candidate->mcp_session.id == session_id) {
session = candidate;
session_get(session);
break;
}
}
mutex_unlock(&client->sessions_lock);
if (!session)
mc_dev_err("session %x not found\n", session_id);
return session;
}
/*
* Send a notification to TA
* @return driver error code
*/
int client_notify_session(struct tee_client *client, u32 session_id)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Send command to SWd */
ret = session_notify_swd(session);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/*
* Wait for a notification from TA
* @return driver error code
*/
int client_waitnotif_session(struct tee_client *client, u32 session_id,
s32 timeout)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
ret = session_waitnotif(session, timeout);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/*
* Read session exit/termination code
*/
int client_get_session_exitcode(struct tee_client *client, u32 session_id,
s32 *exit_code)
{
struct tee_session *session;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Retrieve error */
*exit_code = session_exitcode(session);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit code %d\n", session_id, *exit_code);
return 0;
}
/* Share a buffer with given TA in SWd */
int client_map_session_wsms(struct tee_client *client, u32 session_id,
struct mc_ioctl_buffer *bufs)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Add buffer to the session */
ret = session_wsms_add(session, bufs);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/* Stop sharing a buffer with SWd */
int client_unmap_session_wsms(struct tee_client *client, u32 session_id,
const struct mc_ioctl_buffer *bufs)
{
struct tee_session *session;
int ret;
/* Find/get session */
session = client_get_session(client, session_id);
if (!session)
return -ENXIO;
/* Remove buffer from session */
ret = session_wsms_remove(session, bufs);
/* Put session */
session_put(session);
mc_dev_devel("session %x, exit with %d\n", session_id, ret);
return ret;
}
/*
* This callback is called on remap
*/
static void cbuf_vm_open(struct vm_area_struct *vmarea)
{
struct cbuf *cbuf = vmarea->vm_private_data;
cbuf_get(cbuf);
}
/*
* This callback is called on unmap
*/
static void cbuf_vm_close(struct vm_area_struct *vmarea)
{
struct cbuf *cbuf = vmarea->vm_private_data;
cbuf_put(cbuf);
}
static const struct vm_operations_struct cbuf_vm_ops = {
.open = cbuf_vm_open,
.close = cbuf_vm_close,
};
/*
* Create a cbuf object and add it to client
*/
int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
struct vm_area_struct *vmarea)
{
int err = 0;
struct cbuf *cbuf = NULL;
unsigned int order;
if (WARN(!client, "No client available"))
return -EINVAL;
if (WARN(!len, "No len available"))
return -EINVAL;
order = get_order(len);
if (order >= MAX_ORDER) {
mc_dev_err("Buffer size too large\n");
return -ENOMEM;
}
/* Allocate buffer descriptor structure */
cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
if (!cbuf)
return -ENOMEM;
/* Increment debug counter */
atomic_inc(&g_ctx.c_cbufs);
/* Allocate buffer */
cbuf->addr = __get_free_pages(GFP_USER | __GFP_ZERO, order);
if (!cbuf->addr) {
kfree(cbuf);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_cbufs);
return -ENOMEM;
}
/* Map to user space if applicable */
if (!client_is_kernel(client)) {
err = cbuf_map(vmarea, cbuf->addr, len, &cbuf->uaddr);
if (err) {
free_pages(cbuf->addr, order);
kfree(cbuf);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_cbufs);
return err;
}
}
/* Init descriptor members */
cbuf->client = client;
cbuf->phys = virt_to_phys((void *)cbuf->addr);
cbuf->len = len;
cbuf->order = order;
kref_init(&cbuf->kref);
INIT_LIST_HEAD(&cbuf->list);
/* Keep cbuf in VMA private data for refcounting (user-space clients) */
if (vmarea) {
vmarea->vm_private_data = cbuf;
vmarea->vm_ops = &cbuf_vm_ops;
}
/* Fill return parameter for k-api */
if (addr)
*addr = cbuf->addr;
/* Get a token on the client */
client_get(client);
/* Add buffer to list */
mutex_lock(&client->cbufs_lock);
list_add_tail(&cbuf->list, &client->cbufs);
mutex_unlock(&client->cbufs_lock);
mc_dev_devel("created cbuf %p: client %p addr %lx uaddr %lx len %u\n",
cbuf, client, cbuf->addr, cbuf->uaddr, cbuf->len);
return err;
}
/*
* Find a contiguous buffer (cbuf) in the cbuf list of given client that
* contains given address and take a reference on it.
* Return pointer to the object, or NULL if not found.
*/
static struct cbuf *cbuf_get_by_addr(struct tee_client *client, uintptr_t addr)
{
struct cbuf *cbuf = NULL, *candidate;
bool is_kernel = client_is_kernel(client);
mutex_lock(&client->cbufs_lock);
list_for_each_entry(candidate, &client->cbufs, list) {
/* Compare to kernel VA or user VA depending on client type */
uintptr_t start = is_kernel ?
candidate->addr : candidate->uaddr;
uintptr_t end = start + candidate->len;
/* Check that (user) cbuf has not been unmapped */
if (!start)
break;
if ((addr >= start) && (addr < end)) {
cbuf = candidate;
break;
}
}
if (cbuf)
cbuf_get(cbuf);
mutex_unlock(&client->cbufs_lock);
return cbuf;
}
/*
* Remove a cbuf object from client, and mark it for freeing.
* Freeing will happen once all current references are released.
*/
int client_cbuf_free(struct tee_client *client, uintptr_t addr)
{
struct cbuf *cbuf = cbuf_get_by_addr(client, addr);
if (!cbuf) {
mc_dev_err("cbuf %lu not found\n", addr);
return -EINVAL;
}
/* Two references to put: the caller's and the one we just took */
cbuf_put(cbuf);
mutex_lock(&client->cbufs_lock);
cbuf->api_freed = true;
mutex_unlock(&client->cbufs_lock);
cbuf_put(cbuf);
return 0;
}
struct tee_mmu *client_mmu_create(struct tee_client *client, uintptr_t va,
u32 len, struct cbuf **cbuf_p)
{
/* Check if buffer is contained in a cbuf */
struct cbuf *cbuf = cbuf_get_by_addr(client, va);
struct task_struct *task = NULL;
struct tee_mmu *mmu;
*cbuf_p = cbuf;
if (cbuf) {
uintptr_t offset;
if (client_is_kernel(client))
offset = va - cbuf->addr;
else
offset = va - cbuf->uaddr;
if ((offset + len) > cbuf->len) {
mc_dev_err("crosses cbuf boundary\n");
cbuf_put(cbuf);
return ERR_PTR(-EINVAL);
}
/* Provide kernel virtual address */
va = cbuf->addr + offset;
} else if (!client_is_kernel(client)) {
/* Provide task if buffer was allocated in user space */
task = current;
}
/* Build MMU table for buffer */
mmu = tee_mmu_create(task, (void *)va, len);
if (IS_ERR_OR_NULL(mmu) && cbuf)
cbuf_put(cbuf);
return mmu;
}
void client_mmu_free(struct tee_client *client, uintptr_t va,
struct tee_mmu *mmu, struct cbuf *cbuf)
{
tee_mmu_delete(mmu);
if (cbuf)
cbuf_put(cbuf);
}
void client_init(void)
{
INIT_LIST_HEAD(&client_ctx.clients);
mutex_init(&client_ctx.clients_lock);
INIT_LIST_HEAD(&client_ctx.closing_clients);
mutex_init(&client_ctx.closing_clients_lock);
}
static inline int cbuf_debug_structs(struct kasnprintf_buf *buf,
struct cbuf *cbuf)
{
return kasnprintf(buf, "\tcbuf %p [%d]: addr %lx uaddr %lx len %u\n",
cbuf, kref_read(&cbuf->kref), cbuf->addr,
cbuf->uaddr, cbuf->len);
}
static int client_debug_structs(struct kasnprintf_buf *buf,
struct tee_client *client, bool is_closing)
{
struct cbuf *cbuf;
struct tee_session *session;
int ret;
if (client->pid)
ret = kasnprintf(buf, "client %p [%d]: %s (%d)%s\n",
client, kref_read(&client->kref),
client->comm, client->pid,
is_closing ? " <closing>" : "");
else
ret = kasnprintf(buf, "client %p [%d]: [kernel]%s\n",
client, kref_read(&client->kref),
is_closing ? " <closing>" : "");
if (ret < 0)
return ret;
/* Buffers */
mutex_lock(&client->cbufs_lock);
if (list_empty(&client->cbufs))
goto done_cbufs;
list_for_each_entry(cbuf, &client->cbufs, list) {
ret = cbuf_debug_structs(buf, cbuf);
if (ret < 0)
goto done_cbufs;
}
done_cbufs:
mutex_unlock(&client->cbufs_lock);
if (ret < 0)
return ret;
/* Sessions */
mutex_lock(&client->sessions_lock);
list_for_each_entry(session, &client->sessions, list) {
ret = session_debug_structs(buf, session, false);
if (ret < 0)
goto done_sessions;
}
list_for_each_entry(session, &client->closing_sessions, list) {
ret = session_debug_structs(buf, session, true);
if (ret < 0)
goto done_sessions;
}
done_sessions:
mutex_unlock(&client->sessions_lock);
if (ret < 0)
return ret;
return 0;
}
int clients_debug_structs(struct kasnprintf_buf *buf)
{
struct tee_client *client;
ssize_t ret = 0;
mutex_lock(&client_ctx.clients_lock);
list_for_each_entry(client, &client_ctx.clients, list) {
ret = client_debug_structs(buf, client, false);
if (ret < 0)
break;
}
mutex_unlock(&client_ctx.clients_lock);
if (ret < 0)
return ret;
mutex_lock(&client_ctx.closing_clients_lock);
list_for_each_entry(client, &client_ctx.closing_clients, list) {
ret = client_debug_structs(buf, client, true);
if (ret < 0)
break;
}
mutex_unlock(&client_ctx.closing_clients_lock);
return ret;
}
View file
@ -0,0 +1,99 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _CLIENT_H_
#define _CLIENT_H_
#include <linux/list.h>
#include <linux/sched.h> /* TASK_COMM_LEN */
struct tee_object;
struct tee_client {
/* PID of task that opened the device, 0 if kernel */
pid_t pid;
/* Command for task*/
char comm[TASK_COMM_LEN];
/* Number of references kept to this object */
struct kref kref;
/* List of contiguous buffers allocated by mcMallocWsm for the client */
struct list_head cbufs;
struct mutex cbufs_lock; /* lock for the cbufs list */
/* List of TA sessions opened by this client */
struct list_head sessions;
struct list_head closing_sessions;
struct mutex sessions_lock; /* sessions list + closing */
/* Client state */
bool closing;
/* The list entry to attach to "ctx.clients" list */
struct list_head list;
};
/* Client */
struct tee_client *client_create(bool is_from_kernel);
static inline void client_get(struct tee_client *client)
{
kref_get(&client->kref);
}
void client_put(struct tee_client *client);
int client_freeze(struct tee_client *client);
void client_close(struct tee_client *client);
/* All clients */
void clients_kill_sessions(void);
/* Session */
int client_open_session(struct tee_client *client, u32 *session_id,
const struct mc_uuid_t *uuid, uintptr_t tci,
size_t tci_len, bool is_gp_uuid,
struct mc_identity *identity);
int client_open_trustlet(struct tee_client *client, u32 *session_id, u32 spid,
uintptr_t trustlet, size_t trustlet_len,
uintptr_t tci, size_t tci_len);
int client_add_session(struct tee_client *client,
const struct tee_object *obj, uintptr_t tci, size_t len,
u32 *p_sid, bool is_gp_uuid,
struct mc_identity *identity);
int client_remove_session(struct tee_client *client, u32 session_id);
int client_notify_session(struct tee_client *client, u32 session_id);
int client_waitnotif_session(struct tee_client *client, u32 session_id,
s32 timeout);
int client_get_session_exitcode(struct tee_client *client, u32 session_id,
s32 *exit_code);
int client_map_session_wsms(struct tee_client *client, u32 session_id,
struct mc_ioctl_buffer *bufs);
int client_unmap_session_wsms(struct tee_client *client, u32 session_id,
const struct mc_ioctl_buffer *bufs);
/* Contiguous buffer */
int client_cbuf_create(struct tee_client *client, u32 len, uintptr_t *addr,
struct vm_area_struct *vmarea);
int client_cbuf_free(struct tee_client *client, uintptr_t addr);
/* MMU */
struct cbuf;
struct tee_mmu *client_mmu_create(struct tee_client *client, uintptr_t buf,
u32 len, struct cbuf **cbuf);
void client_mmu_free(struct tee_client *client, uintptr_t buf,
struct tee_mmu *mmu, struct cbuf *cbuf);
/* Global */
void client_init(void);
/* Debug */
int clients_debug_structs(struct kasnprintf_buf *buf);
#endif /* _CLIENT_H_ */
View file
@ -0,0 +1,431 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/list.h>
#include "public/mc_linux.h"
#include "public/mc_admin.h"
#include "public/mobicore_driver_api.h"
#include "main.h"
#include "client.h"
enum mc_result convert(int err)
{
switch (-err) {
case 0:
return MC_DRV_OK;
case ENOMSG:
return MC_DRV_NO_NOTIFICATION;
case EBADMSG:
return MC_DRV_ERR_NOTIFICATION;
case EAGAIN:
return MC_DRV_ERR_OUT_OF_RESOURCES;
case EHOSTDOWN:
return MC_DRV_ERR_INIT;
case ENODEV:
return MC_DRV_ERR_UNKNOWN_DEVICE;
case ENXIO:
return MC_DRV_ERR_UNKNOWN_SESSION;
case EPERM:
return MC_DRV_ERR_INVALID_OPERATION;
case EBADE:
return MC_DRV_ERR_INVALID_RESPONSE;
case ETIME:
return MC_DRV_ERR_TIMEOUT;
case ENOMEM:
return MC_DRV_ERR_NO_FREE_MEMORY;
case EUCLEAN:
return MC_DRV_ERR_FREE_MEMORY_FAILED;
case ENOTEMPTY:
return MC_DRV_ERR_SESSION_PENDING;
case EHOSTUNREACH:
return MC_DRV_ERR_DAEMON_UNREACHABLE;
case ENOENT:
return MC_DRV_ERR_INVALID_DEVICE_FILE;
case EINVAL:
return MC_DRV_ERR_INVALID_PARAMETER;
case EPROTO:
return MC_DRV_ERR_KERNEL_MODULE;
case EADDRINUSE:
return MC_DRV_ERR_BULK_MAPPING;
case EADDRNOTAVAIL:
return MC_DRV_ERR_BULK_UNMAPPING;
case ECOMM:
return MC_DRV_INFO_NOTIFICATION;
case EUNATCH:
return MC_DRV_ERR_NQ_FAILED;
default:
mc_dev_devel("error is %d\n", err);
return MC_DRV_ERR_UNKNOWN;
}
}
static inline bool is_valid_device(u32 device_id)
{
return device_id == MC_DEVICE_ID_DEFAULT;
}
static struct tee_client *client;
static int open_count;
static DEFINE_MUTEX(dev_mutex); /* Lock for the device */
static bool clientlib_client_get(void)
{
bool ret = true;
mutex_lock(&dev_mutex);
if (!client)
ret = false;
else
client_get(client);
mutex_unlock(&dev_mutex);
return ret;
}
static void clientlib_client_put(void)
{
mutex_lock(&dev_mutex);
client_put(client);
mutex_unlock(&dev_mutex);
}
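/*
* Minimal in-kernel usage sketch of this API (error handling omitted;
* the uuid variable and the PAGE_SIZE tci length are placeholders):
*
*   struct mc_session_handle sess = { .device_id = MC_DEVICE_ID_DEFAULT };
*   u8 *tci;
*
*   mc_open_device(MC_DEVICE_ID_DEFAULT);
*   mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, PAGE_SIZE, &tci, 0);
*   mc_open_session(&sess, &uuid, tci, PAGE_SIZE);
*   ... exchange messages through tci, mc_notify(), mc_wait_notification() ...
*   mc_close_session(&sess);
*   mc_free_wsm(MC_DEVICE_ID_DEFAULT, tci);
*   mc_close_device(MC_DEVICE_ID_DEFAULT);
*/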
enum mc_result mc_open_device(u32 device_id)
{
enum mc_result mc_result = MC_DRV_OK;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
mutex_lock(&dev_mutex);
if (!open_count)
client = client_create(true);
if (client) {
open_count++;
mc_dev_devel("Successfully opened the device\n");
} else {
mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
mc_dev_devel("Could not open device\n");
}
mutex_unlock(&dev_mutex);
return mc_result;
}
EXPORT_SYMBOL(mc_open_device);
enum mc_result mc_close_device(u32 device_id)
{
enum mc_result mc_result = MC_DRV_OK;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
mutex_lock(&dev_mutex);
if (!client) {
mc_result = MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
goto end;
}
if (open_count > 1) {
open_count--;
goto end;
}
/* Check sessions and freeze client */
mc_result = convert(client_freeze(client));
if (mc_result != MC_DRV_OK)
goto end;
/* Close the device */
client_close(client);
client = NULL;
open_count = 0;
end:
mutex_unlock(&dev_mutex);
return mc_result;
}
EXPORT_SYMBOL(mc_close_device);
enum mc_result mc_open_session(struct mc_session_handle *session,
const struct mc_uuid_t *uuid, u8 *tci, u32 len)
{
struct mc_identity identity = {
.login_type = TEEC_LOGIN_PUBLIC,
};
enum mc_result ret;
/* Check parameters */
if (!session || !uuid)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_open_session(client, &session->session_id, uuid,
(uintptr_t)tci, len, false,
&identity));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_open_session);
enum mc_result mc_open_trustlet(struct mc_session_handle *session, u32 spid,
u8 *trustlet, u32 trustlet_len,
u8 *tci, u32 len)
{
enum mc_result ret;
/* Check parameters */
if (!session || !trustlet)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_open_trustlet(client, &session->session_id, spid,
(uintptr_t)trustlet, trustlet_len,
(uintptr_t)tci, len));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_open_trustlet);
enum mc_result mc_close_session(struct mc_session_handle *session)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_remove_session(client, session->session_id));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_close_session);
enum mc_result mc_notify(struct mc_session_handle *session)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_notify_session(client, session->session_id));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_notify);
enum mc_result mc_wait_notification(struct mc_session_handle *session,
s32 timeout)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_waitnotif_session(client, session->session_id,
timeout));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_wait_notification);
enum mc_result mc_malloc_wsm(u32 device_id, u32 align, u32 len, u8 **wsm,
u32 wsm_flags)
{
enum mc_result ret;
uintptr_t va;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!len)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!wsm)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_cbuf_create(client, len, &va, NULL));
if (ret == MC_DRV_OK)
*wsm = (u8 *)va;
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_malloc_wsm);
enum mc_result mc_free_wsm(u32 device_id, u8 *wsm)
{
enum mc_result ret;
uintptr_t va = (uintptr_t)wsm;
/* Check parameters */
if (!is_valid_device(device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_cbuf_free(client, va));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_free_wsm);
enum mc_result mc_map(struct mc_session_handle *session, void *address,
u32 length, struct mc_bulk_map *map_info)
{
enum mc_result ret;
struct mc_ioctl_buffer bufs[MC_MAP_MAX];
u32 i;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!map_info)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
bufs[0].va = (uintptr_t)address;
bufs[0].len = length;
for (i = 1; i < MC_MAP_MAX; i++)
bufs[i].va = 0;
ret = convert(client_map_session_wsms(client, session->session_id,
bufs));
if (ret == MC_DRV_OK) {
map_info->secure_virt_addr = bufs[0].sva;
map_info->secure_virt_len = bufs[0].len;
}
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_map);
enum mc_result mc_unmap(struct mc_session_handle *session, void *address,
struct mc_bulk_map *map_info)
{
enum mc_result ret;
struct mc_ioctl_buffer bufs[MC_MAP_MAX];
u32 i;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!map_info)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
bufs[0].va = (uintptr_t)address;
bufs[0].len = map_info->secure_virt_len;
bufs[0].sva = map_info->secure_virt_addr;
for (i = 1; i < MC_MAP_MAX; i++)
bufs[i].va = 0;
ret = convert(client_unmap_session_wsms(client, session->session_id,
bufs));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_unmap);
enum mc_result mc_get_session_error_code(struct mc_session_handle *session,
s32 *exit_code)
{
enum mc_result ret;
/* Check parameters */
if (!session)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!is_valid_device(session->device_id))
return MC_DRV_ERR_UNKNOWN_DEVICE;
if (!exit_code)
return MC_DRV_ERR_INVALID_PARAMETER;
if (!clientlib_client_get())
return MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN;
/* Call core api */
ret = convert(client_get_session_exitcode(client, session->session_id,
exit_code));
clientlib_client_put();
return ret;
}
EXPORT_SYMBOL(mc_get_session_error_code);
View file
@ -0,0 +1,161 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "platform.h"
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include "main.h"
#include "clock.h"
static struct clk_context {
struct clk *mc_ce_iface_clk;
struct clk *mc_ce_core_clk;
struct clk *mc_ce_bus_clk;
struct clk *mc_ce_core_src_clk;
} clk_ctx;
int mc_clock_init(void)
{
int ret = 0;
#ifdef MC_CLOCK_CORESRC_DEFAULTRATE
int core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
/* Get core clk src */
clk_ctx.mc_ce_core_src_clk = clk_get(g_ctx.mcd, "core_clk_src");
if (IS_ERR(clk_ctx.mc_ce_core_src_clk)) {
ret = PTR_ERR(clk_ctx.mc_ce_core_src_clk);
mc_dev_err("cannot get core src clock: %d\n", ret);
goto error;
}
#ifdef MC_CRYPTO_CLOCK_CORESRC_PROPNAME
if (of_property_read_u32(g_ctx.mcd->of_node,
MC_CRYPTO_CLOCK_CORESRC_PROPNAME,
&core_src_rate)) {
core_src_rate = MC_CLOCK_CORESRC_DEFAULTRATE;
mc_dev_err("cannot get ce clock frequency from DT, use %d\n",
core_src_rate);
}
#endif /* MC_CRYPTO_CLOCK_CORESRC_PROPNAME */
ret = clk_set_rate(clk_ctx.mc_ce_core_src_clk, core_src_rate);
if (ret) {
clk_put(clk_ctx.mc_ce_core_src_clk);
clk_ctx.mc_ce_core_src_clk = NULL;
mc_dev_err("cannot set core clock src rate: %d\n", ret);
ret = -EIO;
goto error;
}
#endif /* MC_CLOCK_CORESRC_DEFAULTRATE */
/* Get core clk */
clk_ctx.mc_ce_core_clk = clk_get(g_ctx.mcd, "core_clk");
if (IS_ERR(clk_ctx.mc_ce_core_clk)) {
ret = PTR_ERR(clk_ctx.mc_ce_core_clk);
mc_dev_err("cannot get core clock: %d\n", ret);
goto error;
}
/* Get Interface clk */
clk_ctx.mc_ce_iface_clk = clk_get(g_ctx.mcd, "iface_clk");
if (IS_ERR(clk_ctx.mc_ce_iface_clk)) {
clk_put(clk_ctx.mc_ce_core_clk);
ret = PTR_ERR(clk_ctx.mc_ce_iface_clk);
mc_dev_err("cannot get iface clock: %d\n", ret);
goto error;
}
/* Get AXI clk */
clk_ctx.mc_ce_bus_clk = clk_get(g_ctx.mcd, "bus_clk");
if (IS_ERR(clk_ctx.mc_ce_bus_clk)) {
clk_put(clk_ctx.mc_ce_iface_clk);
clk_put(clk_ctx.mc_ce_core_clk);
ret = PTR_ERR(clk_ctx.mc_ce_bus_clk);
mc_dev_err("cannot get AXI bus clock: %d\n", ret);
goto error;
}
return ret;
error:
clk_ctx.mc_ce_core_clk = NULL;
clk_ctx.mc_ce_iface_clk = NULL;
clk_ctx.mc_ce_bus_clk = NULL;
clk_ctx.mc_ce_core_src_clk = NULL;
return ret;
}
void mc_clock_exit(void)
{
if (clk_ctx.mc_ce_iface_clk)
clk_put(clk_ctx.mc_ce_iface_clk);
if (clk_ctx.mc_ce_core_clk)
clk_put(clk_ctx.mc_ce_core_clk);
if (clk_ctx.mc_ce_bus_clk)
clk_put(clk_ctx.mc_ce_bus_clk);
if (clk_ctx.mc_ce_core_src_clk)
clk_put(clk_ctx.mc_ce_core_src_clk);
}
int mc_clock_enable(void)
{
int rc;
rc = clk_prepare_enable(clk_ctx.mc_ce_core_clk);
if (rc) {
mc_dev_err("cannot enable core clock\n");
goto err_core;
}
rc = clk_prepare_enable(clk_ctx.mc_ce_iface_clk);
if (rc) {
mc_dev_err("cannot enable interface clock\n");
goto err_iface;
}
rc = clk_prepare_enable(clk_ctx.mc_ce_bus_clk);
if (rc) {
mc_dev_err("cannot enable bus clock\n");
goto err_bus;
}
return 0;
err_bus:
clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
err_iface:
clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
err_core:
return rc;
}
void mc_clock_disable(void)
{
if (clk_ctx.mc_ce_iface_clk)
clk_disable_unprepare(clk_ctx.mc_ce_iface_clk);
if (clk_ctx.mc_ce_core_clk)
clk_disable_unprepare(clk_ctx.mc_ce_core_clk);
if (clk_ctx.mc_ce_bus_clk)
clk_disable_unprepare(clk_ctx.mc_ce_bus_clk);
}
#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */
View file
@ -0,0 +1,53 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_CLOCK_H_
#define _MC_CLOCK_H_
#include "platform.h" /* MC_CRYPTO_CLOCK_MANAGEMENT */
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
/* Initialize secure crypto clocks */
int mc_clock_init(void);
/* Free secure crypto clocks */
void mc_clock_exit(void);
/* Enable secure crypto clocks */
int mc_clock_enable(void);
/* Disable secure crypto clocks */
void mc_clock_disable(void);
#else /* MC_CRYPTO_CLOCK_MANAGEMENT */
static inline int mc_clock_init(void)
{
return 0;
}
static inline void mc_clock_exit(void)
{
}
static inline int mc_clock_enable(void)
{
return 0;
}
static inline void mc_clock_disable(void)
{
}
#endif /* !MC_CRYPTO_CLOCK_MANAGEMENT */
#endif /* _MC_CLOCK_H_ */
View file
@ -0,0 +1,687 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include "public/mc_linux.h"
#include "public/mc_linux_api.h"
#include "mci/mcifc.h"
#include "platform.h" /* MC_FASTCALL_WORKER_THREAD and more */
#include "main.h"
#include "clock.h" /* mc_clock_enable, mc_clock_disable */
#include "fastcall.h"
/* ExySp: Lock for core switch processing */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
struct mutex core_switch_lock;
uint8_t core_status = 0xFF;
static int disable_local_timer;
int mc_timer(void);
void mc_set_schedule_policy(int core);
int __mc_switch_core(u32 core_num);
#endif
struct fastcall_work {
#ifdef MC_FASTCALL_WORKER_THREAD
struct kthread_work work;
#else
struct work_struct work;
#endif
void *data;
};
/* generic fast call parameters */
union mc_fc_generic {
struct mc_fc_as_in {
u32 cmd;
u32 param[3];
} as_in;
struct {
u32 resp;
u32 ret;
u32 param[2];
} as_out;
};
/* fast call init */
union mc_fc_init {
union mc_fc_generic as_generic;
struct {
u32 cmd;
u32 base;
u32 nq_info;
u32 mcp_info;
} as_in;
struct {
u32 resp;
u32 ret;
u32 flags;
u32 rfu;
} as_out;
};
/* fast call info parameters */
union mc_fc_info {
union mc_fc_generic as_generic;
struct {
u32 cmd;
u32 ext_info_id;
u32 rfu[2];
} as_in;
struct {
u32 resp;
u32 ret;
u32 state;
u32 ext_info;
} as_out;
};
#ifdef TBASE_CORE_SWITCHER
/* fast call switch Core parameters */
union mc_fc_swich_core {
union mc_fc_generic as_generic;
struct {
u32 cmd;
u32 core_id;
u32 rfu[2];
} as_in;
struct {
u32 resp;
u32 ret;
u32 state;
u32 ext_info;
} as_out;
};
#endif
#ifdef MC_FASTCALL_WORKER_THREAD
static struct task_struct *fastcall_thread;
static DEFINE_KTHREAD_WORKER(fastcall_worker);
#endif
/* Structure to log SMC calls */
struct smc_log_entry {
u64 cpu_clk;
struct mc_fc_as_in as_in;
};
#define SMC_LOG_SIZE 256
static struct smc_log_entry smc_log[SMC_LOG_SIZE];
static int smc_log_index;
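/*
* smc_log is a ring buffer keeping the last SMC_LOG_SIZE fastcalls,
* timestamped with local_clock(), for post-mortem debugging.
*/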
/*
* _smc() - fast call to MobiCore
*
* @data: pointer to fast call data
*/
static inline int _smc(union mc_fc_generic *mc_fc_generic)
{
if (!mc_fc_generic)
return -EINVAL;
/* Log SMC call */
smc_log[smc_log_index].cpu_clk = local_clock();
smc_log[smc_log_index].as_in = mc_fc_generic->as_in;
if (++smc_log_index >= SMC_LOG_SIZE)
smc_log_index = 0;
#ifdef MC_SMC_FASTCALL
return smc_fastcall(mc_fc_generic, sizeof(*mc_fc_generic));
#else /* MC_SMC_FASTCALL */
{
#ifdef CONFIG_ARM64
/* SMC expects values in x0-x3 */
register u64 reg0 __asm__("x0") = mc_fc_generic->as_in.cmd;
register u64 reg1 __asm__("x1") = mc_fc_generic->as_in.param[0];
register u64 reg2 __asm__("x2") = mc_fc_generic->as_in.param[1];
register u64 reg3 __asm__("x3") = mc_fc_generic->as_in.param[2];
/*
* According to the AArch64 SMC Calling Convention (ARM DEN 0028A),
* section 3.1, registers x4-x17 are unpredictable/scratch
* registers. We therefore have to make sure that the compiler does
* not allocate any of those registers, by letting it know that the
* asm code might clobber them.
*/
__asm__ volatile (
"smc #0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
:
: "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
"x12", "x13", "x14", "x15", "x16", "x17"
);
#else /* CONFIG_ARM64 */
/* SMC expects values in r0-r3 */
register u32 reg0 __asm__("r0") = mc_fc_generic->as_in.cmd;
register u32 reg1 __asm__("r1") = mc_fc_generic->as_in.param[0];
register u32 reg2 __asm__("r2") = mc_fc_generic->as_in.param[1];
register u32 reg3 __asm__("r3") = mc_fc_generic->as_in.param[2];
__asm__ volatile (
#ifdef MC_ARCH_EXTENSION_SEC
/*
* This pseudo op is supported and required from
* binutils 2.21 on
*/
".arch_extension sec\n"
#endif /* MC_ARCH_EXTENSION_SEC */
"smc #0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
);
#ifdef __ARM_VE_A9X4_QEMU__
/*
* QEMU does not return to the address following the SMC
* instruction, so we have to insert several nop instructions to
* work around this QEMU bug.
*/
__asm__ volatile (
"nop\n"
"nop\n"
"nop\n"
"nop"
);
#endif /* __ARM_VE_A9X4_QEMU__ */
#endif /* !CONFIG_ARM64 */
/* set response */
mc_fc_generic->as_out.resp = reg0;
mc_fc_generic->as_out.ret = reg1;
mc_fc_generic->as_out.param[0] = reg2;
mc_fc_generic->as_out.param[1] = reg3;
}
return 0;
#endif /* !MC_SMC_FASTCALL */
}
#ifdef TBASE_CORE_SWITCHER
static u32 active_cpu;
#ifdef CONFIG_SECURE_OS_BOOSTER_API
/* ExySp: for sos performance */
void mc_set_schedule_policy(int core)
{
if (core == DEFAULT_BIG_CORE)
disable_local_timer = 1;
else
disable_local_timer = 0;
return;
}
#endif
#ifdef MC_FASTCALL_WORKER_THREAD
#ifdef CONFIG_SECURE_OS_BOOSTER_API
/* ExySp: for sos performance */
static void mc_cpu_offline(int cpu)
{
int i;
mutex_lock(&core_switch_lock);
core_status &= ~(0x1 << cpu);
if (active_cpu != cpu) {
mc_dev_devel("not active CPU, no action taken\n");
mutex_unlock(&core_switch_lock);
return;
}
/* Choose the first online CPU and switch! */
for_each_online_cpu(i) {
if (cpu != i) {
mc_dev_devel("CPU %d is dying, switching to %d\n",
cpu, i);
mc_set_schedule_policy(DEFAULT_LITTLE_CORE);
__mc_switch_core(i);
break;
}
mc_dev_devel("Skipping CPU %d\n", cpu);
}
mutex_unlock(&core_switch_lock);
}
void mc_cpu_online(int cpu)
{
mutex_lock(&core_switch_lock);
core_status |= (0x1 << cpu);
mutex_unlock(&core_switch_lock);
}
#else
static void mc_cpu_offline(int cpu)
{
int i;
if (active_cpu != cpu) {
mc_dev_devel("not active CPU, no action taken\n");
return;
}
/* Choose the first online CPU and switch! */
for_each_online_cpu(i) {
if (cpu != i) {
mc_dev_devel("CPU %d is dying, switching to %d\n",
cpu, i);
mc_switch_core(i);
break;
}
mc_dev_devel("Skipping CPU %d\n", cpu);
}
}
#endif
static int mobicore_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
#ifdef CONFIG_SECURE_OS_BOOSTER_API
/* ExySp: for sos performance */
case CPU_ONLINE:
mc_cpu_online(cpu);
break;
#endif
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
mc_cpu_offline(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
break;
}
return NOTIFY_OK;
}
static struct notifier_block mobicore_cpu_notifier = {
.notifier_call = mobicore_cpu_callback,
};
#endif /* MC_FASTCALL_WORKER_THREAD */
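/*
 * Ask the SWd to switch to another core: the logical CPU number from
 * param[0] is translated through the CPU_IDS table into the core ID the
 * SWd expects; active_cpu is only updated if the fast call succeeds.
 */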
static cpumask_t mc_exec_core_switch(union mc_fc_generic *mc_fc_generic)
{
cpumask_t cpu;
u32 new_cpu;
u32 cpu_id[] = CPU_IDS;
new_cpu = mc_fc_generic->as_in.param[0];
mc_fc_generic->as_in.param[0] = cpu_id[new_cpu];
if (_smc(mc_fc_generic) != 0 || mc_fc_generic->as_out.ret != 0) {
mc_dev_info("CoreSwap failed %d -> %d (cpu %d still active)\n",
raw_smp_processor_id(), new_cpu, active_cpu);
} else {
active_cpu = new_cpu;
mc_dev_info("CoreSwap ok %d -> %d\n",
raw_smp_processor_id(), active_cpu);
}
cpumask_clear(&cpu);
cpumask_set_cpu(active_cpu, &cpu);
return cpu;
}
#else /* TBASE_CORE_SWITCHER */
static inline cpumask_t mc_exec_core_switch(union mc_fc_generic *mc_fc_generic)
{
return CPU_MASK_CPU0;
}
#endif /* !TBASE_CORE_SWITCHER */
#ifdef MC_FASTCALL_WORKER_THREAD
static void fastcall_work_func(struct kthread_work *work)
#else
static void fastcall_work_func(struct work_struct *work)
#endif
{
struct fastcall_work *fc_work =
container_of(work, struct fastcall_work, work);
union mc_fc_generic *mc_fc_generic = fc_work->data;
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
int irq_check_cnt = 0;
struct irq_desc *desc = irq_to_desc(MC_INTR_LOCAL_TIMER);
#endif
if (!mc_fc_generic)
return;
mc_clock_enable();
if (mc_fc_generic->as_in.cmd == MC_FC_SWAP_CPU) {
#ifdef MC_FASTCALL_WORKER_THREAD
cpumask_t new_msk = mc_exec_core_switch(mc_fc_generic);
set_cpus_allowed(fastcall_thread, new_msk);
#else
mc_exec_core_switch(mc_fc_generic);
#endif
} else {
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
if (active_cpu == DEFAULT_BIG_CORE && disable_local_timer) {
irq_check_cnt++;
disable_irq(MC_INTR_LOCAL_TIMER);
mc_timer();
}
#endif
_smc(mc_fc_generic);
#ifdef CONFIG_SECURE_OS_BOOSTER_API
if (irq_check_cnt) {
if (desc->depth != 0)
enable_irq(MC_INTR_LOCAL_TIMER);
}
#endif
}
mc_clock_disable();
}
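/*
 * Issue a fast call via the fastcall worker: with the dedicated worker
 * thread, the SMC runs on the core the SWd is active on (the thread is
 * pinned there and re-pinned on MC_FC_SWAP_CPU above); otherwise the
 * work is scheduled on CPU 0. Blocks until the call has completed.
 */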
static bool mc_fastcall(void *data)
{
#ifdef MC_FASTCALL_WORKER_THREAD
struct fastcall_work fc_work = {
KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
.data = data,
};
if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
return false;
/* If work is queued or executing, wait for it to finish execution */
flush_kthread_work(&fc_work.work);
#else
struct fastcall_work fc_work = {
.data = data,
};
INIT_WORK(&fc_work.work, fastcall_work_func);
if (!schedule_work_on(0, &fc_work.work))
return false;
flush_work(&fc_work.work);
#endif
return true;
}
int mc_fastcall_init(void)
{
int ret = mc_clock_init();
if (ret)
return ret;
#ifdef MC_FASTCALL_WORKER_THREAD
fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
"mc_fastcall");
if (IS_ERR(fastcall_thread)) {
ret = PTR_ERR(fastcall_thread);
fastcall_thread = NULL;
mc_dev_err("cannot create fastcall wq: %d\n", ret);
return ret;
}
/* this thread MUST run on CPU 0 at startup */
set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
wake_up_process(fastcall_thread);
#ifdef TBASE_CORE_SWITCHER
ret = register_cpu_notifier(&mobicore_cpu_notifier);
#endif
#endif /* MC_FASTCALL_WORKER_THREAD */
/* ExySp: init lock for core switch processing */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
mutex_init(&core_switch_lock);
#endif
return ret;
}
void mc_fastcall_exit(void)
{
#ifdef MC_FASTCALL_WORKER_THREAD
if (!IS_ERR_OR_NULL(fastcall_thread)) {
#ifdef TBASE_CORE_SWITCHER
unregister_cpu_notifier(&mobicore_cpu_notifier);
#endif
kthread_stop(fastcall_thread);
fastcall_thread = NULL;
}
#endif /* MC_FASTCALL_WORKER_THREAD */
mc_clock_exit();
}
/*
* convert fast call return code to linux driver module error code
*/
static int convert_fc_ret(u32 ret)
{
switch (ret) {
case MC_FC_RET_OK:
return 0;
case MC_FC_RET_ERR_INVALID:
return -EINVAL;
case MC_FC_RET_ERR_ALREADY_INITIALIZED:
return -EBUSY;
default:
return -EFAULT;
}
}
int mc_fc_init(uintptr_t base_pa, ptrdiff_t off, size_t q_len, size_t buf_len)
{
#ifdef CONFIG_ARM64
u32 base_high = (u32)(base_pa >> 32);
#else
u32 base_high = 0;
#endif
union mc_fc_init fc_init;
/* Call the INIT fastcall to setup MobiCore initialization */
memset(&fc_init, 0, sizeof(fc_init));
fc_init.as_in.cmd = MC_FC_INIT;
/* Base address of the MCI buffer, PAGE_SIZE-aligned (4KB by default) */
fc_init.as_in.base = (u32)base_pa;
/* Notification queue info, packed [16:16]: base address bits [47:32]
* in the upper half, queue length in the lower half
*/
fc_init.as_in.nq_info =
((base_high & 0xFFFF) << 16) | (q_len & 0xFFFF);
/* MCP buffer info, packed [16:16]: offset in the upper half, length in
* the lower half
*/
fc_init.as_in.mcp_info = (off << 16) | (buf_len & 0xFFFF);
mc_dev_devel("cmd=%d, base=0x%08x, nq_info=0x%08x, mcp_info=0x%08x\n",
fc_init.as_in.cmd, fc_init.as_in.base,
fc_init.as_in.nq_info, fc_init.as_in.mcp_info);
mc_fastcall(&fc_init.as_generic);
mc_dev_devel("out cmd=0x%08x, ret=0x%08x\n", fc_init.as_out.resp,
fc_init.as_out.ret);
if (fc_init.as_out.flags & MC_FC_INIT_FLAG_LPAE)
g_ctx.f_lpae = true;
return convert_fc_ret(fc_init.as_out.ret);
}
int mc_fc_info(u32 ext_info_id, u32 *state, u32 *ext_info)
{
union mc_fc_info fc_info;
int ret = 0;
memset(&fc_info, 0, sizeof(fc_info));
fc_info.as_in.cmd = MC_FC_INFO;
fc_info.as_in.ext_info_id = ext_info_id;
mc_fastcall(&fc_info.as_generic);
ret = convert_fc_ret(fc_info.as_out.ret);
if (ret) {
if (state)
*state = MC_STATUS_NOT_INITIALIZED;
if (ext_info)
*ext_info = 0;
mc_dev_err("code %d for idx %d\n", ret, ext_info_id);
} else {
if (state)
*state = fc_info.as_out.state;
if (ext_info)
*ext_info = fc_info.as_out.ext_info;
}
return ret;
}
int mc_fc_mem_trace(phys_addr_t buffer, u32 size)
{
union mc_fc_generic mc_fc_generic;
memset(&mc_fc_generic, 0, sizeof(mc_fc_generic));
mc_fc_generic.as_in.cmd = MC_FC_MEM_TRACE;
mc_fc_generic.as_in.param[0] = (u32)buffer;
#ifdef CONFIG_ARM64
mc_fc_generic.as_in.param[1] = (u32)(buffer >> 32);
#endif
mc_fc_generic.as_in.param[2] = size;
mc_fastcall(&mc_fc_generic);
return convert_fc_ret(mc_fc_generic.as_out.ret);
}
int mc_fc_nsiq(void)
{
union mc_fc_generic fc;
int ret;
memset(&fc, 0, sizeof(fc));
fc.as_in.cmd = MC_SMC_N_SIQ;
mc_fastcall(&fc);
ret = convert_fc_ret(fc.as_out.ret);
if (ret)
mc_dev_err("failed: %d\n", ret);
return ret;
}
int mc_fc_yield(void)
{
union mc_fc_generic fc;
int ret;
memset(&fc, 0, sizeof(fc));
fc.as_in.cmd = MC_SMC_N_YIELD;
mc_fastcall(&fc);
ret = convert_fc_ret(fc.as_out.ret);
if (ret)
mc_dev_err("failed: %d\n", ret);
return ret;
}
static int show_smc_log_entry(struct kasnprintf_buf *buf,
struct smc_log_entry *entry)
{
return kasnprintf(buf, "%20llu %10d 0x%08x 0x%08x 0x%08x\n",
entry->cpu_clk, (s32)entry->as_in.cmd,
entry->as_in.param[0], entry->as_in.param[1],
entry->as_in.param[2]);
}
/*
* Dump the SMC log ring buffer, starting from the oldest command. It is
* assumed that no new entries are being logged at this point.
*/
int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf)
{
int i, ret = 0;
ret = kasnprintf(buf, "%20s %10s %-10s %-10s %-10s\n",
"CPU clock", "command", "param1", "param2", "param3");
if (ret < 0)
return ret;
if (smc_log[smc_log_index].cpu_clk) {
/* Buffer has wrapped around, dump end (oldest records) */
for (i = smc_log_index; i < SMC_LOG_SIZE; i++) {
ret = show_smc_log_entry(buf, &smc_log[i]);
if (ret < 0)
return ret;
}
}
/* Dump first records */
for (i = 0; i < smc_log_index; i++) {
ret = show_smc_log_entry(buf, &smc_log[i]);
if (ret < 0)
return ret;
}
return ret;
}
#ifdef TBASE_CORE_SWITCHER
u32 mc_active_core(void)
{
return active_cpu;
}
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
int __mc_switch_core(u32 core_num)
#else
int mc_switch_core(u32 core_num)
#endif
{
s32 ret = 0;
union mc_fc_swich_core fc_switch_core;
if (!cpu_online(core_num))
return 1;
memset(&fc_switch_core, 0, sizeof(fc_switch_core));
fc_switch_core.as_in.cmd = MC_FC_SWAP_CPU;
if (core_num < COUNT_OF_CPUS)
fc_switch_core.as_in.core_id = core_num;
else
fc_switch_core.as_in.core_id = 0;
mc_dev_devel("<- cmd=0x%08x, core_id=0x%08x\n",
fc_switch_core.as_in.cmd, fc_switch_core.as_in.core_id);
/* ExySp: for sos performance */
mc_dev_info("<- core_num=0x%08x, active_cpu=0x%08x\n",
core_num, active_cpu);
mc_fastcall(&fc_switch_core.as_generic);
ret = convert_fc_ret(fc_switch_core.as_out.ret);
mc_dev_devel("exit with %d/0x%08X\n", ret, ret);
return ret;
}
/* ExySp: for sos performance */
#ifdef CONFIG_SECURE_OS_BOOSTER_API
int mc_switch_core(uint32_t core_num)
{
int ret;
mutex_lock(&core_switch_lock);
if (!(core_status & (0x1 << core_num))) {
mc_dev_devel("Core status... core #%d is offline\n", core_num);
mutex_unlock(&core_switch_lock);
return 1;
}
ret = __mc_switch_core(core_num);
mutex_unlock(&core_switch_lock);
return ret;
}
#endif
#endif


@ -0,0 +1,40 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _TBASE_FASTCALL_H_
#define _TBASE_FASTCALL_H_
/* Use the arch_extension sec pseudo op before switching to secure world */
#if defined(__GNUC__) && \
defined(__GNUC_MINOR__) && \
defined(__GNUC_PATCHLEVEL__) && \
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \
>= 40502
#ifndef CONFIG_ARM64
#define MC_ARCH_EXTENSION_SEC
#endif
#endif
int mc_fc_init(uintptr_t base_pa, ptrdiff_t off, size_t q_len, size_t buf_len);
int mc_fc_info(u32 ext_info_id, u32 *state, u32 *ext_info);
int mc_fc_mem_trace(phys_addr_t buffer, u32 size);
int mc_fc_nsiq(void);
int mc_fc_yield(void);
int mc_fastcall_init(void);
void mc_fastcall_exit(void);
int mc_fastcall_debug_smclog(struct kasnprintf_buf *buf);
#endif /* _TBASE_FASTCALL_H_ */


@ -0,0 +1,256 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include "main.h"
#include "fastcall.h"
#include "logging.h"
/* Supported log buffer version */
#define MC_LOG_VERSION 2
/* Default length of the log ring buffer 256KiB */
#define LOG_BUF_ORDER 6
/* Max Len of a log line for printing */
#define LOG_LINE_SIZE 256
/* Definitions for log version 2 */
#define LOG_TYPE_MASK (0x0007)
#define LOG_TYPE_CHAR 0
#define LOG_TYPE_INTEGER 1
/* Field length */
#define LOG_LENGTH_MASK (0x00F8)
#define LOG_LENGTH_SHIFT 3
/* Extra attributes */
#define LOG_EOL (0x0100)
#define LOG_INTEGER_DECIMAL (0x0200)
#define LOG_INTEGER_SIGNED (0x0400)
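/*
 * The ctrl field of a log record (mc_logmsg below) encodes its payload:
 * bits [2:0] give the record type, bits [7:3] the integer field width,
 * bit 8 marks end-of-line and bits 9/10 select decimal/signed integer
 * formatting.
 */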
struct mc_logmsg {
u16 ctrl; /* Type and format of data */
u16 source; /* Unique value for each event source */
u32 log_data; /* Value, if any */
};
/* MobiCore internal trace buffer structure. */
struct mc_trace_buf {
u32 version; /* version of trace buffer */
u32 length; /* length of buff */
u32 head; /* last write position */
u8 buff[]; /* start of the log buffer */
};
static struct logging_ctx {
struct work_struct work;
union {
struct mc_trace_buf *trace_buf; /* Circular log buffer */
unsigned long trace_page;
};
bool buffer_is_shared; /* Log buffer cannot be freed */
u32 tail; /* MobiCore log read position */
u32 line_len; /* Log Line buffer current length */
int thread_err;
u16 prev_source; /* Previous Log source */
char line[LOG_LINE_SIZE]; /* Log Line buffer */
u32 enabled; /* Log can be disabled via debugfs */
bool dead;
} log_ctx;
static inline void log_eol(u16 source)
{
if (!strnlen(log_ctx.line, LOG_LINE_SIZE)) {
/* In case a TA tries to print a 0x0 */
log_ctx.line_len = 0;
return;
}
if (log_ctx.prev_source)
/* MobiCore Userspace */
dev_info(g_ctx.mcd, "%03x|%s\n", log_ctx.prev_source,
log_ctx.line);
else
/* MobiCore kernel */
dev_info(g_ctx.mcd, "%s\n", log_ctx.line);
log_ctx.line_len = 0;
log_ctx.line[0] = 0;
}
/*
* Collect chars in log_ctx.line buffer and output the buffer when it is full.
* No locking needed because only "mobicore_log" thread updates this buffer.
*/
static inline void log_char(char ch, u16 source)
{
if (ch == '\n' || ch == '\r') {
log_eol(source);
return;
}
if ((log_ctx.line_len >= (LOG_LINE_SIZE - 1)) ||
(source != log_ctx.prev_source))
log_eol(source);
log_ctx.line[log_ctx.line_len++] = ch;
log_ctx.line[log_ctx.line_len] = 0;
log_ctx.prev_source = source;
}
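/*
 * A LOG_TYPE_CHAR record packs up to four characters into its 32-bit
 * payload, least significant byte first: unpack and emit them in order.
 */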
static inline void log_string(u32 ch, u16 source)
{
while (ch) {
log_char(ch & 0xFF, source);
ch >>= 8;
}
}
static inline void log_number(u32 format, u32 value, u16 source)
{
int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
char fmt[16];
char buffer[32];
const char *reader = buffer;
if (format & LOG_INTEGER_DECIMAL)
if (format & LOG_INTEGER_SIGNED)
snprintf(fmt, sizeof(fmt), "%%%ud", width);
else
snprintf(fmt, sizeof(fmt), "%%%uu", width);
else
snprintf(fmt, sizeof(fmt), "%%0%ux", width);
snprintf(buffer, sizeof(buffer), fmt, value);
while (*reader)
log_char(*reader++, source);
}
static inline int log_msg(void *data)
{
struct mc_logmsg *msg = (struct mc_logmsg *)data;
int log_type = msg->ctrl & LOG_TYPE_MASK;
switch (log_type) {
case LOG_TYPE_CHAR:
log_string(msg->log_data, msg->source);
break;
case LOG_TYPE_INTEGER:
log_number(msg->ctrl, msg->log_data, msg->source);
break;
}
if (msg->ctrl & LOG_EOL)
log_eol(msg->source);
return sizeof(*msg);
}
static void log_worker(struct work_struct *work)
{
static DEFINE_MUTEX(local_mutex);
mutex_lock(&local_mutex);
while (log_ctx.trace_buf->head != log_ctx.tail) {
if (log_ctx.trace_buf->version != MC_LOG_VERSION) {
mc_dev_err("Bad log data v%d (exp. v%d), stop\n",
log_ctx.trace_buf->version, MC_LOG_VERSION);
log_ctx.dead = true;
break;
}
log_ctx.tail += log_msg(&log_ctx.trace_buf->buff[log_ctx.tail]);
/* Wrap over if no space left for a complete message */
if ((log_ctx.tail + sizeof(struct mc_logmsg)) >
log_ctx.trace_buf->length)
log_ctx.tail = 0;
}
mutex_unlock(&local_mutex);
}
/*
* Wake up the log reader thread
* This should be called from the places where calls into MobiCore have
* generated some logs (e.g. yield, SIQ, ...)
*/
void mc_logging_run(void)
{
if (log_ctx.enabled && !log_ctx.dead &&
(log_ctx.trace_buf->head != log_ctx.tail))
schedule_work(&log_ctx.work);
}
int mc_logging_start(void)
{
int ret = mc_fc_mem_trace(virt_to_phys((void *)(log_ctx.trace_page)),
BIT(LOG_BUF_ORDER) * PAGE_SIZE);
if (ret) {
mc_dev_err("shared traces setup failed\n");
return ret;
}
log_ctx.buffer_is_shared = true;
mc_dev_devel("fc_log version %u\n", log_ctx.trace_buf->version);
mc_logging_run();
return 0;
}
void mc_logging_stop(void)
{
if (!mc_fc_mem_trace(0, 0))
log_ctx.buffer_is_shared = false;
mc_logging_run();
flush_work(&log_ctx.work);
}
/*
* Set up the MobiCore kernel log. This assumes we are running on core 0;
* the fastcall will complain if that is not the case!
*/
int mc_logging_init(void)
{
/*
* We are going to map this buffer into virtual address space in SWd.
* To reduce complexity there, we use a contiguous buffer.
*/
log_ctx.trace_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
LOG_BUF_ORDER);
if (!log_ctx.trace_page)
return -ENOMEM;
INIT_WORK(&log_ctx.work, log_worker);
log_ctx.enabled = true;
debugfs_create_bool("swd_debug", 0400, g_ctx.debug_dir,
&log_ctx.enabled);
return 0;
}
void mc_logging_exit(void)
{
/*
* This is not racey as the only caller for mc_logging_run is the
* scheduler which gets stopped before us, and long before we exit.
*/
if (!log_ctx.buffer_is_shared)
free_pages(log_ctx.trace_page, LOG_BUF_ORDER);
else
mc_dev_err("log buffer unregister not supported\n");
}


@ -0,0 +1,23 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LOGGING_H_
#define _MC_LOGGING_H_
void mc_logging_run(void);
int mc_logging_init(void);
void mc_logging_exit(void);
int mc_logging_start(void);
void mc_logging_stop(void);
#endif /* _MC_LOGGING_H_ */


@ -0,0 +1,677 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include "public/mc_linux.h"
#include "public/mc_admin.h" /* MC_ADMIN_DEVNODE */
#include "platform.h" /* MC_PM_RUNTIME */
#include "main.h"
#include "fastcall.h"
#include "arm.h"
#include "mmu.h"
#include "scheduler.h"
#include "pm.h"
#include "logging.h"
#include "admin.h"
#include "user.h"
#include "mcp.h"
#include "client.h"
#include "build_tag.h"
/* Define a MobiCore device structure for use with dev_debug() etc */
static struct device_driver driver = {
.name = "Trustonic"
};
static struct device device = {
.driver = &driver
};
struct mc_device_ctx g_ctx = {
.mcd = &device
};
static struct main_ctx {
#ifdef MC_PM_RUNTIME
/* Whether hibernation succeeded */
bool did_hibernate;
/* Reboot notifications */
struct notifier_block reboot_notifier;
/* PM notifications */
struct notifier_block pm_notifier;
#endif
/* Devices */
dev_t device;
struct class *class;
/* Admin device */
struct cdev admin_cdev;
/* User device */
dev_t user_dev;
struct cdev user_cdev;
/* Debug counters */
struct mutex struct_counters_buf_mutex;
char struct_counters_buf[256];
int struct_counters_buf_len;
} main_ctx;
static int mobicore_start(void);
static void mobicore_stop(void);
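/*
 * Print into a buffer that grows on demand: format once and, if the
 * output was truncated, krealloc() the buffer (page-aligned) and format
 * again. Returns the number of characters added, or -ENOMEM.
 */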
int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...)
{
va_list args, args2;
int max_size = buf->size - buf->off;
int i;
va_start(args, fmt);
/* Keep a copy: a va_list cannot be reused after vsnprintf consumes it */
va_copy(args2, args);
i = vsnprintf(buf->buf + buf->off, max_size, fmt, args);
if (i >= max_size) {
int new_size = PAGE_ALIGN(buf->size + i + 1);
char *new_buf = krealloc(buf->buf, new_size, buf->gfp);
if (!new_buf) {
i = -ENOMEM;
} else {
buf->buf = new_buf;
buf->size = new_size;
max_size = buf->size - buf->off;
i = vsnprintf(buf->buf + buf->off, max_size, fmt, args2);
}
}
if (i > 0)
buf->off += i;
va_end(args2);
va_end(args);
return i;
}
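/*
 * Generic debugfs read helper: on a read from offset 0, (re)generate
 * the dump via 'function' and stash it in file->private_data, then
 * serve reads from that snapshot until it is regenerated.
 */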
ssize_t debug_generic_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos,
int (*function)(struct kasnprintf_buf *buf))
{
/* Add/update buffer */
if (!file->private_data || !*ppos) {
struct kasnprintf_buf *buf, *old_buf;
int ret;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->gfp = GFP_KERNEL;
ret = function(buf);
if (ret < 0) {
kfree(buf);
return ret;
}
old_buf = file->private_data;
file->private_data = buf;
kfree(old_buf);
}
if (file->private_data) {
struct kasnprintf_buf *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos, buf->buf,
buf->off);
}
return 0;
}
int debug_generic_release(struct inode *inode, struct file *file)
{
struct kasnprintf_buf *buf = file->private_data;
kfree(buf->buf);
kfree(buf);
return 0;
}
static ssize_t debug_structs_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return debug_generic_read(file, user_buf, count, ppos,
clients_debug_structs);
}
static const struct file_operations mc_debug_structs_ops = {
.read = debug_structs_read,
.llseek = default_llseek,
.release = debug_generic_release,
};
static ssize_t debug_struct_counters_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
if (!*ppos) {
int ret;
mutex_lock(&main_ctx.struct_counters_buf_mutex);
ret = snprintf(main_ctx.struct_counters_buf,
sizeof(main_ctx.struct_counters_buf),
"clients: %d\n"
"cbufs: %d\n"
"sessions: %d\n"
"wsms: %d\n"
"mmus: %d\n",
atomic_read(&g_ctx.c_clients),
atomic_read(&g_ctx.c_cbufs),
atomic_read(&g_ctx.c_sessions),
atomic_read(&g_ctx.c_wsms),
atomic_read(&g_ctx.c_mmus));
mutex_unlock(&main_ctx.struct_counters_buf_mutex);
if (ret > 0)
main_ctx.struct_counters_buf_len = ret;
}
return simple_read_from_buffer(user_buf, count, ppos,
main_ctx.struct_counters_buf,
main_ctx.struct_counters_buf_len);
}
static const struct file_operations mc_debug_struct_counters_ops = {
.read = debug_struct_counters_read,
.llseek = default_llseek,
};
static inline int device_user_init(void)
{
struct device *dev;
int ret = 0;
main_ctx.user_dev = MKDEV(MAJOR(main_ctx.device), 1);
/* Create the user node */
mc_user_init(&main_ctx.user_cdev);
/* Set the owner before cdev_add() makes the device live */
main_ctx.user_cdev.owner = THIS_MODULE;
ret = cdev_add(&main_ctx.user_cdev, main_ctx.user_dev, 1);
if (ret) {
mc_dev_err("user cdev_add failed\n");
return ret;
}
dev = device_create(main_ctx.class, NULL, main_ctx.user_dev, NULL,
MC_USER_DEVNODE);
if (IS_ERR(dev)) {
cdev_del(&main_ctx.user_cdev);
mc_dev_err("user device_create failed\n");
return PTR_ERR(dev);
}
/* Create debugfs structs entry */
debugfs_create_file("structs", 0400, g_ctx.debug_dir, NULL,
&mc_debug_structs_ops);
return 0;
}
static inline void device_user_exit(void)
{
device_destroy(main_ctx.class, main_ctx.user_dev);
cdev_del(&main_ctx.user_cdev);
}
#ifdef MC_PM_RUNTIME
static int reboot_notifier(struct notifier_block *nb, unsigned long event,
void *dummy)
{
switch (event) {
case SYS_HALT:
case SYS_POWER_OFF:
main_ctx.did_hibernate = true;
break;
}
return 0;
}
static int suspend_notifier(struct notifier_block *nb, unsigned long event,
void *dummy)
{
int ret = 0;
main_ctx.did_hibernate = false;
switch (event) {
case PM_SUSPEND_PREPARE:
return mc_scheduler_suspend();
case PM_POST_SUSPEND:
return mc_scheduler_resume();
case PM_HIBERNATION_PREPARE:
/* Try to stop the TEE nicely (ignore failure) */
mc_scheduler_suspend();
/* Make sure the TEE cannot run anymore */
mc_scheduler_stop();
/* Flush log buffer */
mc_logging_run();
break;
case PM_POST_HIBERNATION:
if (main_ctx.did_hibernate) {
/* Really did hibernate */
clients_kill_sessions();
return mobicore_start();
}
/* Did not hibernate, just restart the TEE */
ret = mc_scheduler_start();
if (!ret)
ret = mc_scheduler_resume();
}
return ret;
}
#endif /* MC_PM_RUNTIME */
static int mobicore_start(void)
{
struct mc_version_info version_info;
bool dynamic_lpae = false;
int ret;
ret = mcp_start();
if (ret) {
mc_dev_err("TEE start failed\n");
goto err_mcp;
}
ret = mc_logging_start();
if (ret) {
mc_dev_err("Log start failed\n");
goto err_log;
}
ret = mc_scheduler_start();
if (ret) {
mc_dev_err("Scheduler start failed\n");
goto err_sched;
}
ret = mc_pm_start();
if (ret) {
mc_dev_err("Power Management start failed\n");
goto err_pm;
}
/* Must be called before creating the user device node to avoid race */
ret = mcp_get_version(&version_info);
if (ret)
goto err_mcp_cmd;
/* CMP version is meaningless in this case and is thus not printed */
mc_dev_info("\n"
" product_id = %s\n"
" version_mci = 0x%08x\n"
" version_so = 0x%08x\n"
" version_mclf = 0x%08x\n"
" version_container = 0x%08x\n"
" version_mc_config = 0x%08x\n"
" version_tl_api = 0x%08x\n"
" version_dr_api = 0x%08x\n"
" version_nwd = 0x%08x\n",
version_info.product_id,
version_info.version_mci,
version_info.version_so,
version_info.version_mclf,
version_info.version_container,
version_info.version_mc_config,
version_info.version_tl_api,
version_info.version_dr_api,
version_info.version_nwd);
if (MC_VERSION_MAJOR(version_info.version_mci) > 1) {
mc_dev_err("MCI too recent for this driver\n");
/* Do not return success on a version mismatch */
ret = -EOPNOTSUPP;
goto err_version;
}
if ((MC_VERSION_MAJOR(version_info.version_mci) == 0) &&
(MC_VERSION_MINOR(version_info.version_mci) < 6)) {
mc_dev_err("MCI too old for this driver\n");
ret = -EOPNOTSUPP;
goto err_version;
}
/* Determine which features are supported */
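/*
 * Each MCI version inherits the features of all older versions, hence
 * the fall-through cascade below.
 */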
switch (version_info.version_mci) {
case MC_VERSION(1, 4): /* 310 */
dynamic_lpae = true;
/* Fall through */
case MC_VERSION(1, 3):
g_ctx.f_time = true;
/* Fall through */
case MC_VERSION(1, 2):
g_ctx.f_client_login = true;
/* Fall through */
case MC_VERSION(1, 1):
g_ctx.f_multimap = true;
/* Fall through */
case MC_VERSION(1, 0): /* 302 */
g_ctx.f_mem_ext = true;
g_ctx.f_ta_auth = true;
/* Fall through */
case MC_VERSION(0, 7):
g_ctx.f_timeout = true;
/* Fall through */
case MC_VERSION(0, 6): /* 301 */
break;
}
#ifdef CONFIG_TRUSTONIC_TEE_LPAE
if (!dynamic_lpae)
g_ctx.f_lpae = true;
#endif
mc_dev_info("SWd uses %sLPAE MMU table format\n",
g_ctx.f_lpae ? "" : "non-");
#ifdef MC_PM_RUNTIME
main_ctx.reboot_notifier.notifier_call = reboot_notifier;
ret = register_reboot_notifier(&main_ctx.reboot_notifier);
if (ret) {
mc_dev_err("reboot notifier register failed\n");
goto err_pm_notif;
}
main_ctx.pm_notifier.notifier_call = suspend_notifier;
ret = register_pm_notifier(&main_ctx.pm_notifier);
if (ret) {
unregister_reboot_notifier(&main_ctx.reboot_notifier);
mc_dev_err("PM notifier register failed\n");
goto err_pm_notif;
}
#endif
ret = device_user_init();
if (ret)
goto err_create_dev_user;
return 0;
err_create_dev_user:
#ifdef MC_PM_RUNTIME
unregister_reboot_notifier(&main_ctx.reboot_notifier);
unregister_pm_notifier(&main_ctx.pm_notifier);
err_pm_notif:
#endif
err_version:
err_mcp_cmd:
mc_pm_stop();
err_pm:
mc_scheduler_stop();
err_sched:
mc_logging_stop();
err_log:
mcp_stop();
err_mcp:
return ret;
}
static void mobicore_stop(void)
{
device_user_exit();
#ifdef MC_PM_RUNTIME
unregister_reboot_notifier(&main_ctx.reboot_notifier);
unregister_pm_notifier(&main_ctx.pm_notifier);
#endif
mc_pm_stop();
mc_scheduler_stop();
mc_logging_stop();
mcp_stop();
}
static ssize_t debug_sessions_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return debug_generic_read(file, user_buf, count, ppos,
mcp_debug_sessions);
}
static const struct file_operations mc_debug_sessions_ops = {
.read = debug_sessions_read,
.llseek = default_llseek,
.release = debug_generic_release,
};
static ssize_t debug_mcpcmds_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
return debug_generic_read(file, user_buf, count, ppos,
mcp_debug_mcpcmds);
}
static const struct file_operations mc_debug_mcpcmds_ops = {
.read = debug_mcpcmds_read,
.llseek = default_llseek,
.release = debug_generic_release,
};
static inline int device_admin_init(void)
{
struct device *dev;
int ret = 0;
ret = alloc_chrdev_region(&main_ctx.device, 0, 2, "trustonic_tee");
if (ret) {
mc_dev_err("alloc_chrdev_region failed\n");
return ret;
}
main_ctx.class = class_create(THIS_MODULE, "trustonic_tee");
if (IS_ERR(main_ctx.class)) {
mc_dev_err("class_create failed\n");
ret = PTR_ERR(main_ctx.class);
goto err_class;
}
/* Create the ADMIN node */
ret = mc_admin_init(&main_ctx.admin_cdev, mobicore_start,
mobicore_stop);
if (ret)
goto err_init;
/* As above, set the owner before cdev_add() makes the device live */
main_ctx.admin_cdev.owner = THIS_MODULE;
ret = cdev_add(&main_ctx.admin_cdev, main_ctx.device, 1);
if (ret) {
mc_dev_err("admin cdev_add failed\n");
goto err_cdev;
}
dev = device_create(main_ctx.class, NULL, main_ctx.device, NULL,
MC_ADMIN_DEVNODE);
if (IS_ERR(dev)) {
mc_dev_err("admin device_create failed\n");
ret = PTR_ERR(dev);
goto err_device;
}
/* Create debugfs sessions and MCP commands entries */
debugfs_create_file("sessions", 0400, g_ctx.debug_dir, NULL,
&mc_debug_sessions_ops);
debugfs_create_file("last_mcp_commands", 0400, g_ctx.debug_dir, NULL,
&mc_debug_mcpcmds_ops);
return 0;
err_device:
cdev_del(&main_ctx.admin_cdev);
err_cdev:
mc_admin_exit();
err_init:
class_destroy(main_ctx.class);
err_class:
unregister_chrdev_region(main_ctx.device, 2);
return ret;
}
static inline void device_admin_exit(void)
{
device_destroy(main_ctx.class, main_ctx.device);
cdev_del(&main_ctx.admin_cdev);
mc_admin_exit();
class_destroy(main_ctx.class);
unregister_chrdev_region(main_ctx.device, 2);
}
/*
* This function is called by the kernel during startup or by an insmod
* command. The device is installed and registered as a cdev, then
* interrupt and queue handling are set up.
*/
static int mobicore_probe(struct platform_device *pdev)
{
int err = 0;
if (pdev)
g_ctx.mcd->of_node = pdev->dev.of_node;
dev_set_name(g_ctx.mcd, "TEE");
/*
* Do not remove or change the following trace.
* The string "MobiCore" is used to detect if the TEE is part of the
* image.
*/
mc_dev_info("MobiCore mcDrvModuleApi version is %d.%d\n",
MCDRVMODULEAPI_VERSION_MAJOR, MCDRVMODULEAPI_VERSION_MINOR);
#ifdef MOBICORE_COMPONENT_BUILD_TAG
mc_dev_info("MobiCore %s\n", MOBICORE_COMPONENT_BUILD_TAG);
#endif
/* Hardware does not support ARM TrustZone -> Cannot continue! */
if (!has_security_extensions()) {
mc_dev_err("Hardware doesn't support ARM TrustZone!\n");
return -ENODEV;
}
/* Running in secure mode -> Cannot load the driver! */
if (is_secure_mode()) {
mc_dev_err("Running in secure MODE!\n");
return -ENODEV;
}
/* Make sure we can create debugfs entries */
g_ctx.debug_dir = debugfs_create_dir("trustonic_tee", NULL);
/* Initialize debug counters */
atomic_set(&g_ctx.c_clients, 0);
atomic_set(&g_ctx.c_cbufs, 0);
atomic_set(&g_ctx.c_sessions, 0);
atomic_set(&g_ctx.c_wsms, 0);
atomic_set(&g_ctx.c_mmus, 0);
mutex_init(&main_ctx.struct_counters_buf_mutex);
/* Create debugfs info entry */
debugfs_create_file("structs_counters", 0400, g_ctx.debug_dir, NULL,
&mc_debug_struct_counters_ops);
/* Initialize common API layer */
client_init();
/* Initialize plenty of nice features */
err = mc_fastcall_init();
if (err) {
mc_dev_err("Fastcall support init failed!\n");
goto fail_fastcall_init;
}
err = mcp_init();
if (err) {
mc_dev_err("MCP init failed!\n");
goto fail_mcp_init;
}
err = mc_logging_init();
if (err) {
mc_dev_err("Log init failed!\n");
goto fail_log_init;
}
err = mc_scheduler_init();
if (err) {
mc_dev_err("Scheduler init failed!\n");
goto fail_mc_device_sched_init;
}
/*
* Create admin dev so that daemon can already communicate with
* the driver
*/
err = device_admin_init();
if (err)
goto fail_creat_dev_admin;
return 0;
fail_creat_dev_admin:
mc_scheduler_exit();
fail_mc_device_sched_init:
mc_logging_exit();
fail_log_init:
mcp_exit();
fail_mcp_init:
mc_fastcall_exit();
fail_fastcall_init:
debugfs_remove_recursive(g_ctx.debug_dir);
return err;
}
#ifdef MC_DEVICE_PROPNAME
static const struct of_device_id of_match_table[] = {
{ .compatible = MC_DEVICE_PROPNAME },
{ }
};
static struct platform_driver mc_plat_driver = {
.probe = mobicore_probe,
.driver = {
.name = "mcd",
.owner = THIS_MODULE,
.of_match_table = of_match_table,
}
};
#endif /* MC_DEVICE_PROPNAME */
static int __init mobicore_init(void)
{
#ifdef MC_DEVICE_PROPNAME
return platform_driver_register(&mc_plat_driver);
#else
return mobicore_probe(NULL);
#endif
}
static void __exit mobicore_exit(void)
{
#ifdef MC_DEVICE_PROPNAME
platform_driver_unregister(&mc_plat_driver);
#endif
device_admin_exit();
mc_scheduler_exit();
mc_logging_exit();
mcp_exit();
mc_fastcall_exit();
debugfs_remove_recursive(g_ctx.debug_dir);
}
module_init(mobicore_init);
module_exit(mobicore_exit);
MODULE_AUTHOR("Trustonic Limited");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MobiCore driver");


@ -0,0 +1,91 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_MAIN_H_
#define _MC_MAIN_H_
#include <linux/slab.h> /* gfp_t */
#include <linux/fs.h> /* struct inode and struct file */
#define MC_VERSION(major, minor) \
(((major & 0x0000ffff) << 16) | (minor & 0x0000ffff))
#define MC_VERSION_MAJOR(x) ((x) >> 16)
#define MC_VERSION_MINOR(x) ((x) & 0xffff)
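/* e.g. MC_VERSION(1, 4) == 0x00010004, so major = 1 and minor = 4 */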
#define mc_dev_err(fmt, ...) \
dev_err(g_ctx.mcd, "%s: " fmt, __func__, ##__VA_ARGS__)
#define mc_dev_info(fmt, ...) \
dev_info(g_ctx.mcd, "%s: " fmt, __func__, ##__VA_ARGS__)
#ifdef DEBUG
#define mc_dev_devel(fmt, ...) \
dev_info(g_ctx.mcd, "%s: " fmt, __func__, ##__VA_ARGS__)
#else /* DEBUG */
#define mc_dev_devel(...) do {} while (0)
#endif /* !DEBUG */
/* MobiCore Driver Kernel Module context data. */
struct mc_device_ctx {
struct device *mcd;
/* debugfs root */
struct dentry *debug_dir;
/* Features */
/* - SWd uses LPAE MMU table format */
bool f_lpae;
/* - SWd can set a time out to get scheduled at a future time */
bool f_timeout;
/* - SWd supports memory extension which allows for bigger TAs */
bool f_mem_ext;
/* - SWd supports TA authorisation */
bool f_ta_auth;
/* - SWd can map several buffers at once */
bool f_multimap;
/* - SWd supports GP client authentication */
bool f_client_login;
/* - SWd needs time updates */
bool f_time;
/* Debug counters */
atomic_t c_clients;
atomic_t c_cbufs;
atomic_t c_sessions;
atomic_t c_wsms;
atomic_t c_mmus;
};
extern struct mc_device_ctx g_ctx;
/* Debug stuff */
struct kasnprintf_buf {
gfp_t gfp;
void *buf;
int size;
int off;
};
extern __printf(2, 3)
int kasnprintf(struct kasnprintf_buf *buf, const char *fmt, ...);
ssize_t debug_generic_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos,
int (*function)(struct kasnprintf_buf *buf));
int debug_generic_release(struct inode *inode, struct file *file);
static inline int kref_read(struct kref *kref)
{
return atomic_read(&kref->refcount);
}
#endif /* _MC_MAIN_H_ */


@ -0,0 +1,150 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCIFC_H_
#define MCIFC_H_
#include "platform.h"
/** @name MobiCore FastCall Defines
* Defines for the two different FastCall's.
*/
/** @{ */
/* --- global ---- */
#define MC_FC_INVALID ((u32)0) /**< Invalid FastCall ID */
#if defined(CONFIG_ARM64) && !defined(MC_ARMV7_FC)
/* These should be handled as 64-bit FCs; for now they are more like 32 bits... */
#define MC_FC_STD64_BASE ((u32)0xFF000000)
#define MC_FC_STD64(x) ((u32)(MC_FC_STD64_BASE + (x)))
#define MC_FC_INIT MC_FC_STD64(1) /**< Initializing FastCall. */
#define MC_FC_INFO MC_FC_STD64(2) /**< Info FastCall. */
#define MC_FC_MEM_TRACE MC_FC_STD64(10) /**< Enable SWd tracing via memory */
#define MC_FC_SWAP_CPU MC_FC_STD64(54) /**< Change new active Core */
#else
#define MC_FC_INIT ((u32)(-1)) /**< Initializing FastCall. */
#define MC_FC_INFO ((u32)(-2)) /**< Info FastCall. */
#define MC_FC_MEM_TRACE ((u32)(-31)) /**< Enable SWd tracing via memory */
#define MC_FC_SWAP_CPU ((u32)(0x84000005)) /**< Change new active Core */
#endif
/** @} */
/** @name MobiCore SMC Defines
* Defines the different secure monitor calls (SMC) for world switching.
* @{ */
/**< Yield to switch from NWd to SWd. */
#define MC_SMC_N_YIELD 3
/**< SIQ to switch from NWd to SWd. */
#define MC_SMC_N_SIQ 4
/** @} */
/** @name MobiCore status
* MobiCore status information.
* @{ */
/**< MobiCore is not yet initialized. FastCall FcInit() to set up MobiCore.*/
#define MC_STATUS_NOT_INITIALIZED 0
/**< Bad parameters have been passed in FcInit(). */
#define MC_STATUS_BAD_INIT 1
/**< MobiCore did initialize properly. */
#define MC_STATUS_INITIALIZED 2
/**< MobiCore kernel halted due to an unrecoverable exception. Further
* information is available extended info */
#define MC_STATUS_HALT 3
/** @} */
/** @name Extended Info Identifiers
* Extended info parameters for MC_FC_INFO to obtain further information
* depending on MobiCore state.
* @{ */
/**< Version of the MobiCore Control Interface (MCI) */
#define MC_EXT_INFO_ID_MCI_VERSION 0
/**< MobiCore control flags */
#define MC_EXT_INFO_ID_FLAGS 1
/**< MobiCore halt condition code */
#define MC_EXT_INFO_ID_HALT_CODE 2
/**< MobiCore halt condition instruction pointer */
#define MC_EXT_INFO_ID_HALT_IP 3
/**< MobiCore fault counter */
#define MC_EXT_INFO_ID_FAULT_CNT 4
/**< MobiCore last fault cause */
#define MC_EXT_INFO_ID_FAULT_CAUSE 5
/**< MobiCore last fault meta */
#define MC_EXT_INFO_ID_FAULT_META 6
/**< MobiCore last fault threadid */
#define MC_EXT_INFO_ID_FAULT_THREAD 7
/**< MobiCore last fault instruction pointer */
#define MC_EXT_INFO_ID_FAULT_IP 8
/**< MobiCore last fault stack pointer */
#define MC_EXT_INFO_ID_FAULT_SP 9
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_DFSR 10
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_ADFSR 11
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_DFAR 12
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_IFSR 13
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_AIFSR 14
/**< MobiCore last fault ARM arch information */
#define MC_EXT_INFO_ID_FAULT_ARCH_IFAR 15
/**< MobiCore configured by Daemon via fc_init flag */
#define MC_EXT_INFO_ID_MC_CONFIGURED 16
/**< MobiCore scheduling status: idle/non-idle */
#define MC_EXT_INFO_ID_MC_SCHED_STATUS 17
/**< MobiCore runtime status: initialized, halted */
#define MC_EXT_INFO_ID_MC_STATUS 18
/**< MobiCore exception handler last partner */
#define MC_EXT_INFO_ID_MC_EXC_PARTNER 19
/**< MobiCore exception handler last peer */
#define MC_EXT_INFO_ID_MC_EXC_IPCPEER 20
/**< MobiCore exception handler last IPC message */
#define MC_EXT_INFO_ID_MC_EXC_IPCMSG 21
/**< MobiCore exception handler last IPC data */
#define MC_EXT_INFO_ID_MC_EXC_IPCDATA 22
/**< MobiCore exception handler last UUID (uses 4 slots: 23 to 26) */
#define MC_EXT_INFO_ID_MC_EXC_UUID 23
#define MC_EXT_INFO_ID_MC_EXC_UUID1 24
#define MC_EXT_INFO_ID_MC_EXC_UUID2 25
#define MC_EXT_INFO_ID_MC_EXC_UUID3 26
/** @} */
/** @name FastCall return values
* Return values of the MobiCore FastCalls.
* @{ */
/**< No error. Everything worked fine. */
#define MC_FC_RET_OK 0
/**< FastCall was not successful. */
#define MC_FC_RET_ERR_INVALID 1
/**< MobiCore has already been initialized. */
#define MC_FC_RET_ERR_ALREADY_INITIALIZED 5
/** @} */
/** @name Init FastCall flags
* Return flags of the Init FastCall.
* @{ */
/**< SWd uses LPAE MMU table format. */
#define MC_FC_INIT_FLAG_LPAE BIT(0)
/** @} */
#endif /** MCIFC_H_ */
/** @} */


@ -0,0 +1,523 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCP_H_
#define MCP_H_
#include "mci/mcloadformat.h"
/** Indicates a response */
#define FLAG_RESPONSE BIT(31)
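/* A response echoes the command ID with FLAG_RESPONSE set (see rsp_header) */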
/** Maximum number of buffers that can be mapped at once */
#define MCP_MAP_MAX_BUF 4
/** MobiCore Return Code Defines.
* List of the possible MobiCore return codes.
*/
enum mcp_result {
/** Memory has successfully been mapped */
MC_MCP_RET_OK = 0,
/** The session ID is invalid */
MC_MCP_RET_ERR_INVALID_SESSION = 1,
/** The UUID of the Trustlet is unknown */
MC_MCP_RET_ERR_UNKNOWN_UUID = 2,
/** The ID of the driver is unknown */
MC_MCP_RET_ERR_UNKNOWN_DRIVER_ID = 3,
/** No more sessions are allowed */
MC_MCP_RET_ERR_NO_MORE_SESSIONS = 4,
/** The container is invalid */
MC_MCP_RET_ERR_CONTAINER_INVALID = 5,
/** The Trustlet is invalid */
MC_MCP_RET_ERR_TRUSTLET_INVALID = 6,
/** The memory block has already been mapped before */
MC_MCP_RET_ERR_ALREADY_MAPPED = 7,
/** Alignment or length error in the command parameters */
MC_MCP_RET_ERR_INVALID_PARAM = 8,
/** No space left in the virtual address space of the session */
MC_MCP_RET_ERR_OUT_OF_RESOURCES = 9,
/** WSM type unknown or broken WSM */
MC_MCP_RET_ERR_INVALID_WSM = 10,
/** unknown error */
MC_MCP_RET_ERR_UNKNOWN = 11,
/** Length of map invalid */
MC_MCP_RET_ERR_INVALID_MAPPING_LENGTH = 12,
/** Map can only be applied to Trustlet session */
MC_MCP_RET_ERR_MAPPING_TARGET = 13,
/** Couldn't open crypto session */
MC_MCP_RET_ERR_OUT_OF_CRYPTO_RESOURCES = 14,
/** System Trustlet signature verification failed */
MC_MCP_RET_ERR_SIGNATURE_VERIFICATION_FAILED = 15,
/** System Trustlet public key is wrong */
MC_MCP_RET_ERR_WRONG_PUBLIC_KEY = 16,
/** Wrong container type(s) */
MC_MCP_RET_ERR_CONTAINER_TYPE_MISMATCH = 17,
/** Container is locked (or not activated) */
MC_MCP_RET_ERR_CONTAINER_LOCKED = 18,
/** SPID is not registered with root container */
MC_MCP_RET_ERR_SP_NO_CHILD = 19,
/** UUID is not registered with sp container */
MC_MCP_RET_ERR_TL_NO_CHILD = 20,
/** Unwrapping of root container failed */
MC_MCP_RET_ERR_UNWRAP_ROOT_FAILED = 21,
/** Unwrapping of service provider container failed */
MC_MCP_RET_ERR_UNWRAP_SP_FAILED = 22,
/** Unwrapping of Trustlet container failed */
MC_MCP_RET_ERR_UNWRAP_TRUSTLET_FAILED = 23,
/** Container version mismatch */
MC_MCP_RET_ERR_CONTAINER_VERSION_MISMATCH = 24,
/** Decryption of service provider trustlet failed */
MC_MCP_RET_ERR_SP_TL_DECRYPTION_FAILED = 25,
/** Hash check of service provider trustlet failed */
MC_MCP_RET_ERR_SP_TL_HASH_CHECK_FAILED = 26,
/** Activation/starting of task failed */
MC_MCP_RET_ERR_LAUNCH_TASK_FAILED = 27,
/** Closing of task not yet possible, try again later */
MC_MCP_RET_ERR_CLOSE_TASK_FAILED = 28,
/**< Service is blocked and a session cannot be opened to it */
MC_MCP_RET_ERR_SERVICE_BLOCKED = 29,
/**< Service is locked and a session cannot be opened to it */
MC_MCP_RET_ERR_SERVICE_LOCKED = 30,
/**< Service was forcefully killed (due to an administrative command) */
MC_MCP_RET_ERR_SERVICE_KILLED = 31,
/** The command is unknown */
MC_MCP_RET_ERR_UNKNOWN_COMMAND = 50,
/** The command data is invalid */
MC_MCP_RET_ERR_INVALID_DATA = 51
};
/** Possible MCP Command IDs
* Command ID must be between 0 and 0x7FFFFFFF.
*/
enum cmd_id {
/** Invalid command ID */
MC_MCP_CMD_ID_INVALID = 0x00,
/** Open a session */
MC_MCP_CMD_OPEN_SESSION = 0x01,
/** Close an existing session */
MC_MCP_CMD_CLOSE_SESSION = 0x03,
/** Map WSM to session */
MC_MCP_CMD_MAP = 0x04,
/** Unmap WSM from session */
MC_MCP_CMD_UNMAP = 0x05,
/** Prepare for suspend */
MC_MCP_CMD_SUSPEND = 0x06,
/** Resume from suspension */
MC_MCP_CMD_RESUME = 0x07,
/** Get MobiCore version information */
MC_MCP_CMD_GET_MOBICORE_VERSION = 0x09,
/** Close MCP and unmap MCI */
MC_MCP_CMD_CLOSE_MCP = 0x0A,
/** Load token for device attestation */
MC_MCP_CMD_LOAD_TOKEN = 0x0B,
/** Check that TA can be loaded */
MC_MCP_CMD_CHECK_LOAD_TA = 0x0C,
/** Map multiple WSMs to session */
MC_MCP_CMD_MULTIMAP = 0x0D,
/** Unmap multiple WSMs from session */
MC_MCP_CMD_MULTIUNMAP = 0x0E,
};
/*
* Types of WSM known to the MobiCore.
*/
#define WSM_TYPE_MASK 0xFF
#define WSM_INVALID 0 /** Invalid memory type */
#define WSM_L2 2 /** Buffer mapping uses L2/L3 table */
#define WSM_L1 3 /** Buffer mapping uses fake L1 table */
/*
* Magic number used to identify if Open Command supports GP client
* authentication.
*/
#define MC_GP_CLIENT_AUTH_MAGIC 0x47504131 /* "GPA1" */
/*
* Initialisation values flags
*/
#define MC_IV_FLAG_IRQ BIT(0) /* Set if IRQ is present */
#define MC_IV_FLAG_TIME BIT(1) /* Set if GP TIME is supported */
struct init_values {
u32 flags;
u32 irq;
u32 time_ofs;
u32 time_len;
};
/** Command header.
* It just contains the command ID. Only values specified in cmd_id are
* allowed as command IDs. If the command ID is unspecified the MobiCore
* returns an empty response with the result set to
* MC_MCP_RET_ERR_UNKNOWN_COMMAND.
*/
struct cmd_header {
enum cmd_id cmd_id; /** Command ID of the command */
};
/** Response header.
* MobiCore will reply to every MCP command with an MCP response. Like the MCP
* command the response consists of a header followed by response data. The
* response is written to the same memory location as the MCP command.
*/
struct rsp_header {
u32 rsp_id; /** Command ID | FLAG_RESPONSE */
enum mcp_result result; /** Result of the command execution */
};
/** @defgroup CMD MCP Commands
*/
/** @defgroup ASMCMD Administrative Commands
*/
/** @defgroup MCPGETMOBICOREVERSION GET_MOBICORE_VERSION
* Get MobiCore version info.
*
*/
/** Get MobiCore Version Command */
struct cmd_get_version {
struct cmd_header cmd_header; /** Command header */
};
/** Get MobiCore Version Command Response */
struct rsp_get_version {
struct rsp_header rsp_header; /** Response header */
struct mc_version_info version_info; /** MobiCore version info */
};
/** @defgroup POWERCMD Power Management Commands
*/
/** @defgroup MCPSUSPEND SUSPEND
* Prepare MobiCore suspension.
* This command allows MobiCore and MobiCore drivers to release or clean
* resources and save device state.
*
*/
/** Suspend Command */
struct cmd_suspend {
struct cmd_header cmd_header; /** Command header */
};
/** Suspend Command Response */
struct rsp_suspend {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPRESUME RESUME
* Resume MobiCore from suspension.
* This command allows MobiCore and MobiCore drivers to reinitialize hardware
* affected by suspension.
*
*/
/** Resume Command */
struct cmd_resume {
struct cmd_header cmd_header; /** Command header */
};
/** Resume Command Response */
struct rsp_resume {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup SESSCMD Session Management Commands
*/
/** @defgroup MCPOPEN OPEN
* Load and open a session to a Trustlet.
* The OPEN command loads Trustlet data to the MobiCore context and opens a
* session to the Trustlet. If wsm_data_type is WSM_INVALID MobiCore tries to
* start a pre-installed Trustlet associated with the uuid passed. The uuid
* passed must match the uuid contained in the load data (if available).
* On success, MobiCore returns the session ID which can be used for further
* communication.
*/
/** GP client authentication data */
struct cmd_open_data {
u32 mclf_magic; /** ASCII "MCLF" on older versions */
struct identity identity; /** Login method and data */
};
/** Open Command */
struct cmd_open {
struct cmd_header cmd_header; /** Command header */
struct mc_uuid_t uuid; /** Service UUID */
u8 unused[4]; /** Padding to be 64-bit aligned */
u64 adr_tci_buffer; /** Physical address of the TCI MMU */
u64 adr_load_data; /** Physical address of the data MMU */
u32 ofs_tci_buffer; /** Offset to the data */
u32 len_tci_buffer; /** Length of the TCI */
u32 wsmtype_tci; /** Type of WSM used for the TCI */
u32 wsm_data_type; /** Type of MMU */
u32 ofs_load_data; /** Offset to the data */
u32 len_load_data; /** Length of the data to load */
union {
struct cmd_open_data cmd_open_data; /** Client login data */
union mclf_header tl_header; /** Service header */
};
u32 is_gpta; /** true if looking for an SD/GP-TA */
};
/** Open Command Response */
struct rsp_open {
struct rsp_header rsp_header; /** Response header */
u32 session_id; /** Session ID */
};
/** TA Load Check Command */
struct cmd_check_load {
struct cmd_header cmd_header; /** Command header */
struct mc_uuid_t uuid; /** Service UUID */
u64 adr_load_data; /** Physical address of the data */
u32 wsm_data_type; /** Type of MMU */
u32 ofs_load_data; /** Offset to the data */
u32 len_load_data; /** Length of the data to load */
union mclf_header tl_header; /** Service header */
};
/** TA Load Check Response */
struct rsp_check_load {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPCLOSE CLOSE
* Close an existing session to a Trustlet.
* The CLOSE command terminates a session and frees all resources in the
* MobiCore system which are currently occupied by the session. Before closing
* the session, the MobiCore runtime management waits until all pending
* operations, like calls to drivers, invoked by the Trustlet have been
* terminated. Mapped memory will automatically be unmapped from the MobiCore
* context. The NWd is responsible for processing the freed memory according to
* the Rich-OS needs.
*
*/
/** Close Command */
struct cmd_close {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
};
/** Close Command Response */
struct rsp_close {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPMAP MAP
* Map a portion of memory to a session.
* The MAP command provides a block of memory to the context of a service.
* The memory then becomes world-shared memory (WSM).
* The only allowed memory type here is WSM_L2.
*/
/** Map Command */
struct cmd_map {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
u32 wsm_type; /** Type of MMU */
u32 ofs_buffer; /** Offset to the payload */
u64 adr_buffer; /** Physical address of the MMU */
u32 len_buffer; /** Length of the buffer */
};
#define MCP_MAP_MAX 0x100000 /** Maximum length for MCP map */
/** Map Command Response */
struct rsp_map {
struct rsp_header rsp_header; /** Response header */
/** Virtual address the WSM is mapped to, may include an offset! */
u32 secure_va;
};
/** @defgroup MCPUNMAP UNMAP
* Unmap a portion of world-shared memory from a session.
* The UNMAP command is used to unmap a previously mapped block of
* world shared memory from the context of a session.
*
* Attention: The memory block will be immediately unmapped from the specified
* session. If the service is still accessing the memory, the service will
* trigger a segmentation fault.
*/
/** Unmap Command */
struct cmd_unmap {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
u32 wsm_type; /** Type of WSM used of the memory */
/** Virtual address the WSM is mapped to, may include an offset! */
u32 secure_va;
u32 virtual_buffer_len; /** Length of virtual buffer */
};
/** Unmap Command Response */
struct rsp_unmap {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPLOADTOKEN
* Load a token from the normal world and share it with the TEE
* If something fails, the device attestation functionality will be disabled
*/
/** Load Token */
struct cmd_load_token {
struct cmd_header cmd_header; /** Command header */
u32 wsm_data_type; /** Type of MMU */
u64 adr_load_data; /** Physical address of the MMU */
u64 ofs_load_data; /** Offset to the data */
u64 len_load_data; /** Length of the data */
};
/** Load Token Command Response */
struct rsp_load_token {
struct rsp_header rsp_header; /** Response header */
};
/** @defgroup MCPMULTIMAP MULTIMAP
* Map up to MCP_MAP_MAX_BUF portions of memory to a session.
* The MULTIMAP command provides MCP_MAP_MAX_BUF blocks of memory to the context
* of a service.
* The memory then becomes world-shared memory (WSM).
* The only allowed memory type here is WSM_L2.
* @{ */
/** NWd physical buffer description
*
* Note: this information comes from the NWd kernel, so it should not be
* trusted any more than the NWd kernel itself.
*/
struct buffer_map {
u64 adr_buffer; /**< Physical address */
u32 ofs_buffer; /**< Offset of buffer */
u32 len_buffer; /**< Length of buffer */
u32 wsm_type; /**< Type of address */
};
/** MultiMap Command */
struct cmd_multimap {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
struct buffer_map bufs[MC_MAP_MAX]; /** NWd buffer info */
};
/** Multimap Command Response */
struct rsp_multimap {
struct rsp_header rsp_header; /** Response header */
/** Virtual address the WSM is mapped to, may include an offset! */
u64 secure_va[MC_MAP_MAX];
};
/** @defgroup MCPMULTIUNMAP MULTIUNMAP
* Unmap up to MCP_MAP_MAX_BUF portions of world-shared memory from a session.
* The MULTIUNMAP command is used to unmap MCP_MAP_MAX_BUF previously mapped
* blocks of world shared memory from the context of a session.
*
* Attention: The memory blocks will be immediately unmapped from the specified
* session. If the service is still accessing the memory, the service will
* trigger a segmentation fault.
* @{ */
/** NWd mapped buffer description
*
* Note: this information comes from the NWd kernel, so it should not be
* trusted any more than the NWd kernel itself.
*/
struct buffer_unmap {
u64 secure_va; /**< Secure virtual address */
u32 len_buffer; /**< Length of buffer */
};
/** Multiunmap Command */
struct cmd_multiunmap {
struct cmd_header cmd_header; /** Command header */
u32 session_id; /** Session ID */
struct buffer_unmap bufs[MC_MAP_MAX]; /** NWd buffer info */
};
/** Multiunmap Command Response */
struct rsp_multiunmap {
struct rsp_header rsp_header; /** Response header */
};
/** Structure of the MCP buffer */
union mcp_message {
struct init_values init_values; /** Initialisation values */
struct cmd_header cmd_header; /** Command header */
struct rsp_header rsp_header;
struct cmd_open cmd_open; /** Load and open service */
struct rsp_open rsp_open;
struct cmd_close cmd_close; /** Close command */
struct rsp_close rsp_close;
struct cmd_map cmd_map; /** Map WSM to service */
struct rsp_map rsp_map;
struct cmd_unmap cmd_unmap; /** Unmap WSM from service */
struct rsp_unmap rsp_unmap;
struct cmd_suspend cmd_suspend; /** Suspend MobiCore */
struct rsp_suspend rsp_suspend;
struct cmd_resume cmd_resume; /** Resume MobiCore */
struct rsp_resume rsp_resume;
struct cmd_get_version cmd_get_version; /** Get MobiCore Version */
struct rsp_get_version rsp_get_version;
struct cmd_load_token cmd_load_token; /** Load token */
struct rsp_load_token rsp_load_token;
struct cmd_check_load cmd_check_load; /** TA load check */
struct rsp_check_load rsp_check_load;
struct cmd_multimap cmd_multimap; /** Map multiple WSMs */
struct rsp_multimap rsp_multimap;
struct cmd_multiunmap cmd_multiunmap; /** Unmap multiple WSMs */
struct rsp_multiunmap rsp_multiunmap;
};
/** Minimum MCP buffer length (in bytes) */
#define MIN_MCP_LEN sizeof(union mcp_message)
#define MC_FLAG_NO_SLEEP_REQ 0
#define MC_FLAG_REQ_TO_SLEEP 1
#define MC_STATE_NORMAL_EXECUTION 0
#define MC_STATE_READY_TO_SLEEP 1
struct sleep_mode {
u16 sleep_req; /** Ask SWd to get ready to sleep */
u16 ready_to_sleep; /** SWd is now ready to sleep */
};
/** MobiCore status flags */
struct mcp_flags {
/** If not MC_FLAG_SCHEDULE_IDLE, MobiCore needs scheduling */
u32 schedule;
struct sleep_mode sleep_mode;
/** Secure-world sleep timeout in milliseconds */
s32 timeout_ms;
/** Reserved for future use: Must not be interpreted */
u32 RFU3;
};
/** MobiCore is idle. No scheduling required */
#define MC_FLAG_SCHEDULE_IDLE 0
/** MobiCore is non idle, scheduling is required */
#define MC_FLAG_SCHEDULE_NON_IDLE 1
/** MCP buffer structure */
struct mcp_buffer {
struct mcp_flags flags; /** MobiCore Flags */
union mcp_message message; /** MCP message buffer */
};
#endif /* MCP_H_ */


@ -0,0 +1,90 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef NQ_H_
#define NQ_H_
/** \name NQ Size Defines
* Minimum and maximum count of elements in the notification queue.
*/
#define MIN_NQ_ELEM 1 /** Minimum notification queue elements */
#define MAX_NQ_ELEM 64 /** Maximum notification queue elements */
/* Compute notification queue size in bytes from its number of elements */
#define NQ_SIZE(n) (2 * (sizeof(struct notification_queue_header) \
+ (n) * sizeof(struct notification)))
/** \name NQ Length Defines
* Note that there is one queue for NWd->SWd and one queue for SWd->NWd
*/
/** Minimum size for the notification queue data structure */
#define MIN_NQ_LEN NQ_SIZE(MIN_NQ_ELEM)
/** Maximum size for the notification queue data structure */
#define MAX_NQ_LEN NQ_SIZE(MAX_NQ_ELEM)
/** \name Session ID Defines
* Standard Session IDs.
*/
/** MCP session ID, used to communicate with MobiCore (e.g. to start/stop TA) */
#define SID_MCP 0
/** Invalid session id, returned in case of error */
#define SID_INVALID 0xffffffff
/** Notification data structure */
struct notification {
u32 session_id; /** Session ID */
s32 payload; /** Additional notification info */
};
/** Notification payload codes.
 * 0 indicates a plain notification,
* a positive value is a termination reason from the task,
* a negative value is a termination reason from MobiCore.
* Possible negative values are given below.
*/
enum notification_payload {
/** task terminated, but exit code is invalid */
ERR_INVALID_EXIT_CODE = -1,
/** task terminated due to session end, no exit code available */
ERR_SESSION_CLOSE = -2,
/** task terminated due to invalid operation */
ERR_INVALID_OPERATION = -3,
/** session ID is unknown */
ERR_INVALID_SID = -4,
/** session is not active */
ERR_SID_NOT_ACTIVE = -5,
/** session was force-killed (due to an administrative command). */
ERR_SESSION_KILLED = -6,
};
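/*
 * Illustrative helper (not driver code): classify a notification payload
 * following the convention above.
 */
static inline const char *nq_payload_origin(s32 payload)
{
	if (!payload)
		return "plain notification";
	return payload > 0 ? "task exit code" : "MobiCore error";
}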
/** Declaration of the notification queue header.
 * Layout as specified in the data structure specification.
*/
struct notification_queue_header {
u32 write_cnt; /** Write counter */
u32 read_cnt; /** Read counter */
u32 queue_size; /** Queue size */
};
/** Queue struct which defines a queue object.
 * The queue is accessed through the queue_<operation> functions.
 * The element count must be a power of two, and its exponent must be
 * smaller than the bit width of u32 (i.e. 32).
*/
struct notification_queue {
struct notification_queue_header hdr; /** Queue header */
struct notification notification[MIN_NQ_ELEM]; /** Elements */
};
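/*
 * Illustrative sketch (not driver code): consume one notification, assuming
 * hdr.queue_size holds the element count, which is a power of two as
 * required above (so a mask replaces the modulo). The real driver must also
 * care about memory barriers and the SWd updating write_cnt concurrently;
 * notification[] is allocated with the real element count.
 */
static inline bool nq_pop(struct notification_queue *q,
			  struct notification *nf)
{
	if (q->hdr.write_cnt == q->hdr.read_cnt)
		return false;	/* Queue is empty */
	*nf = q->notification[q->hdr.read_cnt & (q->hdr.queue_size - 1)];
	q->hdr.read_cnt++;
	return true;
}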
#endif /** NQ_H_ */
View file
@ -0,0 +1,27 @@
/*
* Copyright (c) 2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCITIME_H_
#define MCITIME_H_
/*
* Trustonic TEE RICH OS Time:
* Seconds and nanoseconds since Jan 1, 1970, UTC
*/
struct mcp_time {
u64 seconds;
u64 nsec;
};
#endif /* MCITIME_H_ */
View file
@ -0,0 +1,134 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef MCLOADFORMAT_H_
#define MCLOADFORMAT_H_
/** Trustlet Blob length info */
#define MC_TLBLOBLEN_MAGIC 0x7672746C /* Magic for SWd: vrtl */
#define MAX_SO_CONT_SIZE 512 /* Max size for a container */
/** MCLF flags */
/**< Loaded service cannot be unloaded from MobiCore. */
#define MC_SERVICE_HEADER_FLAGS_PERMANENT BIT(0)
/**< Service has no WSM control interface. */
#define MC_SERVICE_HEADER_FLAGS_NO_CONTROL_INTERFACE BIT(1)
/**< Service can be debugged. */
#define MC_SERVICE_HEADER_FLAGS_DEBUGGABLE BIT(2)
/**< New-layout trusted application or trusted driver. */
#define MC_SERVICE_HEADER_FLAGS_EXTENDED_LAYOUT BIT(3)
/** Service type.
* The service type defines the type of executable.
*/
enum service_type {
SERVICE_TYPE_ILLEGAL = 0,
SERVICE_TYPE_DRIVER = 1,
SERVICE_TYPE_SP_TRUSTLET = 2,
SERVICE_TYPE_SYSTEM_TRUSTLET = 3,
SERVICE_TYPE_MIDDLEWARE = 4,
SERVICE_TYPE_LAST_ENTRY = 5,
};
/**
* Descriptor for a memory segment.
*/
struct segment_descriptor {
u32 start; /**< Virtual start address */
u32 len; /**< Segment length in bytes */
};
/**
* MCLF intro for data structure identification.
* Must be the first element of a valid MCLF file.
*/
struct mclf_intro {
u32 magic; /**< Header magic value ASCII "MCLF" */
	u32 version;	/**< Version of the MCLF header struct */
};
/**
 * @defgroup MCLF_VER_V2 MCLF Version 2
* @ingroup MCLF_VER
*
* @addtogroup MCLF_VER_V2
*/
/*
* GP TA identity.
*/
struct identity {
/**< GP TA login type */
u32 login_type;
/**< GP TA login data */
u8 login_data[16];
};
/**
* Version 2.1/2.2 MCLF header.
*/
struct mclf_header_v2 {
	/**< MCLF header starts with the mandatory intro */
struct mclf_intro intro;
/**< Service flags */
u32 flags;
/**< Type of memory the service must be executed from */
u32 mem_type;
/**< Type of service */
enum service_type service_type;
/**< Number of instances which can be run simultaneously */
u32 num_instances;
/**< Loadable service unique identifier (UUID) */
struct mc_uuid_t uuid;
/**< If the service_type is SERVICE_TYPE_DRIVER the Driver ID is used */
u32 driver_id;
/**<
* Number of threads (N) in a service:
* SERVICE_TYPE_SP_TRUSTLET: N = 1
* SERVICE_TYPE_SYSTEM_TRUSTLET: N = 1
* SERVICE_TYPE_DRIVER: N >= 1
*/
u32 num_threads;
/**< Virtual text segment */
struct segment_descriptor text;
/**< Virtual data segment */
struct segment_descriptor data;
	/**< Length of the BSS segment in bytes. MUST be at least 8 bytes */
u32 bss_len;
/**< Virtual start address of service code */
u32 entry;
/**< Version of the interface the driver exports */
u32 service_version;
};
/**
* @addtogroup MCLF
*/
/** MCLF header */
union mclf_header {
/**< Intro for data identification */
struct mclf_intro intro;
/**< Version 2 header */
struct mclf_header_v2 mclf_header_v2;
};
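/*
 * Illustrative sketch (not driver code): identify an MCLF binary by its
 * intro, whose magic is the ASCII string "MCLF" as noted above. Assumes
 * <linux/string.h> for memcmp.
 */
static inline bool mclf_intro_valid(const union mclf_header *hdr)
{
	return !memcmp(&hdr->intro.magic, "MCLF", sizeof(hdr->intro.magic));
}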
struct mc_blob_len_info {
u32 magic; /**< New blob format magic number */
u32 root_size; /**< Root container size */
u32 sp_size; /**< SP container size */
u32 ta_size; /**< TA container size */
	u32 reserved[4];	/**< Reserved for future use */
};
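/*
 * Illustrative sketch (not driver code): total container overhead carried
 * by a blob, valid only when the magic matches MC_TLBLOBLEN_MAGIC.
 */
static inline u32 mc_blob_containers_len(const struct mc_blob_len_info *info)
{
	if (info->magic != MC_TLBLOBLEN_MAGIC)
		return 0;
	return info->root_size + info->sp_size + info->ta_size;
}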
#endif /* MCLOADFORMAT_H_ */
File diff suppressed because it is too large
View file
@ -0,0 +1,136 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_MCP_H_
#define _MC_MCP_H_
#include "mci/mcloadformat.h" /* struct identity */
/* Structure to hold the TA/driver descriptor to pass to MCP */
struct tee_object {
u32 length; /* Total length */
u32 header_length; /* Length of header before payload */
u8 data[]; /* Header followed by payload */
};
/* Structure to hold all mapped buffer data to pass to MCP */
struct mcp_buffer_map {
u64 phys_addr; /** Page-aligned physical address */
	u64 secure_va;	/** Page-aligned secure virtual address */
u32 offset; /** Data offset inside the first page */
u32 length; /** Length of the data */
u32 type; /** Type of MMU */
};
struct mcp_session {
/* Work descriptor to handle delayed closing, set by upper layer */
struct work_struct close_work;
/* Sessions list (protected by mcp sessions_lock) */
struct list_head list;
/* Notifications list (protected by mcp notifications_mutex) */
struct list_head notifications_list;
/* Notification waiter lock */
struct mutex notif_wait_lock; /* Only one at a time */
/* Notification debug (protected by mcp notifications_mutex) */
enum mcp_notification_state {
MCP_NOTIF_IDLE, /* Nothing happened yet */
MCP_NOTIF_QUEUED, /* Notification in overflow queue */
MCP_NOTIF_SENT, /* Notification in send queue */
MCP_NOTIF_RECEIVED, /* Notification received */
MCP_NOTIF_CONSUMED, /* Notification reported to CA */
MCP_NOTIF_DEAD, /* Error reported to CA */
} notif_state;
/* Notification received */
struct completion completion;
/* Notification lock */
struct mutex exit_code_lock;
/* Last notification */
s32 exit_code;
/* Session id */
u32 id;
/* Session state (protected by mcp sessions_lock) */
enum mcp_session_state {
MCP_SESSION_RUNNING,
MCP_SESSION_CLOSE_FAILED,
MCP_SESSION_CLOSE_REQUESTED,
MCP_SESSION_CLOSE_NOTIFIED,
MCP_SESSION_CLOSING_GP,
MCP_SESSION_CLOSED,
} state;
/* This TA is of Global Platform type, set by upper layer */
bool is_gp;
/* GP TAs have login information */
struct identity identity;
};
/* Init for the mcp_session structure */
void mcp_session_init(struct mcp_session *session, bool is_gp,
const struct identity *identity);
int mcp_session_waitnotif(struct mcp_session *session, s32 timeout);
s32 mcp_session_exitcode(struct mcp_session *mcp_session);
/* SWd suspend/resume */
int mcp_suspend(void);
int mcp_resume(void);
bool mcp_suspended(void);
/* Callback to scheduler registration */
enum mcp_scheduler_commands {
MCP_YIELD,
MCP_NSIQ,
};
void mcp_register_scheduler(int (*scheduler_cb)(enum mcp_scheduler_commands));
bool mcp_notifications_flush(void);
void mcp_register_crashhandler(void (*crashhandler_cb)(void));
/*
* Get the requested SWd sleep timeout value (ms)
* - if the timeout is -1, wait indefinitely
* - if the timeout is 0, re-schedule immediately (timeouts in µs in the SWd)
* - otherwise sleep for the required time
* returns true if sleep is required, false otherwise
*/
bool mcp_get_idle_timeout(s32 *timeout);
void mcp_reset_idle_timeout(void);
void mcp_update_time(void);
/* MCP commands */
int mcp_get_version(struct mc_version_info *version_info);
int mcp_load_token(uintptr_t data, const struct mcp_buffer_map *buffer_map);
int mcp_load_check(const struct tee_object *obj,
const struct mcp_buffer_map *buffer_map);
int mcp_open_session(struct mcp_session *session,
const struct tee_object *obj,
const struct mcp_buffer_map *map,
const struct mcp_buffer_map *tci_map);
int mcp_close_session(struct mcp_session *session);
void mcp_kill_session(struct mcp_session *session);
int mcp_map(u32 session_id, struct mcp_buffer_map *buffer_map);
int mcp_unmap(u32 session_id, const struct mcp_buffer_map *buffer_map);
int mcp_multimap(u32 session_id, struct mcp_buffer_map *buffer_maps);
int mcp_multiunmap(u32 session_id, const struct mcp_buffer_map *buffer_maps);
int mcp_notify(struct mcp_session *mcp_session);
/* MCP initialisation/cleanup */
int mcp_init(void);
void mcp_exit(void);
int mcp_start(void);
void mcp_stop(void);
/* MCP debug */
int mcp_debug_sessions(struct kasnprintf_buf *buf);
int mcp_debug_mcpcmds(struct kasnprintf_buf *buf);
#endif /* _MC_MCP_H_ */
View file
@ -0,0 +1,476 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/pgtable.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include "public/mc_linux.h"
#include "mci/mcimcp.h"
#include "platform.h" /* CONFIG_TRUSTONIC_TEE_LPAE */
#include "main.h"
#include "mcp.h" /* mcp_buffer_map */
#include "mmu.h"
/* Common */
#define MMU_BUFFERABLE BIT(2) /* AttrIndx[0] */
#define MMU_CACHEABLE BIT(3) /* AttrIndx[1] */
#define MMU_EXT_NG BIT(11) /* ARMv6 and higher */
/* LPAE */
#define MMU_TYPE_PAGE (3 << 0)
#define MMU_NS BIT(5)
#define MMU_AP_RW_ALL BIT(6) /* AP[2:1], RW, at any privilege level */
#define MMU_EXT_SHARED_64 (3 << 8) /* SH[1:0], inner shareable */
#define MMU_EXT_AF BIT(10) /* Access Flag */
#define MMU_EXT_XN (((u64)1) << 54) /* XN */
/* Non-LPAE */
#define MMU_TYPE_EXT (3 << 0) /* v5 */
#define MMU_TYPE_SMALL (2 << 0)
#define MMU_EXT_AP0 BIT(4)
#define MMU_EXT_AP1 (2 << 4)
#define MMU_EXT_TEX(x) ((x) << 6) /* v5 */
#define MMU_EXT_SHARED_32 BIT(10) /* ARMv6 and higher */
/*
* MobiCore specific page tables for world shared memory.
* Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
* MobiCore uses the default ARM format.
*
* Number of page table entries in one L2 MMU table. This is ARM specific, an
* MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
*/
#define L2_ENTRIES_MAX 256
/*
* Small buffers (below 1MiB) are mapped using the legacy L2 table, but bigger
* buffers now use a fake L1 table that holds 64-bit pointers to L2 tables. As
* this must be exactly one page, we can hold up to 512 entries.
*/
#define L1_ENTRIES_MAX 512
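/*
 * Worked example: a 3 MiB user buffer spans 768 pages of 4 KiB, i.e. three
 * L2 tables of 256 entries each, so the fake L1 table is needed. A 512 KiB
 * buffer fits into a single L2 table and no L1 table is allocated.
 */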
/*
* Fake L1 MMU table.
*/
union l1_table {
u64 *pages_phys; /* Array of physical page addresses */
unsigned long page;
};
/*
 * L2 MMU table, which is really an L3 table in the LPAE case.
*/
union l2_table {
union { /* Array of PTEs */
u32 *ptes_32;
u64 *ptes_64;
};
unsigned long page;
};
/*
* MMU table allocated to the Daemon or a TLC describing a world shared
* buffer.
 * When users map a malloc()ed area into the SWd, an MMU table is
 * allocated. In addition, the area of at most 1 MiB of virtual address
 * space is mapped into the MMU table and a handle for this table is
 * returned to the user.
*/
struct tee_mmu {
union l2_table l2_tables[L1_ENTRIES_MAX]; /* L2 tables */
size_t l2_tables_nr; /* Actual number of L2 tables */
union l1_table l1_table; /* Fake L1 table */
union l2_table l1_l2_table; /* L2 table for the L1 table */
u32 offset;
u32 length;
bool user; /* Pages are from user space */
};
/*
 * Linux uses different mappings for SMP systems (the sharing flag is set
 * in the PTE). In order not to confuse things too much in MobiCore, make
 * sure the shared buffers have the same flags. This should also be done
 * on the SWd side.
*/
static u64 pte_flags_64 = MMU_BUFFERABLE | MMU_CACHEABLE | MMU_EXT_NG |
#ifdef CONFIG_SMP
MMU_EXT_SHARED_64 |
#endif /* CONFIG_SMP */
MMU_EXT_XN | MMU_EXT_AF | MMU_AP_RW_ALL |
MMU_NS | MMU_TYPE_PAGE;
static u32 pte_flags_32 = MMU_BUFFERABLE | MMU_CACHEABLE | MMU_EXT_NG |
#ifdef CONFIG_SMP
MMU_EXT_SHARED_32 | MMU_EXT_TEX(1) |
#endif /* CONFIG_SMP */
MMU_EXT_AP1 | MMU_EXT_AP0 |
MMU_TYPE_SMALL | MMU_TYPE_EXT;
static uintptr_t mmu_table_pointer(const struct tee_mmu *mmu)
{
if (mmu->l1_table.page) {
return g_ctx.f_lpae ?
(uintptr_t)mmu->l1_l2_table.ptes_64 :
(uintptr_t)mmu->l1_l2_table.ptes_32;
} else {
return g_ctx.f_lpae ?
(uintptr_t)mmu->l2_tables[0].ptes_64 :
(uintptr_t)mmu->l2_tables[0].ptes_32;
}
}
static void free_all_pages(struct tee_mmu *mmu_table)
{
union l2_table *l2_table = &mmu_table->l2_tables[0];
size_t i;
for (i = 0; i < mmu_table->l2_tables_nr; i++, l2_table++) {
if (!l2_table->page)
break;
free_page(l2_table->page);
}
if (mmu_table->l1_l2_table.page)
free_page(mmu_table->l1_l2_table.page);
if (mmu_table->l1_table.page)
free_page(mmu_table->l1_table.page);
}
/*
* Create a MMU table for a buffer or trustlet.
*/
static inline int map_buffer(struct task_struct *task, const void *data,
unsigned int length, struct tee_mmu *mmu_table)
{
const void *reader = (const void *)((uintptr_t)data & PAGE_MASK);
	struct page **pages; /* Typed view of pages_page below */
unsigned long pages_page; /* Page to contain the page pointers */
size_t chunk;
unsigned long total_pages_nr;
int l1_entries_max;
int ret = 0;
/* Check that we have enough space to map data */
mmu_table->length = length;
mmu_table->offset = (uintptr_t)data & ~PAGE_MASK;
total_pages_nr = PAGE_ALIGN(mmu_table->offset + length) / PAGE_SIZE;
if (g_ctx.f_mem_ext)
l1_entries_max = L1_ENTRIES_MAX;
else
l1_entries_max = 1;
if (total_pages_nr > (l1_entries_max * L2_ENTRIES_MAX)) {
mc_dev_err("data mapping exceeds %d pages",
l1_entries_max * L2_ENTRIES_MAX);
return -EINVAL;
}
/* Get number of L2 tables needed */
mmu_table->l2_tables_nr = (total_pages_nr + L2_ENTRIES_MAX - 1) /
L2_ENTRIES_MAX;
mc_dev_devel("total_pages_nr %lu l2_tables_nr %zu",
total_pages_nr, mmu_table->l2_tables_nr);
/* Get a page to store page pointers */
pages_page = get_zeroed_page(GFP_KERNEL);
if (!pages_page)
return -ENOMEM;
pages = (struct page **)pages_page;
/* Allocate a page for the L1 table */
if (mmu_table->l2_tables_nr > 1) {
mmu_table->l1_table.page = get_zeroed_page(GFP_KERNEL);
mmu_table->l1_l2_table.page = get_zeroed_page(GFP_KERNEL);
if (!mmu_table->l1_table.page || !mmu_table->l1_l2_table.page) {
ret = -ENOMEM;
goto end;
}
/* Map it */
if (g_ctx.f_lpae) {
u64 *pte;
pte = &mmu_table->l1_l2_table.ptes_64[0];
*pte = virt_to_phys(mmu_table->l1_table.pages_phys);
*pte |= pte_flags_64;
} else {
u32 *pte;
pte = &mmu_table->l1_l2_table.ptes_32[0];
*pte = virt_to_phys(mmu_table->l1_table.pages_phys);
*pte |= pte_flags_32;
}
}
for (chunk = 0; chunk < mmu_table->l2_tables_nr; chunk++) {
unsigned long pages_nr, i;
struct page **page_ptr;
/* Size to map for this chunk */
if (chunk == (mmu_table->l2_tables_nr - 1))
pages_nr = ((total_pages_nr - 1) % L2_ENTRIES_MAX) + 1;
else
pages_nr = L2_ENTRIES_MAX;
/* Allocate a page for the MMU descriptor */
mmu_table->l2_tables[chunk].page = get_zeroed_page(GFP_KERNEL);
if (!mmu_table->l2_tables[chunk].page) {
ret = -ENOMEM;
goto end;
}
/* Add page address to L1 table if needed */
if (mmu_table->l1_table.page) {
void *table;
if (g_ctx.f_lpae)
table = mmu_table->l2_tables[chunk].ptes_64;
else
table = mmu_table->l2_tables[chunk].ptes_32;
mmu_table->l1_table.pages_phys[chunk] =
virt_to_phys(table);
}
/* Get pages */
if (task) {
long gup_ret;
unsigned int foll_flags =
FOLL_TOUCH | FOLL_GET | FOLL_WRITE | FOLL_CMA;
/* Buffer was allocated in user space */
down_read(&task->mm->mmap_sem);
gup_ret = __get_user_pages(task, task->mm,
(uintptr_t)reader, pages_nr,
foll_flags, pages, 0, 0);
up_read(&task->mm->mmap_sem);
if (gup_ret < 0) {
ret = gup_ret;
mc_dev_err("failed to get user pages @%p: %d",
reader, ret);
goto end;
}
			/* Check that we could lock all pages */
if (gup_ret != pages_nr) {
mc_dev_err("get_user_pages() failed, ret: %ld",
gup_ret);
release_pages(pages, gup_ret, 0);
ret = -ENOMEM;
goto end;
}
reader += pages_nr * PAGE_SIZE;
mmu_table->user = true;
} else if (is_vmalloc_addr(data)) {
/* Buffer vmalloc'ed in kernel space */
page_ptr = &pages[0];
for (i = 0; i < pages_nr; i++) {
struct page *page = vmalloc_to_page(reader);
if (!page) {
mc_dev_err("failed to map address");
ret = -EINVAL;
goto end;
}
*page_ptr++ = page;
reader += PAGE_SIZE;
}
} else {
/* Buffer kmalloc'ed in kernel space */
struct page *page = virt_to_page(reader);
reader += pages_nr * PAGE_SIZE;
page_ptr = &pages[0];
for (i = 0; i < pages_nr; i++)
*page_ptr++ = page++;
}
/* Create MMU Table entries */
page_ptr = &pages[0];
/*
* Create MMU table entry, see ARM MMU docu for details about
* flags stored in the lowest 12 bits. As a side reference, the
* Article "ARM's multiply-mapped memory mess" found in the
* collection at http://lwn.net/Articles/409032/ is also worth
* reading.
*/
if (g_ctx.f_lpae) {
u64 *pte = &mmu_table->l2_tables[chunk].ptes_64[0];
for (i = 0; i < pages_nr; i++, page_ptr++, pte++) {
unsigned long phys = page_to_phys(*page_ptr);
*pte = phys;
*pte |= pte_flags_64;
}
} else {
u32 *pte = &mmu_table->l2_tables[chunk].ptes_32[0];
for (i = 0; i < pages_nr; i++, page_ptr++, pte++) {
unsigned long phys = page_to_phys(*page_ptr);
#if defined CONFIG_ARM64
if (phys & 0xffffffff00000000) {
mc_dev_err("64-bit pointer: 0x%16lx",
phys);
ret = -EFAULT;
goto end;
}
#endif
*pte = phys;
*pte |= pte_flags_32;
}
}
}
end:
if (ret)
free_all_pages(mmu_table);
free_page(pages_page);
return ret;
}
static inline void unmap_buffer(struct tee_mmu *mmu_table)
{
int t;
mc_dev_devel("clear MMU table, virt %p", mmu_table);
if (!mmu_table->user)
goto end;
/* Release all locked user space pages */
for (t = 0; t < mmu_table->l2_tables_nr; t++) {
if (g_ctx.f_lpae) {
u64 *pte = mmu_table->l2_tables[t].ptes_64;
int i;
for (i = 0; i < L2_ENTRIES_MAX; i++, pte++) {
/* Unused entries are 0 */
if (!*pte)
break;
/* pte_page() cannot return NULL */
page_cache_release(pte_page(*pte));
}
} else {
u32 *pte = mmu_table->l2_tables[t].ptes_32;
int i;
for (i = 0; i < L2_ENTRIES_MAX; i++, pte++) {
/* Unused entries are 0 */
if (!*pte)
break;
/* pte_page() cannot return NULL */
page_cache_release(pte_page(*pte));
}
}
}
end:
free_all_pages(mmu_table);
}
/*
 * Delete an MMU table.
*/
void tee_mmu_delete(struct tee_mmu *mmu)
{
if (WARN(!mmu, "NULL mmu pointer given"))
return;
unmap_buffer(mmu);
mc_dev_devel("freed mmu %p: %s len %u off %u L%d table %lx\n",
mmu, mmu->user ? "user" : "kernel", mmu->length,
mmu->offset, mmu->l1_table.page ? 1 : 2,
mmu_table_pointer(mmu));
kfree(mmu);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_mmus);
}
/*
* Allocate MMU table and map buffer into it.
* That is, create respective table entries.
*/
struct tee_mmu *tee_mmu_create(struct task_struct *task, const void *addr,
unsigned int length)
{
struct tee_mmu *mmu;
int ret;
/* Check input arguments */
if (WARN(!addr, "data address is NULL"))
return ERR_PTR(-EINVAL);
if (WARN(!length, "data length is 0"))
return ERR_PTR(-EINVAL);
/* Allocate the struct */
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
return ERR_PTR(-ENOMEM);
/* Increment debug counter */
atomic_inc(&g_ctx.c_mmus);
/* Create the MMU mapping for the data */
ret = map_buffer(task, addr, length, mmu);
if (ret) {
kfree(mmu);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_mmus);
return ERR_PTR(ret);
}
mc_dev_devel("created mmu %p: %s addr %p len %u off %u L%d table %lx\n",
mmu, mmu->user ? "user" : "kernel", addr, mmu->length,
mmu->offset, mmu->l1_table.page ? 1 : 2,
mmu_table_pointer(mmu));
return mmu;
}
void tee_mmu_buffer(const struct tee_mmu *mmu, struct mcp_buffer_map *map)
{
uintptr_t table = mmu_table_pointer(mmu);
map->phys_addr = virt_to_phys((void *)table);
map->secure_va = 0;
map->offset = mmu->offset;
map->length = mmu->length;
if (mmu->l1_table.page)
map->type = WSM_L1;
else
map->type = WSM_L2;
}
int tee_mmu_debug_structs(struct kasnprintf_buf *buf, const struct tee_mmu *mmu)
{
return kasnprintf(buf,
"\t\t\tmmu %p: %s len %u off %u table %lx type L%d\n",
mmu, mmu->user ? "user" : "kernel", mmu->length,
mmu->offset, mmu_table_pointer(mmu),
mmu->l1_table.page ? 1 : 2);
}
View file
@ -0,0 +1,44 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _TBASE_MEM_H_
#define _TBASE_MEM_H_
struct tee_mmu;
struct mcp_buffer_map;
/*
* Allocate MMU table and map buffer into it.
* That is, create respective table entries.
*/
struct tee_mmu *tee_mmu_create(struct task_struct *task, const void *wsm_buffer,
unsigned int wsm_len);
/*
* Delete a used MMU table.
*/
void tee_mmu_delete(struct tee_mmu *mmu);
/*
* Fill in buffer info for MMU table.
*/
void tee_mmu_buffer(const struct tee_mmu *mmu, struct mcp_buffer_map *map);
/*
* Add info to debug buffer.
*/
int tee_mmu_debug_structs(struct kasnprintf_buf *buf,
const struct tee_mmu *mmu);
#endif /* _TBASE_MEM_H_ */
View file
@ -0,0 +1,65 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Header file of MobiCore Driver Kernel Module Platform
* specific structures
*
* Internal structures of the McDrvModule
*
* Header file the MobiCore Driver Kernel Module,
* its internal structures and defines.
*/
#ifndef _MC_DRV_PLATFORM_H_
#define _MC_DRV_PLATFORM_H_
#define IRQ_SPI(x) ((x) + 32)
/* MobiCore Interrupt. */
#define MC_INTR_SSIQ IRQ_SPI(223)
#define TBASE_CORE_SWITCHER
#define COUNT_OF_CPUS 8
/* Values of MPIDR regs */
#define CPU_IDS {0x0100, 0x0101, 0x0102, 0x0103, 0x0000, 0x0001, 0x0002, 0x0003}
/* uid/gid behave like old kernels but with new types (temporary) */
#ifndef CONFIG_UIDGID_STRICT_TYPE_CHECKS
#define MC_UIDGID_OLDSTYLE
#endif
/* SWd LPAE */
#ifndef CONFIG_TRUSTONIC_TEE_LPAE
#define CONFIG_TRUSTONIC_TEE_LPAE
#endif
/* Enable Fastcall worker thread */
#define MC_FASTCALL_WORKER_THREAD
/* Set Parameters for Secure OS Boosting */
#define DEFAULT_LITTLE_CORE 0
#define DEFAULT_BIG_CORE 4
#define MIGRATE_TARGET_CORE 3
#define MC_INTR_LOCAL_TIMER (IRQ_SPI(85) + DEFAULT_BIG_CORE)
#define LOCAL_TIMER_PERIOD 50
#define DEFAULT_SECOS_BOOST_TIME 5000
#define MAX_SECOS_BOOST_TIME 600000 /* 600 sec */
#define DUMP_TBASE_HALT_STATUS
#endif /* _MC_DRV_PLATFORM_H_ */
View file
@ -0,0 +1,68 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "platform.h" /* MC_BL_NOTIFIER */
#ifdef MC_BL_NOTIFIER
#include "main.h"
#include "scheduler.h" /* SWd suspend/resume commands */
#include "pm.h"
#include <asm/bL_switcher.h>
static struct pm_context {
	struct notifier_block bl_switcher_notifier;
} pm_ctx;
static int bl_switcher_notifier_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned int mpidr, cpu, cluster;
int ret = 0;
asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
cpu = mpidr & 0x3;
cluster = (mpidr >> 8) & 0xf;
mc_dev_devel("%s switching!!, cpu: %u, Out=%u\n",
event == SWITCH_ENTER ? "Before" : "After", cpu, cluster);
if (cpu != 0)
return 0;
switch (event) {
case SWITCH_ENTER:
ret = mc_scheduler_suspend();
break;
case SWITCH_EXIT:
ret = mc_scheduler_resume();
break;
default:
mc_dev_devel("MobiCore: Unknown switch event!\n");
}
return 0;
}
int mc_pm_start(void)
{
	pm_ctx.bl_switcher_notifier.notifier_call = bl_switcher_notifier_handler;
	register_bL_swicher_notifier(&pm_ctx.bl_switcher_notifier);
return 0;
}
void mc_pm_stop(void)
{
	unregister_bL_swicher_notifier(&pm_ctx.bl_switcher_notifier);
}
#endif /* MC_BL_NOTIFIER */

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_PM_H_
#define _MC_PM_H_
#include "platform.h" /* MC_BL_NOTIFIER */
#ifdef MC_BL_NOTIFIER
/* Initialize Power Management */
int mc_pm_start(void);
/* Free all Power Management resources*/
void mc_pm_stop(void);
#else
static inline int mc_pm_start(void)
{
return 0;
}
static inline void mc_pm_stop(void)
{
}
#endif
#endif /* _MC_PM_H_ */
View file
@ -0,0 +1,82 @@
/*
* Copyright (c) 2013-2014 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MC_ADMIN_IOCTL_H__
#define __MC_ADMIN_IOCTL_H__
#include <linux/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MC_ADMIN_DEVNODE "mobicore"
/* Driver/daemon commands */
enum {
/* Command 0 is reserved */
MC_DRV_GET_ROOT_CONTAINER = 1,
MC_DRV_GET_SP_CONTAINER = 2,
MC_DRV_GET_TRUSTLET_CONTAINER = 3,
MC_DRV_GET_TRUSTLET = 4,
MC_DRV_SIGNAL_CRASH = 5,
};
/* MobiCore IOCTL magic number */
#define MC_IOC_MAGIC 'M'
struct mc_admin_request {
__u32 request_id; /* Unique request identifier */
__u32 command; /* Command to daemon */
struct mc_uuid_t uuid; /* UUID of trustlet, if relevant */
__u32 is_gp; /* Whether trustlet is GP */
__u32 spid; /* SPID of trustlet, if relevant */
};
struct mc_admin_response {
__u32 request_id; /* Unique request identifier */
__u32 error_no; /* Errno from daemon */
__u32 spid; /* SPID of trustlet, if relevant */
__u32 service_type; /* Type of trustlet being returned */
__u32 length; /* Length of data to get */
/* Any data follows */
};
struct mc_admin_driver_info {
/* Version, and something else..*/
__u32 drv_version;
__u32 initial_cmd_id;
};
struct mc_admin_load_info {
__u32 spid; /* SPID of trustlet, if relevant */
__u64 address; /* Address of the data */
__u32 length; /* Length of data to get */
};
#define MC_ADMIN_IO_GET_DRIVER_REQUEST \
_IOR(MC_IOC_MAGIC, 0, struct mc_admin_request)
#define MC_ADMIN_IO_GET_INFO \
_IOR(MC_IOC_MAGIC, 1, struct mc_admin_driver_info)
#define MC_ADMIN_IO_LOAD_DRIVER \
_IOW(MC_IOC_MAGIC, 2, struct mc_admin_load_info)
#define MC_ADMIN_IO_LOAD_TOKEN \
_IOW(MC_IOC_MAGIC, 3, struct mc_admin_load_info)
#define MC_ADMIN_IO_LOAD_CHECK \
_IOW(MC_IOC_MAGIC, 4, struct mc_admin_load_info)
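/*
 * Illustrative daemon-side sketch (not part of this interface): block on
 * the next request from the driver and dispatch it. fd is an open
 * descriptor on the admin device node and dispatch() is hypothetical;
 * writing the response and any payload back is elided.
 *
 *	struct mc_admin_request req;
 *
 *	if (!ioctl(fd, MC_ADMIN_IO_GET_DRIVER_REQUEST, &req))
 *		dispatch(req.request_id, req.command, &req.uuid);
 */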
#ifdef __cplusplus
}
#endif
#endif /* __MC_ADMIN_IOCTL_H__ */
View file
@ -0,0 +1,172 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LINUX_H_
#define _MC_LINUX_H_
#define MCDRVMODULEAPI_VERSION_MAJOR 2
#define MCDRVMODULEAPI_VERSION_MINOR 0
#include <linux/types.h>
#define MC_USER_DEVNODE "mobicore-user"
/** Maximum length of MobiCore product ID string. */
#define MC_PRODUCT_ID_LEN 64
/** Number of buffers that can be mapped at once */
#define MC_MAP_MAX 4
/** Flags for buffers to map (aligned on GP) */
#define MC_IO_MAP_INPUT 0x1
#define MC_IO_MAP_OUTPUT 0x2
/*
* Universally Unique Identifier (UUID) according to ISO/IEC 11578.
*/
struct mc_uuid_t {
__u8 value[16]; /* Value of the UUID. */
};
/*
* GP TA login types.
*/
enum mc_login_type {
TEEC_LOGIN_PUBLIC = 0,
TEEC_LOGIN_USER,
TEEC_LOGIN_GROUP,
TEEC_LOGIN_APPLICATION = 4,
TEEC_LOGIN_USER_APPLICATION,
TEEC_LOGIN_GROUP_APPLICATION,
};
/*
* GP TA identity structure.
*/
struct mc_identity {
enum mc_login_type login_type;
union {
__u8 login_data[16];
gid_t gid; /* Requested group id */
struct {
kuid_t euid;
kuid_t ruid;
} uid;
};
};
/*
* Data exchange structure of the MC_IO_OPEN_SESSION ioctl command.
*/
struct mc_ioctl_open_session {
struct mc_uuid_t uuid; /* trustlet uuid */
__u32 is_gp_uuid; /* uuid is for GP TA */
__u32 sid; /* session id (out) */
__u64 tci; /* tci buffer pointer */
__u32 tcilen; /* tci length */
struct mc_identity identity; /* GP TA identity */
};
/*
* Data exchange structure of the MC_IO_OPEN_TRUSTLET ioctl command.
*/
struct mc_ioctl_open_trustlet {
__u32 sid; /* session id (out) */
__u32 spid; /* trustlet spid */
__u64 buffer; /* trustlet binary pointer */
__u32 tlen; /* binary length */
__u64 tci; /* tci buffer pointer */
__u32 tcilen; /* tci length */
};
/*
* Data exchange structure of the MC_IO_WAIT ioctl command.
*/
struct mc_ioctl_wait {
__u32 sid; /* session id (in) */
__s32 timeout; /* notification timeout */
};
/*
* Data exchange structure of the MC_IO_ALLOC ioctl command.
*/
struct mc_ioctl_alloc {
__u32 len; /* buffer length */
__u32 handle; /* user handle for the buffer (out) */
};
/*
* Buffer mapping incoming and outgoing information.
*/
struct mc_ioctl_buffer {
__u64 va; /* user space address of buffer */
__u32 len; /* buffer length */
__u64 sva; /* SWd virt address of buffer (out) */
__u32 flags; /* buffer flags */
};
/*
* Data exchange structure of the MC_IO_MAP and MC_IO_UNMAP ioctl commands.
*/
struct mc_ioctl_map {
__u32 sid; /* session id */
struct mc_ioctl_buffer bufs[MC_MAP_MAX]; /* buffers info */
};
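/*
 * Illustrative user-space sketch (not part of this interface): share one
 * buffer with session sid via MC_IO_MAP (defined below); the driver returns
 * the secure address in bufs[0].sva. fd is an open descriptor on the user
 * device node.
 *
 *	struct mc_ioctl_map map = { .sid = sid };
 *
 *	map.bufs[0].va = (__u64)(uintptr_t)buf;
 *	map.bufs[0].len = len;
 *	map.bufs[0].flags = MC_IO_MAP_INPUT | MC_IO_MAP_OUTPUT;
 *	if (!ioctl(fd, MC_IO_MAP, &map))
 *		sva = map.bufs[0].sva;
 */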
/*
* Data exchange structure of the MC_IO_ERR ioctl command.
*/
struct mc_ioctl_geterr {
__u32 sid; /* session id */
__s32 value; /* error value (out) */
};
/*
* Global MobiCore Version Information.
*/
struct mc_version_info {
char product_id[MC_PRODUCT_ID_LEN]; /** Product ID string */
__u32 version_mci; /** Mobicore Control Interface */
__u32 version_so; /** Secure Objects */
__u32 version_mclf; /** MobiCore Load Format */
__u32 version_container; /** MobiCore Container Format */
__u32 version_mc_config; /** MobiCore Config. Block Format */
__u32 version_tl_api; /** MobiCore Trustlet API */
__u32 version_dr_api; /** MobiCore Driver API */
__u32 version_nwd; /** This Driver */
};
/*
* defines for the ioctl mobicore driver module function call from user space.
*/
/* MobiCore IOCTL magic number */
#define MC_IOC_MAGIC 'M'
/*
* Implement corresponding functions from user api
*/
#define MC_IO_OPEN_SESSION \
_IOWR(MC_IOC_MAGIC, 0, struct mc_ioctl_open_session)
#define MC_IO_OPEN_TRUSTLET \
_IOWR(MC_IOC_MAGIC, 1, struct mc_ioctl_open_trustlet)
#define MC_IO_CLOSE_SESSION _IO(MC_IOC_MAGIC, 2)
#define MC_IO_NOTIFY _IO(MC_IOC_MAGIC, 3)
#define MC_IO_WAIT _IOW(MC_IOC_MAGIC, 4, struct mc_ioctl_wait)
#define MC_IO_MAP _IOWR(MC_IOC_MAGIC, 5, struct mc_ioctl_map)
#define MC_IO_UNMAP _IOW(MC_IOC_MAGIC, 6, struct mc_ioctl_map)
#define MC_IO_ERR _IOWR(MC_IOC_MAGIC, 7, struct mc_ioctl_geterr)
#define MC_IO_FREEZE _IO(MC_IOC_MAGIC, 8)
#define MC_IO_VERSION _IOR(MC_IOC_MAGIC, 9, struct mc_version_info)
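/*
 * Illustrative user-space sketch (not part of this interface): query the
 * version information through MC_IO_VERSION; the device node name comes
 * from MC_USER_DEVNODE.
 *
 *	struct mc_version_info vi;
 *	int fd = open("/dev/" MC_USER_DEVNODE, O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, MC_IO_VERSION, &vi))
 *		printf("product: %s\n", vi.product_id);
 */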
#endif /* _MC_LINUX_H_ */
View file
@ -0,0 +1,30 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MC_LINUX_API_H_
#define _MC_LINUX_API_H_
#include <linux/types.h>
/*
* Switch TEE active core to core_num, defined as linux
* core id
*/
int mc_switch_core(__u32 core_num);
/*
* Return TEE active core as Linux core id
*/
__u32 mc_active_core(void);
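/*
 * Illustrative sketch (not part of this interface): temporarily migrate the
 * TEE to another core and restore the previous one afterwards. Core 4 is
 * only an example big-core id; valid ids are platform specific.
 *
 *	__u32 prev = mc_active_core();
 *
 *	mc_switch_core(4);
 *	... run work that benefits from the big core ...
 *	mc_switch_core(prev);
 */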
#endif /* _MC_LINUX_API_H_ */
View file
@ -0,0 +1,464 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MOBICORE_DRIVER_API_H_
#define _MOBICORE_DRIVER_API_H_
#include "mc_linux.h"
#define __MC_CLIENT_LIB_API
/*
* Return values of MobiCore driver functions.
*/
enum mc_result {
/* Function call succeeded. */
MC_DRV_OK = 0,
/* No notification available. */
MC_DRV_NO_NOTIFICATION = 1,
/* Error during notification on communication level. */
MC_DRV_ERR_NOTIFICATION = 2,
/* Function not implemented. */
MC_DRV_ERR_NOT_IMPLEMENTED = 3,
/* No more resources available. */
MC_DRV_ERR_OUT_OF_RESOURCES = 4,
/* Driver initialization failed. */
MC_DRV_ERR_INIT = 5,
/* Unknown error. */
MC_DRV_ERR_UNKNOWN = 6,
/* The specified device is unknown. */
MC_DRV_ERR_UNKNOWN_DEVICE = 7,
/* The specified session is unknown.*/
MC_DRV_ERR_UNKNOWN_SESSION = 8,
/* The specified operation is not allowed. */
MC_DRV_ERR_INVALID_OPERATION = 9,
/* The response header from the MC is invalid. */
MC_DRV_ERR_INVALID_RESPONSE = 10,
/* Function call timed out. */
MC_DRV_ERR_TIMEOUT = 11,
/* Can not allocate additional memory. */
MC_DRV_ERR_NO_FREE_MEMORY = 12,
/* Free memory failed. */
MC_DRV_ERR_FREE_MEMORY_FAILED = 13,
/* Still some open sessions pending. */
MC_DRV_ERR_SESSION_PENDING = 14,
/* MC daemon not reachable */
MC_DRV_ERR_DAEMON_UNREACHABLE = 15,
/* The device file of the kernel module could not be opened. */
MC_DRV_ERR_INVALID_DEVICE_FILE = 16,
/* Invalid parameter. */
MC_DRV_ERR_INVALID_PARAMETER = 17,
/* Unspecified error from Kernel Module*/
MC_DRV_ERR_KERNEL_MODULE = 18,
/* Error during mapping of additional bulk memory to session. */
MC_DRV_ERR_BULK_MAPPING = 19,
/* Error during unmapping of additional bulk memory to session. */
MC_DRV_ERR_BULK_UNMAPPING = 20,
/* Notification received, exit code available. */
MC_DRV_INFO_NOTIFICATION = 21,
/* Set up of NWd connection failed. */
MC_DRV_ERR_NQ_FAILED = 22,
/* Wrong daemon version. */
MC_DRV_ERR_DAEMON_VERSION = 23,
/* Wrong container version. */
MC_DRV_ERR_CONTAINER_VERSION = 24,
/* System Trustlet public key is wrong. */
MC_DRV_ERR_WRONG_PUBLIC_KEY = 25,
/* Wrong container type(s). */
MC_DRV_ERR_CONTAINER_TYPE_MISMATCH = 26,
/* Container is locked (or not activated). */
MC_DRV_ERR_CONTAINER_LOCKED = 27,
/* SPID is not registered with root container. */
MC_DRV_ERR_SP_NO_CHILD = 28,
/* UUID is not registered with sp container. */
MC_DRV_ERR_TL_NO_CHILD = 29,
/* Unwrapping of root container failed. */
MC_DRV_ERR_UNWRAP_ROOT_FAILED = 30,
/* Unwrapping of service provider container failed. */
MC_DRV_ERR_UNWRAP_SP_FAILED = 31,
/* Unwrapping of Trustlet container failed. */
MC_DRV_ERR_UNWRAP_TRUSTLET_FAILED = 32,
/* No device associated with connection. */
MC_DRV_ERR_DAEMON_DEVICE_NOT_OPEN = 33,
/* TA blob attestation is incorrect. */
MC_DRV_ERR_TA_ATTESTATION_ERROR = 34,
/* Interrupted system call. */
MC_DRV_ERR_INTERRUPTED_BY_SIGNAL = 35,
	/* Service is blocked and opening a session is thus not allowed. */
MC_DRV_ERR_SERVICE_BLOCKED = 36,
	/* Service is locked and opening a session is thus not allowed. */
MC_DRV_ERR_SERVICE_LOCKED = 37,
/* Service was killed by the TEE (due to an administrative command). */
MC_DRV_ERR_SERVICE_KILLED = 38,
/* All permitted instances to the service are used */
MC_DRV_ERR_NO_FREE_INSTANCES = 39,
/* TA blob header is incorrect. */
MC_DRV_ERR_TA_HEADER_ERROR = 40,
};
/*
* Structure of Session Handle, includes the Session ID and the Device ID the
* Session belongs to.
* The session handle will be used for session-based MobiCore communication.
* It will be passed to calls which address a communication end point in the
* MobiCore environment.
*/
struct mc_session_handle {
u32 session_id; /* MobiCore session ID */
u32 device_id; /* Device ID the session belongs to */
};
/*
* Information structure about additional mapped Bulk buffer between the
* Trustlet Connector (NWd) and the Trustlet (SWd). This structure is
* initialized from a Trustlet Connector by calling mc_map().
* In order to use the memory within a Trustlet the Trustlet Connector has to
* inform the Trustlet with the content of this structure via the TCI.
*/
struct mc_bulk_map {
/* The virtual address of the Bulk buffer regarding the address space
* of the Trustlet, already includes a possible offset! */
u32 secure_virt_addr;
u32 secure_virt_len; /* Length of the mapped Bulk buffer */
};
/* The default device ID */
#define MC_DEVICE_ID_DEFAULT 0
/* Wait infinitely for a response from the MC. */
#define MC_INFINITE_TIMEOUT ((s32)(-1))
/* Do not wait for a response of the MC. */
#define MC_NO_TIMEOUT 0
/* TCI/DCI must not exceed 1MiB */
#define MC_MAX_TCI_LEN 0x100000
/**
* mc_open_device() - Open a new connection to a MobiCore device.
* @device_id: Identifier for the MobiCore device to be used.
* MC_DEVICE_ID_DEFAULT refers to the default device.
*
* Initializes all device specific resources required to communicate with a
* MobiCore instance located on the specified device in the system. If the
* device does not exist the function will return MC_DRV_ERR_UNKNOWN_DEVICE.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_INVALID_OPERATION: device already opened
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon
* MC_DRV_ERR_UNKNOWN_DEVICE: device_id unknown
* MC_DRV_ERR_INVALID_DEVICE_FILE: kernel module under /dev/mobicore
* cannot be opened
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_device(
u32 device_id);
/**
* mc_close_device() - Close the connection to a MobiCore device.
* @device_id: Identifier for the MobiCore device.
*
* When closing a device, active sessions have to be closed beforehand.
* Resources associated with the device will be released.
* The device may be opened again after it has been closed.
*
* MC_DEVICE_ID_DEFAULT refers to the default device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_SESSION_PENDING: a session is still open
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
*/
__MC_CLIENT_LIB_API enum mc_result mc_close_device(
u32 device_id);
/**
* mc_open_session() - Open a new session to a Trustlet.
* @session: On success, the session data will be returned
* @uuid: UUID of the Trustlet to be opened
* @tci: TCI buffer for communicating with the Trustlet
* @tci_len: Length of the TCI buffer. Maximum allowed value
* is MC_MAX_TCI_LEN
*
* The Trustlet with the given UUID has to be available in the flash filesystem.
*
* Write MCP open message to buffer and notify MobiCore about the availability
* of a new command.
*
 * Waits until MobiCore responds with the new session ID (stored in the MCP
* buffer).
*
* Note that session.device_id has to be the device id of an opened device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon socket occur
* MC_DRV_ERR_NQ_FAILED: daemon returns an error
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_session(
struct mc_session_handle *session,
const struct mc_uuid_t *uuid,
u8 *tci,
u32 tci_len);
/**
* mc_open_trustlet() - Open a new session to the provided Trustlet.
* @session: On success, the session data will be returned
* @spid: Service Provider ID (for SP trustlets otherwise ignored)
 * @trustlet:	Memory buffer containing the Trusted Application binary
 * @trustlet_len: Trusted Application length
* @tci: TCI buffer for communicating with the Trustlet
* @tci_len: Length of the TCI buffer. Maximum allowed value
* is MC_MAX_TCI_LEN
*
* Write MCP open message to buffer and notify MobiCore about the availability
* of a new command.
*
 * Waits until MobiCore responds with the new session ID (stored in the MCP
* buffer).
*
* Note that session.device_id has to be the device id of an opened device.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon socket occur
* MC_DRV_ERR_NQ_FAILED: daemon returns an error
*/
__MC_CLIENT_LIB_API enum mc_result mc_open_trustlet(
struct mc_session_handle *session,
u32 spid,
u8 *trustlet,
u32 trustlet_len,
u8 *tci,
	u32 tci_len);
/**
* mc_close_session() - Close a Trustlet session.
* @session: Session to be closed.
*
* Closes the specified MobiCore session. The call will block until the
* session has been closed.
*
* Device device_id has to be opened in advance.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
* MC_DRV_ERR_INVALID_DEVICE_FILE: daemon cannot open Trustlet file
*/
__MC_CLIENT_LIB_API enum mc_result mc_close_session(
struct mc_session_handle *session);
/**
* mc_notify() - Notify a session.
* @session: The session to be notified.
*
* Notifies the session end point about available message data.
* If the session parameter is correct, notify will always succeed.
* Corresponding errors can only be received by mc_wait_notification().
*
* A session has to be opened in advance.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: session parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_notify(
struct mc_session_handle *session);
/**
* mc_wait_notification() - Wait for a notification.
* @session: The session the notification should correspond to.
* @timeout: Time in milliseconds to wait
* (MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
* MC_INFINITE_TIMEOUT : wait infinitely)
*
* Wait for a notification issued by the MobiCore for a specific session.
* The timeout parameter specifies the number of milliseconds the call will wait
* for a notification.
*
 * If the caller passes 0 as the timeout value the call returns
 * immediately. If the timeout value is below 0 the call blocks until a
 * notification for the session has been received.
*
 * The caller has to trust the other side to send a notification that
 * wakes it up again.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_ERR_TIMEOUT: no notification arrived in time
* MC_DRV_INFO_NOTIFICATION: a problem with the session was
* encountered. Get more details with
* mc_get_session_error_code()
* MC_DRV_ERR_NOTIFICATION: a problem with the socket occurred
* MC_DRV_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
struct mc_session_handle *session,
s32 timeout);
/**
* mc_malloc_wsm() - Allocate a block of world shared memory (WSM).
* @device_id: The ID of an opened device to retrieve the WSM from.
* @align: The alignment (number of pages) of the memory block
 *		(e.g. 0x00000001 for 4 KiB).
* @len: Length of the block in bytes.
* @wsm: Virtual address of the world shared memory block.
* @wsm_flags: Platform specific flags describing the memory to
* be allocated.
*
* The MC driver allocates a contiguous block of memory which can be used as
* WSM.
 * This implies that the allocated memory is aligned according to the
* alignment parameter.
*
* Always returns a buffer of size WSM_SIZE aligned to 4K.
*
 * Align and wsm_flags are currently ignored.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
* MC_DRV_ERR_NO_FREE_MEMORY: no more contiguous memory is
* available in this size or for this
* process
*/
__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
u32 device_id,
u32 align,
u32 len,
u8 **wsm,
u32 wsm_flags);
/**
* mc_free_wsm() - Free a block of world shared memory (WSM).
* @device_id: The ID to which the given address belongs
* @wsm: Address of WSM block to be freed
*
* The MC driver will free a block of world shared memory (WSM) previously
* allocated with mc_malloc_wsm(). The caller has to assure that the address
* handed over to the driver is a valid WSM address.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: when device id is invalid
* MC_DRV_ERR_FREE_MEMORY_FAILED: on failure
*/
__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(
u32 device_id,
u8 *wsm);
/**
 * mc_map() - Map additional bulk buffer between a Trustlet Connector (TLC)
* and the Trustlet (TL) for a session
* @session: Session handle with information of the device_id and
* the session_id. The given buffer is mapped to the
* session specified in the sessionHandle
* @buf: Virtual address of a memory portion (relative to TLC)
* to be shared with the Trustlet, already includes a
* possible offset!
* @len: length of buffer block in bytes.
* @map_info: Information structure about the mapped Bulk buffer
* between the TLC (NWd) and the TL (SWd).
*
* Memory allocated in user space of the TLC can be mapped as additional
* communication channel (besides TCI) to the Trustlet. Limitation of the
* Trustlet memory structure apply: only 6 chunks can be mapped with a maximum
* chunk size of 1 MiB each.
*
* It is up to the application layer (TLC) to inform the Trustlet
* about the additional mapped bulk memory.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
 * MC_DRV_ERR_BULK_MAPPING:	buf is already used as a bulk buffer, or
 *				registering the buffer failed
*/
__MC_CLIENT_LIB_API enum mc_result mc_map(
struct mc_session_handle *session,
void *buf,
u32 len,
struct mc_bulk_map *map_info);
/**
* mc_unmap() - Remove additional mapped bulk buffer between Trustlet Connector
* (TLC) and the Trustlet (TL) for a session
* @session: Session handle with information of the device_id and
* the session_id. The given buffer is unmapped from the
* session specified in the sessionHandle.
* @buf: Virtual address of a memory portion (relative to TLC)
* shared with the TL, already includes a possible offset!
* @map_info: Information structure about the mapped Bulk buffer
* between the TLC (NWd) and the TL (SWd)
*
* The bulk buffer will immediately be unmapped from the session context.
*
* The application layer (TLC) must inform the TL about unmapping of the
* additional bulk memory before calling mc_unmap!
*
* The clientlib currently ignores the len field in map_info.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
* MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
 * MC_DRV_ERR_BULK_UNMAPPING:	buf was not registered earlier, or
 *				unregistering failed
*/
__MC_CLIENT_LIB_API enum mc_result mc_unmap(
struct mc_session_handle *session,
void *buf,
struct mc_bulk_map *map_info);
/*
* mc_get_session_error_code() - Get additional error information of the last
* error that occurred on a session.
* @session: Session handle with information of the device_id and
* the session_id
* @exit_code: >0 Trustlet has terminated itself with this value,
* <0 Trustlet is dead because of an error within the
* MobiCore (e.g. Kernel exception). See also MCI
* definition.
*
* After the request the stored error code will be deleted.
*
* Return codes:
* MC_DRV_OK: operation completed successfully
* MC_DRV_INVALID_PARAMETER: a parameter is invalid
* MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
* MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
*/
__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
struct mc_session_handle *session,
s32 *exit_code);
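/*
 * Illustrative usage sketch (not part of this interface): a minimal client
 * round-trip over the API above. Error handling is elided, uuid is the
 * target Trustlet's UUID, and the TCI layout is Trustlet specific.
 *
 *	struct mc_session_handle session = {
 *		.device_id = MC_DEVICE_ID_DEFAULT
 *	};
 *	u8 *tci;
 *
 *	mc_open_device(MC_DEVICE_ID_DEFAULT);
 *	mc_malloc_wsm(MC_DEVICE_ID_DEFAULT, 0, 4096, &tci, 0);
 *	mc_open_session(&session, &uuid, tci, 4096);
 *	... fill tci with a Trustlet-specific command ...
 *	mc_notify(&session);
 *	mc_wait_notification(&session, MC_INFINITE_TIMEOUT);
 *	mc_close_session(&session);
 *	mc_free_wsm(MC_DEVICE_ID_DEFAULT, tci);
 *	mc_close_device(MC_DEVICE_ID_DEFAULT);
 */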
#endif /* _MOBICORE_DRIVER_API_H_ */
View file
@ -0,0 +1,232 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/stringify.h>
#include <linux/version.h>
#include "public/mc_linux.h"
#include "main.h"
#include "fastcall.h"
#include "logging.h"
#include "mcp.h"
#include "scheduler.h"
#define SCHEDULING_FREQ 5 /**< N-SIQ every n-th time */
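/*
 * The scheduler thread below counts "timeslice" down: when it reaches zero
 * an N-SIQ forces the SWd scheduler to run and the counter is reloaded with
 * SCHEDULING_FREQ; on the other passes the SWd is merely resumed with a
 * yield fastcall.
 */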
static struct sched_ctx {
struct task_struct *thread;
bool thread_run;
struct completion idle_complete; /* Unblock scheduler thread */
struct completion sleep_complete; /* Wait for sleep status */
struct mutex sleep_mutex; /* Protect sleep request */
struct mutex request_mutex; /* Protect all below */
/* The order of this enum matters */
enum {
NONE, /* No specific request */
YIELD, /* Run the SWd */
NSIQ, /* Schedule the SWd */
SUSPEND, /* Suspend the SWd */
RESUME, /* Resume the SWd */
} request;
bool suspended;
} sched_ctx;
static int mc_scheduler_command(int command)
{
if (IS_ERR_OR_NULL(sched_ctx.thread))
return -EFAULT;
mutex_lock(&sched_ctx.request_mutex);
if (sched_ctx.request < command) {
sched_ctx.request = command;
complete(&sched_ctx.idle_complete);
}
mutex_unlock(&sched_ctx.request_mutex);
return 0;
}
static int mc_scheduler_pm_command(int command)
{
int ret = -EPERM;
if (IS_ERR_OR_NULL(sched_ctx.thread))
return -EFAULT;
mutex_lock(&sched_ctx.sleep_mutex);
/* Send request */
mc_scheduler_command(command);
/* Wait for scheduler to reply */
wait_for_completion(&sched_ctx.sleep_complete);
mutex_lock(&sched_ctx.request_mutex);
if (command == SUSPEND) {
if (sched_ctx.suspended)
ret = 0;
} else {
if (!sched_ctx.suspended)
ret = 0;
}
mutex_unlock(&sched_ctx.request_mutex);
mutex_unlock(&sched_ctx.sleep_mutex);
return ret;
}
static int mc_dev_command(enum mcp_scheduler_commands command)
{
switch (command) {
case MCP_YIELD:
return mc_scheduler_command(YIELD);
case MCP_NSIQ:
return mc_scheduler_command(NSIQ);
}
return -EINVAL;
}
int mc_scheduler_suspend(void)
{
return mc_scheduler_pm_command(SUSPEND);
}
int mc_scheduler_resume(void)
{
return mc_scheduler_pm_command(RESUME);
}
/*
* This thread, and only this thread, schedules the SWd. Hence, reading the idle
* status and its associated timeout is safe from race conditions.
*/
static int tee_scheduler(void *arg)
{
	int timeslice = 0;	/* Actual scheduling period */
int ret = 0;
while (1) {
s32 timeout_ms = -1;
bool pm_request = false;
if (sched_ctx.suspended || mcp_get_idle_timeout(&timeout_ms)) {
/* If timeout is 0 we keep scheduling the SWd */
if (!timeout_ms)
mc_scheduler_command(NSIQ);
else if (timeout_ms < 0)
wait_for_completion(&sched_ctx.idle_complete);
else if (!wait_for_completion_timeout(
&sched_ctx.idle_complete,
msecs_to_jiffies(timeout_ms)))
/* Timed out, force SWd schedule */
mc_scheduler_command(NSIQ);
}
if (kthread_should_stop() || !sched_ctx.thread_run)
break;
/* Get requested command if any */
mutex_lock(&sched_ctx.request_mutex);
if (sched_ctx.request == YIELD)
/* Yield forced: increment timeslice */
timeslice++;
else if (sched_ctx.request >= NSIQ) {
/* Force N_SIQ, also to suspend/resume SWd */
timeslice = 0;
if (sched_ctx.request == SUSPEND) {
mcp_suspend();
pm_request = true;
} else if (sched_ctx.request == RESUME) {
mcp_resume();
pm_request = true;
}
}
if (g_ctx.f_time)
mcp_update_time();
sched_ctx.request = NONE;
mutex_unlock(&sched_ctx.request_mutex);
/* Reset timeout so we don't loop if SWd halted */
mcp_reset_idle_timeout();
if (timeslice--) {
/* Resume SWd from where it was */
ret = mc_fc_yield();
} else {
timeslice = SCHEDULING_FREQ;
/* Call SWd scheduler */
ret = mc_fc_nsiq();
}
/* Always flush log buffer after the SWd has run */
mc_logging_run();
if (ret)
break;
/* Should have suspended by now if requested */
mutex_lock(&sched_ctx.request_mutex);
if (pm_request) {
sched_ctx.suspended = mcp_suspended();
complete(&sched_ctx.sleep_complete);
}
mutex_unlock(&sched_ctx.request_mutex);
/* Flush pending notifications if possible */
if (mcp_notifications_flush())
complete(&sched_ctx.idle_complete);
}
mc_dev_devel("exit, ret is %d\n", ret);
return ret;
}
int mc_scheduler_start(void)
{
sched_ctx.thread_run = true;
sched_ctx.thread = kthread_run(tee_scheduler, NULL, "tee_scheduler");
if (IS_ERR(sched_ctx.thread)) {
mc_dev_err("tee_scheduler thread creation failed\n");
return PTR_ERR(sched_ctx.thread);
}
mcp_register_scheduler(mc_dev_command);
complete(&sched_ctx.idle_complete);
return 0;
}
void mc_scheduler_stop(void)
{
mcp_register_scheduler(NULL);
sched_ctx.thread_run = false;
complete(&sched_ctx.idle_complete);
kthread_stop(sched_ctx.thread);
}
int mc_scheduler_init(void)
{
init_completion(&sched_ctx.idle_complete);
init_completion(&sched_ctx.sleep_complete);
mutex_init(&sched_ctx.sleep_mutex);
mutex_init(&sched_ctx.request_mutex);
return 0;
}
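/*
 * Usage sketch (illustration only, assumed caller in main.c):
 * the expected call order for this API is:
 *
 *	mc_scheduler_init();       set up completions and mutexes
 *	mc_scheduler_start();      spawn tee_scheduler, register MCP callback
 *	...
 *	mc_scheduler_suspend();    ask the scheduler thread to suspend the SWd
 *	mc_scheduler_resume();     wake the SWd up again
 *	...
 *	mc_scheduler_stop();       unregister callback, stop the thread
 */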

25
drivers/gud/gud-exynos7570/MobiCoreDriver/scheduler.h Normal file
View file

@ -0,0 +1,25 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MC_SCHEDULER_H__
#define __MC_SCHEDULER_H__
int mc_scheduler_init(void);
static inline void mc_scheduler_exit(void) {}
int mc_scheduler_start(void);
void mc_scheduler_stop(void);
int mc_scheduler_suspend(void);
int mc_scheduler_resume(void);
#endif /* __MC_SCHEDULER_H__ */

752
drivers/gud/gud-exynos7570/MobiCoreDriver/session.c Normal file
View file

@ -0,0 +1,752 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include "public/mc_linux.h"
#include "public/mc_admin.h"
#include "platform.h" /* MC_UIDGID_OLDSTYLE */
#include "main.h"
#include "mmu.h"
#include "mcp.h"
#include "client.h" /* *cbuf* */
#include "session.h"
#include "mci/mcimcp.h" /* WSM_INVALID */
#define SHA1_HASH_SIZE 20
struct wsm {
/* Buffer NWd addr (uva or kva, used only for lookup) */
uintptr_t va;
/* buffer length */
u32 len;
/* Buffer SWd addr */
u32 sva;
/* mmu L2 table */
struct tee_mmu *mmu;
/* possibly a pointer to a cbuf */
struct cbuf *cbuf;
/* list node */
struct list_head list;
};
/*
* Postponed closing for GP TAs.
 * Implemented as a worker because it cannot be executed from within
 * isr_worker.
*/
static void session_close_worker(struct work_struct *work)
{
struct mcp_session *mcp_session;
struct tee_session *session;
mcp_session = container_of(work, struct mcp_session, close_work);
session = container_of(mcp_session, struct tee_session, mcp_session);
session_close(session);
}
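/*
 * Hedged sketch of the other half of the postponed close: a "TA closed"
 * notification handler (function name and context assumed, not part of
 * this file) cannot call session_close() directly, so it would only
 * queue the work item initialised in session_create() below:
 */
#if 0	/* illustration only */
static void on_ta_closed(struct mcp_session *mcp_session)
{
	/* Defer the actual close to process context */
	schedule_work(&mcp_session->close_work);
}
#endif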
static struct wsm *wsm_create(struct tee_session *session, uintptr_t va,
u32 len)
{
struct wsm *wsm;
/* Allocate structure */
wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
if (!wsm)
return ERR_PTR(-ENOMEM);
wsm->mmu = client_mmu_create(session->client, va, len, &wsm->cbuf);
if (IS_ERR(wsm->mmu)) {
int ret = PTR_ERR(wsm->mmu);
kfree(wsm);
return ERR_PTR(ret);
}
/* Increment debug counter */
atomic_inc(&g_ctx.c_wsms);
wsm->va = va;
wsm->len = len;
list_add_tail(&wsm->list, &session->wsms);
mc_dev_devel("created wsm %p: mmu %p cbuf %p va %lx len %u\n",
wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len);
return wsm;
}
/*
* Free a WSM object, must be called under the session's wsms_lock
*/
static void wsm_free(struct tee_session *session, struct wsm *wsm)
{
/* Remove wsm from its parent session's list */
list_del(&wsm->list);
/* Free MMU table */
client_mmu_free(session->client, wsm->va, wsm->mmu, wsm->cbuf);
/* Delete wsm object */
mc_dev_devel("freed wsm %p: mmu %p cbuf %p va %lx len %u\n",
wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len);
kfree(wsm);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_wsms);
}
static int hash_path_and_data(u8 *hash, const void *data, unsigned int data_len)
{
struct mm_struct *mm = current->mm;
struct hash_desc desc;
struct scatterlist sg;
char *buf;
char *path;
unsigned int path_len;
int ret = 0;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
down_read(&mm->mmap_sem);
if (!mm->exe_file) {
ret = -ENOENT;
goto end;
}
path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
if (IS_ERR(path)) {
ret = PTR_ERR(path);
goto end;
}
mc_dev_devel("current process path =\n");
{
char *c;
for (c = path; *c; c++)
mc_dev_devel("%c %d\n", *c, *c);
}
path_len = strnlen(path, PAGE_SIZE);
mc_dev_devel("path_len = %u\n", path_len);
desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm)) {
ret = PTR_ERR(desc.tfm);
mc_dev_devel("could not alloc hash = %d\n", ret);
goto end;
}
desc.flags = 0;
sg_init_one(&sg, path, path_len);
crypto_hash_init(&desc);
crypto_hash_update(&desc, &sg, path_len);
if (data) {
mc_dev_devel("current process path: hashing additional data\n");
sg_init_one(&sg, data, data_len);
crypto_hash_update(&desc, &sg, data_len);
}
crypto_hash_final(&desc, hash);
crypto_free_hash(desc.tfm);
end:
up_read(&mm->mmap_sem);
free_page((unsigned long)buf);
return ret;
}
static int check_prepare_identity(const struct mc_identity *identity,
struct identity *mcp_identity)
{
struct mc_identity *mcp_id = (struct mc_identity *)mcp_identity;
u8 hash[SHA1_HASH_SIZE];
bool application = false;
const void *data;
unsigned int data_len;
/* Mobicore doesn't support GP client authentication. */
if (!g_ctx.f_client_login &&
(identity->login_type != TEEC_LOGIN_PUBLIC)) {
mc_dev_err("Unsupported login type %d\n", identity->login_type);
return -EINVAL;
}
/* Copy login type */
mcp_identity->login_type = identity->login_type;
/* Fill in uid field */
if ((identity->login_type == TEEC_LOGIN_USER) ||
(identity->login_type == TEEC_LOGIN_USER_APPLICATION)) {
/* Set euid and ruid of the process. */
#if !defined(KUIDT_INIT) || defined(MC_UIDGID_OLDSTYLE)
mcp_id->uid.euid = current_euid();
mcp_id->uid.ruid = current_uid();
#else
mcp_id->uid.euid = current_euid().val;
mcp_id->uid.ruid = current_uid().val;
#endif
}
/* Check gid field */
if ((identity->login_type == TEEC_LOGIN_GROUP) ||
(identity->login_type == TEEC_LOGIN_GROUP_APPLICATION)) {
#if !defined(KUIDT_INIT) || defined(MC_UIDGID_OLDSTYLE)
		/* Old-style kgid_t is a plain integer, not a struct */
		kgid_t gid = identity->gid;
#else
		kgid_t gid = {
			.val = identity->gid,
		};
#endif
/* Check if gid is one of: egid of the process, its rgid or one
* of its supplementary groups */
if (!in_egroup_p(gid) && !in_group_p(gid)) {
mc_dev_devel("group %d not allowed\n", identity->gid);
return -EACCES;
}
mc_dev_devel("group %d found\n", identity->gid);
mcp_id->gid = identity->gid;
}
switch (identity->login_type) {
case TEEC_LOGIN_PUBLIC:
case TEEC_LOGIN_USER:
case TEEC_LOGIN_GROUP:
break;
case TEEC_LOGIN_APPLICATION:
application = true;
data = NULL;
data_len = 0;
break;
case TEEC_LOGIN_USER_APPLICATION:
application = true;
data = &mcp_id->uid;
data_len = sizeof(mcp_id->uid);
break;
case TEEC_LOGIN_GROUP_APPLICATION:
application = true;
data = &identity->gid;
data_len = sizeof(identity->gid);
break;
default:
/* Any other login_type value is invalid. */
mc_dev_err("Invalid login type %d\n", identity->login_type);
return -EINVAL;
}
if (application) {
if (hash_path_and_data(hash, data, data_len)) {
mc_dev_devel("error in hash calculation\n");
return -EAGAIN;
}
memcpy(&mcp_id->login_data, hash, sizeof(mcp_id->login_data));
}
return 0;
}
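/*
 * Hedged caller-side example (assumed, not part of this file): a GP
 * client requesting group-based login would pass something like
 *
 *	struct mc_identity identity = {
 *		.login_type = TEEC_LOGIN_GROUP_APPLICATION,
 *		.gid = my_gid,
 *	};
 *
 * check_prepare_identity() then verifies that the caller belongs to
 * that group and stores a SHA-1 hash of the client executable's path
 * plus the gid in login_data.
 */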
/*
* Create a session object.
* Note: object is not attached to client yet.
*/
struct tee_session *session_create(struct tee_client *client, bool is_gp,
struct mc_identity *identity)
{
struct tee_session *session;
struct identity mcp_identity;
if (is_gp) {
/* Check identity method and data. */
int ret = check_prepare_identity(identity, &mcp_identity);
if (ret)
return ERR_PTR(ret);
}
/* Allocate session object */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return ERR_PTR(-ENOMEM);
/* Increment debug counter */
atomic_inc(&g_ctx.c_sessions);
mutex_init(&session->close_lock);
/* Initialise object members */
mcp_session_init(&session->mcp_session, is_gp, &mcp_identity);
INIT_WORK(&session->mcp_session.close_work, session_close_worker);
client_get(client);
session->client = client;
kref_init(&session->kref);
INIT_LIST_HEAD(&session->list);
mutex_init(&session->wsms_lock);
INIT_LIST_HEAD(&session->wsms);
mc_dev_devel("created session %p: client %p\n",
session, session->client);
return session;
}
int session_open(struct tee_session *session, const struct tee_object *obj,
const struct tee_mmu *obj_mmu, uintptr_t tci, size_t len)
{
struct mcp_buffer_map map;
tee_mmu_buffer(obj_mmu, &map);
/* Create wsm object for tci */
if (tci && len) {
struct wsm *wsm;
struct mcp_buffer_map tci_map;
int ret = 0;
mutex_lock(&session->wsms_lock);
wsm = wsm_create(session, tci, len);
if (IS_ERR(wsm))
ret = PTR_ERR(wsm);
mutex_unlock(&session->wsms_lock);
if (ret)
return ret;
tee_mmu_buffer(wsm->mmu, &tci_map);
ret = mcp_open_session(&session->mcp_session, obj, &map,
&tci_map);
if (ret) {
mutex_lock(&session->wsms_lock);
wsm_free(session, wsm);
mutex_unlock(&session->wsms_lock);
}
return ret;
}
if (tci || len) {
mc_dev_err("Tci pointer and length are incoherent\n");
return -EINVAL;
}
return mcp_open_session(&session->mcp_session, obj, &map, NULL);
}
/*
* Close TA and unreference session object.
* Object will be freed if reference reaches 0.
* Session object is assumed to have been removed from main list, which means
* that session_close cannot be called anymore.
*/
int session_close(struct tee_session *session)
{
int ret = 0;
mutex_lock(&session->close_lock);
switch (mcp_close_session(&session->mcp_session)) {
case 0:
/* TA is closed, remove from client's closing list */
mutex_lock(&session->client->sessions_lock);
list_del(&session->list);
mutex_unlock(&session->client->sessions_lock);
/* Remove the ref we took on creation, exit if session freed */
if (session_put(session))
return 0;
break;
case -EBUSY:
/*
* (GP) TA needs time to close. The "TA closed" notification
* will trigger a new call to session_close().
* Return OK but do not unref.
*/
break;
default:
mc_dev_err("failed to close session %x in SWd\n",
session->mcp_session.id);
ret = -EPERM;
}
mutex_unlock(&session->close_lock);
return ret;
}
/*
* Free session object and all objects it contains (wsm).
*/
static void session_release(struct kref *kref)
{
struct tee_session *session;
struct wsm *wsm, *next;
/* Remove remaining shared buffers (unmapped in SWd by mcp_close) */
session = container_of(kref, struct tee_session, kref);
list_for_each_entry_safe(wsm, next, &session->wsms, list) {
mc_dev_devel("session %p: free wsm %p\n", session, wsm);
wsm_free(session, wsm);
}
mc_dev_devel("freed session %p: client %p id %x\n",
session, session->client, session->mcp_session.id);
client_put(session->client);
kfree(session);
/* Decrement debug counter */
atomic_dec(&g_ctx.c_sessions);
}
/*
* Unreference session.
* Free session object if reference reaches 0.
*/
int session_put(struct tee_session *session)
{
return kref_put(&session->kref, session_release);
}
/*
* Session is to be removed from NWd records as SWd is dead
*/
int session_kill(struct tee_session *session)
{
mcp_kill_session(&session->mcp_session);
return session_put(session);
}
/*
* Send a notification to TA
*/
int session_notify_swd(struct tee_session *session)
{
if (!session) {
mc_dev_err("Session pointer is null\n");
return -EINVAL;
}
return mcp_notify(&session->mcp_session);
}
/*
* Read and clear last notification received from TA
*/
s32 session_exitcode(struct tee_session *session)
{
return mcp_session_exitcode(&session->mcp_session);
}
static inline int wsm_check(struct tee_session *session,
struct mc_ioctl_buffer *buf)
{
struct wsm *wsm;
list_for_each_entry(wsm, &session->wsms, list) {
if ((buf->va < (wsm->va + wsm->len)) &&
((buf->va + buf->len) > wsm->va)) {
mc_dev_err("buffer %lx overlaps with existing wsm\n",
wsm->va);
return -EADDRINUSE;
}
}
return 0;
}
static inline struct wsm *wsm_find(struct tee_session *session, uintptr_t va)
{
struct wsm *wsm;
list_for_each_entry(wsm, &session->wsms, list)
if (wsm->va == va)
return wsm;
return NULL;
}
static inline int wsm_debug_structs(struct kasnprintf_buf *buf, struct wsm *wsm)
{
ssize_t ret;
ret = kasnprintf(buf, "\t\twsm %p: mmu %p cbuf %p va %lx len %u\n",
wsm, wsm->mmu, wsm->cbuf, wsm->va, wsm->len);
if (ret < 0)
return ret;
if (wsm->mmu) {
ret = tee_mmu_debug_structs(buf, wsm->mmu);
if (ret < 0)
return ret;
}
return 0;
}
/*
* Share buffers with SWd and add corresponding WSM objects to session.
*/
int session_wsms_add(struct tee_session *session,
struct mc_ioctl_buffer *bufs)
{
struct mc_ioctl_buffer *buf;
struct mcp_buffer_map maps[MC_MAP_MAX];
struct mcp_buffer_map *map;
int i, ret = 0;
u32 n_null_buf = 0;
/* Check parameters */
if (!session)
return -ENXIO;
/* Lock the session */
mutex_lock(&session->wsms_lock);
for (i = 0, buf = bufs, map = maps; i < MC_MAP_MAX; i++, buf++, map++) {
if (!buf->va) {
n_null_buf++;
continue;
}
/* Avoid mapping overlaps */
if (wsm_check(session, buf)) {
ret = -EADDRINUSE;
mc_dev_err("maps[%d] va=%llx already map'd\n",
i, buf->va);
goto unlock;
}
}
if (n_null_buf >= MC_MAP_MAX) {
ret = -EINVAL;
mc_dev_err("va=NULL\n");
goto unlock;
}
for (i = 0, buf = bufs, map = maps; i < MC_MAP_MAX; i++, buf++, map++) {
struct wsm *wsm;
if (!buf->va) {
map->type = WSM_INVALID;
continue;
}
wsm = wsm_create(session, buf->va, buf->len);
if (IS_ERR(wsm)) {
ret = PTR_ERR(wsm);
mc_dev_err("maps[%d] va=%llx create failed: %d\n",
i, buf->va, ret);
goto end;
}
tee_mmu_buffer(wsm->mmu, map);
mc_dev_devel("maps[%d] va=%llx: t:%u a:%llx o:%u l:%u\n",
i, buf->va, map->type, map->phys_addr, map->offset,
map->length);
}
/* Map buffers */
if (g_ctx.f_multimap) {
/* Send MCP message to map buffers in SWd */
ret = mcp_multimap(session->mcp_session.id, maps);
if (ret)
mc_dev_err("multimap failed: %d\n", ret);
} else {
/* Map each buffer */
for (i = 0, buf = bufs, map = maps; i < MC_MAP_MAX; i++, buf++,
map++) {
if (!buf->va)
continue;
/* Send MCP message to map buffer in SWd */
ret = mcp_map(session->mcp_session.id, map);
if (ret) {
mc_dev_err("maps[%d] va=%llx map failed: %d\n",
i, buf->va, ret);
break;
}
}
}
end:
for (i = 0, buf = bufs, map = maps; i < MC_MAP_MAX; i++, buf++, map++) {
struct wsm *wsm = wsm_find(session, buf->va);
if (!buf->va)
continue;
if (ret) {
if (!wsm)
break;
/* Destroy mapping */
wsm_free(session, wsm);
} else {
/* Store mapping */
buf->sva = map->secure_va;
wsm->sva = buf->sva;
mc_dev_devel("maps[%d] va=%llx map'd len=%u sva=%llx\n",
i, buf->va, buf->len, buf->sva);
}
}
unlock:
/* Unlock the session */
mutex_unlock(&session->wsms_lock);
return ret;
}
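/*
 * Caller sketch (assumed, illustration only): sharing a single buffer.
 * All MC_MAP_MAX slots are always passed; unused slots keep va == 0.
 */
#if 0	/* illustration only */
	struct mc_ioctl_buffer bufs[MC_MAP_MAX] = { { 0 } };

	bufs[0].va = va;	/* client virtual address */
	bufs[0].len = len;	/* length in bytes */
	if (!session_wsms_add(session, bufs))
		sva = bufs[0].sva;	/* SWd address, filled in on success */
#endif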
/*
 * Stop sharing buffers and delete corresponding WSM objects.
*/
int session_wsms_remove(struct tee_session *session,
const struct mc_ioctl_buffer *bufs)
{
const struct mc_ioctl_buffer *buf;
struct mcp_buffer_map maps[MC_MAP_MAX];
struct mcp_buffer_map *map;
int i, ret = 0;
u32 n_null_buf = 0;
if (!session) {
mc_dev_err("session pointer is null\n");
return -EINVAL;
}
/* Lock the session */
mutex_lock(&session->wsms_lock);
/* Find, check and map buffer */
for (i = 0, buf = bufs, map = maps; i < MC_MAP_MAX; i++, buf++, map++) {
struct wsm *wsm;
if (!buf->va) {
n_null_buf++;
map->secure_va = 0;
continue;
}
wsm = wsm_find(session, buf->va);
if (!wsm) {
ret = -EADDRNOTAVAIL;
mc_dev_err("maps[%d] va=%llx not found\n", i, buf->va);
goto out;
}
/* Check input params consistency */
if ((wsm->sva != buf->sva) || (wsm->len != buf->len)) {
mc_dev_err("maps[%d] va=%llx no match: %x != %llx\n",
i, buf->va, wsm->sva, buf->sva);
mc_dev_err("maps[%d] va=%llx no match: %u != %u\n",
i, buf->va, wsm->len, buf->len);
ret = -EINVAL;
goto out;
}
tee_mmu_buffer(wsm->mmu, map);
map->secure_va = buf->sva;
mc_dev_devel("maps[%d] va=%llx: t:%u a:%llx o:%u l:%u s:%llx\n",
i, buf->va, map->type, map->phys_addr, map->offset,
map->length, map->secure_va);
}
if (n_null_buf >= MC_MAP_MAX) {
ret = -EINVAL;
mc_dev_err("va=NULL\n");
goto out;
}
if (g_ctx.f_multimap) {
/* Send MCP command to unmap buffers in SWd */
ret = mcp_multiunmap(session->mcp_session.id, maps);
if (ret)
mc_dev_err("mcp_multiunmap failed: %d\n", ret);
} else {
for (i = 0, buf = bufs, map = maps; i < MC_MAP_MAX;
i++, buf++, map++) {
if (!buf->va)
continue;
/* Send MCP command to unmap buffer in SWd */
ret = mcp_unmap(session->mcp_session.id, map);
if (ret) {
mc_dev_err(
"maps[%d] va=%llx unmap failed: %d\n",
i, buf->va, ret);
break;
}
}
}
	for (i = 0, buf = bufs; i < MC_MAP_MAX; i++, buf++) {
		struct wsm *wsm;
		/* Skip empty slots so a NULL va does not end the loop early */
		if (!buf->va)
			continue;
		wsm = wsm_find(session, buf->va);
		if (!wsm)
			break;
/* Free wsm */
wsm_free(session, wsm);
mc_dev_devel("maps[%d] va=%llx unmap'd len=%u sva=%llx\n",
i, buf->va, buf->len, buf->sva);
}
out:
mutex_unlock(&session->wsms_lock);
return ret;
}
/*
* Sleep until next notification from SWd.
*/
int session_waitnotif(struct tee_session *session, s32 timeout)
{
return mcp_session_waitnotif(&session->mcp_session, timeout);
}
int session_debug_structs(struct kasnprintf_buf *buf,
struct tee_session *session, bool is_closing)
{
struct wsm *wsm;
s32 exit_code;
int ret;
exit_code = mcp_session_exitcode(&session->mcp_session);
ret = kasnprintf(buf, "\tsession %p [%d]: %x %s ec %d%s\n", session,
kref_read(&session->kref), session->mcp_session.id,
session->mcp_session.is_gp ? "GP" : "MC", exit_code,
is_closing ? " <closing>" : "");
if (ret < 0)
return ret;
	/* WSMs */
mutex_lock(&session->wsms_lock);
if (list_empty(&session->wsms))
goto done;
list_for_each_entry(wsm, &session->wsms, list) {
ret = wsm_debug_structs(buf, wsm);
if (ret < 0)
goto done;
}
done:
mutex_unlock(&session->wsms_lock);
if (ret < 0)
return ret;
return 0;
}

65
drivers/gud/gud-exynos7570/MobiCoreDriver/session.h Normal file
View file

@ -0,0 +1,65 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _SESSION_H_
#define _SESSION_H_
#include <linux/list.h>
#include "mcp.h"
struct tee_object;
struct tee_mmu;
struct mc_ioctl_buffer;
struct tee_session {
/* Session list lock */
struct mutex close_lock;
/* MCP session descriptor (MUST BE FIRST) */
struct mcp_session mcp_session;
/* Owner */
struct tee_client *client;
/* Number of references kept to this object */
struct kref kref;
/* The list entry to attach to session list of owner */
struct list_head list;
/* Session WSMs lock */
struct mutex wsms_lock;
/* List of WSMs for a session */
struct list_head wsms;
};
struct tee_session *session_create(struct tee_client *client, bool is_gp,
struct mc_identity *identity);
int session_open(struct tee_session *session, const struct tee_object *obj,
const struct tee_mmu *obj_mmu, uintptr_t tci, size_t len);
int session_close(struct tee_session *session);
static inline void session_get(struct tee_session *session)
{
kref_get(&session->kref);
}
int session_put(struct tee_session *session);
int session_kill(struct tee_session *session);
int session_wsms_add(struct tee_session *session,
struct mc_ioctl_buffer *bufs);
int session_wsms_remove(struct tee_session *session,
const struct mc_ioctl_buffer *bufs);
s32 session_exitcode(struct tee_session *session);
int session_notify_swd(struct tee_session *session);
int session_waitnotif(struct tee_session *session, s32 timeout);
int session_debug_structs(struct kasnprintf_buf *buf,
struct tee_session *session, bool is_closing);
#endif /* _SESSION_H_ */
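/*
 * Typical lifecycle suggested by this API (sketch, not normative):
 * session_create() -> session_open() -> session_notify_swd() and
 * session_waitnotif() in a loop -> session_close() -> session_put().
 * session_kill() replaces the close/put pair when the SWd has died.
 */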

297
drivers/gud/gud-exynos7570/MobiCoreDriver/user.c Normal file
View file

@ -0,0 +1,297 @@
/*
* Copyright (c) 2013-2015 TRUSTONIC LIMITED
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/uaccess.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include "public/mc_linux.h"
#include "main.h"
#include "user.h"
#include "client.h"
#include "mcp.h" /* mcp_get_version */
/*
* Get client object from file pointer
*/
static inline struct tee_client *get_client(struct file *file)
{
return (struct tee_client *)file->private_data;
}
/*
* Callback for system open()
 * A set of internal client data is created and initialized.
 *
 * @inode
 * @file
 * Returns 0 on success or -ENOMEM if allocation failed.
*/
static int user_open(struct inode *inode, struct file *file)
{
struct tee_client *client;
mc_dev_devel("from %s\n", current->comm);
/* Create client */
client = client_create(false);
if (!client)
return -ENOMEM;
/* Store client in user file */
file->private_data = client;
return 0;
}
/*
* Callback for system close()
* The client object is freed.
* @inode
* @file
* Returns 0
*/
static int user_release(struct inode *inode, struct file *file)
{
struct tee_client *client = get_client(file);
mc_dev_devel("from %s\n", current->comm);
if (WARN(!client, "No client data available"))
return -EPROTO;
/* Detach client from user file */
file->private_data = NULL;
/* Destroy client, including remaining sessions */
client_close(client);
return 0;
}
/*
* Check r/w access to referenced memory
*/
static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg)
{
int err = 0;
if (_IOC_DIR(cmd) & _IOC_READ)
err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
if (err)
return -EFAULT;
return 0;
}
/*
* Callback for system ioctl()
* Implement most of ClientLib API functions
 * @file	pointer to file
 * @id		ioctl command identifier
 * @arg		command arguments
*
* Returns 0 for OK and an errno in case of error
*/
static long user_ioctl(struct file *file, unsigned int id, unsigned long arg)
{
struct tee_client *client = get_client(file);
int __user *uarg = (int __user *)arg;
int ret = -EINVAL;
mc_dev_devel("%u from %s\n", _IOC_NR(id), current->comm);
if (WARN(!client, "No client data available"))
return -EPROTO;
if (ioctl_check_pointer(id, uarg))
return -EFAULT;
switch (id) {
case MC_IO_FREEZE:
/* Freeze the client */
ret = client_freeze(client);
break;
case MC_IO_OPEN_SESSION: {
struct mc_ioctl_open_session session;
if (copy_from_user(&session, uarg, sizeof(session))) {
ret = -EFAULT;
break;
}
ret = client_open_session(client, &session.sid, &session.uuid,
session.tci, session.tcilen,
session.is_gp_uuid,
&session.identity);
if (ret)
break;
if (copy_to_user(uarg, &session, sizeof(session))) {
ret = -EFAULT;
client_remove_session(client, session.sid);
break;
}
break;
}
case MC_IO_OPEN_TRUSTLET: {
struct mc_ioctl_open_trustlet trustlet;
if (copy_from_user(&trustlet, uarg, sizeof(trustlet))) {
ret = -EFAULT;
break;
}
/* Call internal api */
ret = client_open_trustlet(client, &trustlet.sid, trustlet.spid,
trustlet.buffer, trustlet.tlen,
trustlet.tci, trustlet.tcilen);
if (ret)
break;
if (copy_to_user(uarg, &trustlet, sizeof(trustlet))) {
ret = -EFAULT;
client_remove_session(client, trustlet.sid);
break;
}
break;
}
case MC_IO_CLOSE_SESSION: {
u32 sid = (u32)arg;
ret = client_remove_session(client, sid);
break;
}
case MC_IO_NOTIFY: {
u32 sid = (u32)arg;
ret = client_notify_session(client, sid);
break;
}
case MC_IO_WAIT: {
struct mc_ioctl_wait wait;
if (copy_from_user(&wait, uarg, sizeof(wait))) {
ret = -EFAULT;
break;
}
ret = client_waitnotif_session(client, wait.sid, wait.timeout);
break;
}
case MC_IO_MAP: {
struct mc_ioctl_map map;
if (copy_from_user(&map, uarg, sizeof(map))) {
ret = -EFAULT;
break;
}
ret = client_map_session_wsms(client, map.sid, map.bufs);
if (ret)
break;
/* Fill in return struct */
if (copy_to_user(uarg, &map, sizeof(map))) {
ret = -EFAULT;
client_unmap_session_wsms(client, map.sid, map.bufs);
break;
}
break;
}
case MC_IO_UNMAP: {
struct mc_ioctl_map map;
if (copy_from_user(&map, uarg, sizeof(map))) {
ret = -EFAULT;
break;
}
ret = client_unmap_session_wsms(client, map.sid, map.bufs);
break;
}
case MC_IO_ERR: {
struct mc_ioctl_geterr *uerr = (struct mc_ioctl_geterr *)uarg;
u32 sid;
s32 exit_code;
if (get_user(sid, &uerr->sid)) {
ret = -EFAULT;
break;
}
ret = client_get_session_exitcode(client, sid, &exit_code);
if (ret)
break;
/* Fill in return struct */
if (put_user(exit_code, &uerr->value)) {
ret = -EFAULT;
break;
}
break;
}
case MC_IO_VERSION: {
struct mc_version_info version_info;
ret = mcp_get_version(&version_info);
if (ret)
break;
if (copy_to_user(uarg, &version_info, sizeof(version_info)))
ret = -EFAULT;
break;
}
default:
mc_dev_err("unsupported cmd=0x%x\n", id);
ret = -ENOIOCTLCMD;
}
return ret;
}
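/*
 * Userspace sketch (device node name and field name assumed, not
 * defined in this file): querying the driver version.
 *
 *	int fd = open("/dev/mobicore-user", O_RDWR);
 *	struct mc_version_info vi;
 *
 *	if (fd >= 0 && !ioctl(fd, MC_IO_VERSION, &vi))
 *		printf("MCI version %u\n", vi.version_mci);
 */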
/*
* Callback for system mmap()
*/
static int user_mmap(struct file *file, struct vm_area_struct *vmarea)
{
struct tee_client *client = get_client(file);
u32 len = (u32)(vmarea->vm_end - vmarea->vm_start);
/* Alloc contiguous buffer for this client */
return client_cbuf_create(client, len, NULL, vmarea);
}
static const struct file_operations mc_user_fops = {
.owner = THIS_MODULE,
.open = user_open,
.release = user_release,
.unlocked_ioctl = user_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = user_ioctl,
#endif
.mmap = user_mmap,
};
int mc_user_init(struct cdev *cdev)
{
cdev_init(cdev, &mc_user_fops);
return 0;
}
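/*
 * Registration sketch (assumed caller in main.c, illustration only):
 * mc_user_init() only initialises the cdev; the caller still has to
 * make it visible, along the lines of:
 *
 *	mc_user_init(&cdev);
 *	cdev_add(&cdev, devno, 1);
 *	device_create(class, NULL, devno, NULL, "mobicore-user");
 */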

Some files were not shown because too many files have changed in this diff.