Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

lib/Kconfig (new file, 521 lines)

@@ -0,0 +1,521 @@
#
# Library configuration
#
config BINARY_PRINTF
def_bool n
menu "Library routines"
config RAID6_PQ
tristate
config BITREVERSE
tristate
config RATIONAL
boolean
config GENERIC_STRNCPY_FROM_USER
bool
config GENERIC_STRNLEN_USER
bool
config GENERIC_NET_UTILS
bool
config GENERIC_FIND_FIRST_BIT
bool
config NO_GENERIC_PCI_IOPORT_MAP
bool
config GENERIC_PCI_IOMAP
bool
config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
config GENERIC_IO
boolean
default n
config STMP_DEVICE
bool
config PERCPU_RWSEM
boolean
config ARCH_USE_CMPXCHG_LOCKREF
bool
config ARCH_HAS_FAST_MULTIPLIER
bool
config CRC_CCITT
tristate "CRC-CCITT functions"
help
This option is provided for the case where no in-kernel-tree
modules require CRC-CCITT functions, but a module built outside
the kernel tree does. Such modules that use library CRC-CCITT
functions require M here.
config CRC16
tristate "CRC16 functions"
help
This option is provided for the case where no in-kernel-tree
modules require CRC16 functions, but a module built outside
the kernel tree does. Such modules that use library CRC16
functions require M here.
config CRC_T10DIF
tristate "CRC calculation for the T10 Data Integrity Field"
select CRYPTO
select CRYPTO_CRCT10DIF
help
This option is only needed if a module that's not in the
kernel tree needs to calculate CRC checks for use with the
SCSI data integrity subsystem.
config CRC_ITU_T
tristate "CRC ITU-T V.41 functions"
help
This option is provided for the case where no in-kernel-tree
modules require CRC ITU-T V.41 functions, but a module built outside
the kernel tree does. Such modules that use library CRC ITU-T V.41
functions require M here.
config CRC32
tristate "CRC32/CRC32c functions"
default y
select BITREVERSE
help
This option is provided for the case where no in-kernel-tree
modules require CRC32/CRC32c functions, but a module built outside
the kernel tree does. Such modules that use library CRC32/CRC32c
functions require M here.
config CRC32_SELFTEST
bool "CRC32 perform self test on init"
default n
depends on CRC32
help
This option enables the CRC32 library functions to perform a
self test on initialization. The self test computes crc32_le
and crc32_be over byte strings with random alignment and length
and computes the total elapsed time and number of bytes processed.
choice
prompt "CRC32 implementation"
depends on CRC32
default CRC32_SLICEBY8
help
This option allows a kernel builder to override the default choice
of CRC32 algorithm. Choose the default ("slice by 8") unless you
know that you need one of the others.
config CRC32_SLICEBY8
bool "Slice by 8 bytes"
help
Calculate checksum 8 bytes at a time with a clever slicing algorithm.
This is the fastest algorithm, but comes with an 8KiB lookup table.
Most modern processors have enough cache to hold this table without
thrashing the cache.
This is the default implementation choice. Choose this one unless
you have a good reason not to.
config CRC32_SLICEBY4
bool "Slice by 4 bytes"
help
Calculate checksum 4 bytes at a time with a clever slicing algorithm.
This is a bit slower than slice by 8, but has a smaller 4KiB lookup
table.
Only choose this option if you know what you are doing.
config CRC32_SARWATE
bool "Sarwate's Algorithm (one byte at a time)"
help
Calculate checksum a byte at a time using Sarwate's algorithm. This
is not particularly fast, but has a small 256 byte lookup table.
Only choose this option if you know what you are doing.
config CRC32_BIT
bool "Classic Algorithm (one bit at a time)"
help
Calculate checksum one bit at a time. This is VERY slow, but has
no lookup table. This is provided as a debugging option.
Only choose this option if you are debugging crc32.
endchoice
config CRC7
tristate "CRC7 functions"
help
This option is provided for the case where no in-kernel-tree
modules require CRC7 functions, but a module built outside
the kernel tree does. Such modules that use library CRC7
functions require M here.
config LIBCRC32C
tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
select CRYPTO
select CRYPTO_CRC32C
help
This option is provided for the case where no in-kernel-tree
modules require CRC32c functions, but a module built outside the
kernel tree does. Such modules that use library CRC32c functions
require M here. See Castagnoli93.
The module will be called libcrc32c.
config CRC8
tristate "CRC8 function"
help
This option provides the CRC8 function. Drivers may select this
when they need to do a cyclic redundancy check according to the
CRC8 algorithm. The module will be called crc8.
config AUDIT_GENERIC
bool
depends on AUDIT && !AUDIT_ARCH
default y
config AUDIT_ARCH_COMPAT_GENERIC
bool
default n
config AUDIT_COMPAT_GENERIC
bool
depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
default y
config RANDOM32_SELFTEST
bool "PRNG perform self test on init"
default n
help
This option enables the 32 bit PRNG library functions to perform a
self test on initialization.
#
# compression support is select'ed if needed
#
config ZLIB_INFLATE
tristate
config ZLIB_DEFLATE
tristate
config LZO_COMPRESS
tristate
config LZO_DECOMPRESS
tristate
config LZ4_COMPRESS
tristate
config LZ4HC_COMPRESS
tristate
config LZ4_DECOMPRESS
tristate
source "lib/xz/Kconfig"
#
# These all provide a common interface (hence the apparent duplication with
# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
#
config DECOMPRESS_GZIP
select ZLIB_INFLATE
tristate
config DECOMPRESS_BZIP2
tristate
config DECOMPRESS_LZMA
tristate
config DECOMPRESS_XZ
select XZ_DEC
tristate
config DECOMPRESS_LZO
select LZO_DECOMPRESS
tristate
config DECOMPRESS_LZ4
select LZ4_DECOMPRESS
tristate
#
# Generic allocator support is selected if needed
#
config GENERIC_ALLOCATOR
boolean
#
# reed solomon support is select'ed if needed
#
config REED_SOLOMON
tristate
config REED_SOLOMON_ENC8
boolean
config REED_SOLOMON_DEC8
boolean
config REED_SOLOMON_ENC16
boolean
config REED_SOLOMON_DEC16
boolean
#
# BCH support is selected if needed
#
config BCH
tristate
config BCH_CONST_PARAMS
boolean
help
Drivers may select this option to force specific constant
values for parameters 'm' (Galois field order) and 't'
(error correction capability). Those specific values must
be set by declaring default values for symbols BCH_CONST_M
and BCH_CONST_T.
Doing so will enable extra compiler optimizations,
improving encoding and decoding performance up to 2x for
usual (m,t) values (typically such that m*t < 200).
When this option is selected, the BCH library supports
only a single (m,t) configuration. This is mainly useful
for NAND flash board drivers requiring known, fixed BCH
parameters.
config BCH_CONST_M
int
range 5 15
help
Constant value for Galois field order 'm'. If 'k' is the
number of data bits to protect, 'm' should be chosen such
that (k + m*t) <= 2**m - 1.
Drivers should declare a default value for this symbol if
they select option BCH_CONST_PARAMS.
config BCH_CONST_T
int
help
Constant value for error correction capability in bits 't'.
Drivers should declare a default value for this symbol if
they select option BCH_CONST_PARAMS.
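# Editor's note, a worked example of the constraint above (illustrative
# numbers, not from this commit): a NAND driver correcting t = 4 bit
# errors per 512-byte (4096-bit) sector can select m = 13, since
# k + m*t = 4096 + 52 = 4148 <= 2^13 - 1 = 8191.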
#
# Textsearch support is select'ed if needed
#
config TEXTSEARCH
boolean
config TEXTSEARCH_KMP
tristate
config TEXTSEARCH_BM
tristate
config TEXTSEARCH_FSM
tristate
config BTREE
boolean
config INTERVAL_TREE
boolean
help
Simple, embeddable, interval-tree. Can find the start of an
overlapping range in log(n) time and then iterate over all
overlapping nodes. The algorithm is implemented as an
augmented rbtree.
See:
Documentation/rbtree.txt
for more information.
config ASSOCIATIVE_ARRAY
bool
help
Generic associative array. Can be searched and iterated over whilst
it is being modified. It is also reasonably quick to search and
modify. The algorithms are non-recursive, and the trees are highly
capacious.
See:
Documentation/assoc_array.txt
for more information.
config HAS_IOMEM
boolean
depends on !NO_IOMEM
select GENERIC_IO
default y
config HAS_IOPORT_MAP
boolean
depends on HAS_IOMEM && !NO_IOPORT_MAP
default y
config HAS_DMA
boolean
depends on !NO_DMA
default y
config CHECK_SIGNATURE
bool
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
stack overflow.
config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
depends on BROKEN
config CPU_RMAP
bool
depends on SMP
config DQL
bool
config GLOB
bool
# This actually supports modular compilation, but the module overhead
# is ridiculous for the amount of code involved. Until an out-of-tree
# driver asks for it, we'll just link it directly into the kernel
# when required. Since we're ignoring out-of-tree users, there's also
# no need to bother prompting for a manual decision:
# prompt "glob_match() function"
help
This option provides a glob_match function for performing
simple text pattern matching. It originated in the ATA code
to blacklist particular drive models, but other device drivers
may need similar functionality.
All drivers in the Linux kernel tree that require this function
should automatically select this option. Say N unless you
are compiling an out-of-tree driver which tells you that it
depends on this.
config GLOB_SELFTEST
bool "glob self-test on init"
default n
depends on GLOB
help
This option enables a simple self-test of the glob_match
function on startup. It is primarily useful for people
working on the code to ensure they haven't introduced any
regressions.
It only adds a little bit of code and slows kernel boot (or
module load) by a small amount, so you're welcome to play with
it, but you probably don't need it.
#
# Netlink attribute parsing support is select'ed if needed
#
config NLATTR
bool
#
# Generic 64-bit atomic support is selected if needed
#
config GENERIC_ATOMIC64
bool
config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
def_bool y if GENERIC_ATOMIC64
config LRU_CACHE
tristate
config AVERAGE
bool "Averaging functions"
help
This option is provided for the case where no in-kernel-tree
modules require averaging functions, but a module built outside
the kernel tree does. Such modules that use library averaging
functions require Y here.
If unsure, say N.
config CLZ_TAB
bool
config CORDIC
tristate "CORDIC algorithm"
help
This option provides an implementation of the CORDIC algorithm;
calculations are in fixed point. Module will be called cordic.
config DDR
bool "JEDEC DDR data"
help
Data from JEDEC specs for DDR SDRAM memories,
particularly the AC timing parameters and addressing
information. This data is useful for drivers handling
DDR SDRAM controllers.
config MPILIB
tristate
select CLZ_TAB
help
Multiprecision maths library from GnuPG.
It is used to implement RSA digital signature verification,
which is used by IMA/EVM digital signature extension.
config SIGNATURE
tristate
depends on KEYS
select CRYPTO
select CRYPTO_SHA1
select MPILIB
help
Digital signature verification. Currently only RSA is supported.
Implementation is done using the GnuPG MPI library.
#
# libfdt files, only selected if needed.
#
config LIBFDT
bool
config OID_REGISTRY
tristate
help
Enable the fast-lookup object identifier registry.
config UCS2_STRING
tristate
source "lib/fonts/Kconfig"
#
# sg chaining option
#
config ARCH_HAS_SG_CHAIN
def_bool n
endmenu
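As an editorial aside on the CRC32 implementation choice above, here is a minimal user-space sketch of the Sarwate byte-at-a-time variant (an illustration only; it assumes the standard little-endian CRC-32 polynomial 0xEDB88320, whereas the kernel's crc32.o uses tables pregenerated by gen_crc32table):

#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_table[256];

/* Build the 256-entry table: one 8-step polynomial division per byte value. */
static void crc32_table_init(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t c = i;
		for (int k = 0; k < 8; k++)
			c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : c >> 1;
		crc32_table[i] = c;
	}
}

/* Sarwate's method: one table lookup consumes one input byte.
 * Callers conventionally seed with 0xffffffff and invert the result. */
static uint32_t crc32_sarwate(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = crc32_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}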

lib/Kconfig.debug (new file, 1736 lines)

Diff suppressed because it is too large.

lib/Kconfig.kgdb (new file, 101 lines)

@@ -0,0 +1,101 @@
config HAVE_ARCH_KGDB
bool
menuconfig KGDB
bool "KGDB: kernel debugger"
depends on HAVE_ARCH_KGDB
depends on DEBUG_KERNEL
help
If you say Y here, it will be possible to remotely debug the
kernel using gdb. It is recommended but not required, that
you also turn on the kernel config option
CONFIG_FRAME_POINTER to aid in producing more reliable stack
backtraces in the external debugger. Documentation of the
kernel debugger is available at http://kgdb.sourceforge.net
as well as in DocBook form in Documentation/DocBook/. If
unsure, say N.
if KGDB
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
select MAGIC_SYSRQ
depends on TTY
default y
help
Share a serial console with kgdb. Sysrq-g must be used
to break in initially.
config KGDB_TESTS
bool "KGDB: internal test suite"
default n
help
This is a kgdb I/O module specifically designed to test
kgdb's internal functions. This kgdb I/O module is
intended for the development of new kgdb stubs
as well as for regression testing of the kgdb internals.
See drivers/misc/kgdbts.c for details about
the tests. The most basic use of this I/O module is to boot
a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
config KGDB_TESTS_ON_BOOT
bool "KGDB: Run tests on boot"
depends on KGDB_TESTS
default n
help
Run the kgdb tests on boot up automatically without the need
to pass in a kernel parameter.
config KGDB_TESTS_BOOT_STRING
string "KGDB: which internal kgdb tests to run"
depends on KGDB_TESTS_ON_BOOT
default "V1F100"
help
This is the command string to send to the kgdb test suite on
boot. See drivers/misc/kgdbts.c for detailed
information about other strings you could use beyond the
default of V1F100.
config KGDB_LOW_LEVEL_TRAP
bool "KGDB: Allow debugging with traps in notifiers"
depends on X86 || MIPS
default n
help
This will add an extra callback to kgdb for the breakpoint
exception handler which will allow kgdb to step through a
notify handler.
config KGDB_KDB
bool "KGDB_KDB: include kdb frontend for kgdb"
default n
help
KDB frontend for the kernel.
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
depends on VT && KGDB_KDB
default n
help
KDB can use a PS/2-type keyboard as an input device.
config KDB_CONTINUE_CATASTROPHIC
int "KDB: continue after catastrophic errors"
depends on KGDB_KDB
default "0"
help
This integer controls the behaviour of kdb when the kernel gets a
catastrophic error, i.e. for a panic or oops.
When KDB is active and a catastrophic error occurs, nothing extra
will happen until you type 'go'.
CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
you type 'go', you will be warned by kdb. The second time you type
'go', KDB tries to continue. No guarantees that the
kernel is still usable in this situation.
CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue.
No guarantees that the kernel is still usable in this situation.
CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
If you are not sure, say 0.
endif # KGDB

lib/Kconfig.kmemcheck (new file, 94 lines)

@@ -0,0 +1,94 @@
config HAVE_ARCH_KMEMCHECK
bool
if HAVE_ARCH_KMEMCHECK
menuconfig KMEMCHECK
bool "kmemcheck: trap use of uninitialized memory"
depends on DEBUG_KERNEL
depends on !X86_USE_3DNOW
depends on SLUB || SLAB
depends on !CC_OPTIMIZE_FOR_SIZE
depends on !FUNCTION_TRACER
select FRAME_POINTER
select STACKTRACE
default n
help
This option enables tracing of dynamically allocated kernel memory
to see if memory is used before it has been given an initial value.
Be aware that this requires half of your memory for bookkeeping and
will insert extra code at *every* read and write to tracked memory
thus slowing down the kernel code (but user code is unaffected).
The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
or enable kmemcheck at boot-time. If the kernel is started with
kmemcheck=0, the large memory and CPU overhead is not incurred.
choice
prompt "kmemcheck: default mode at boot"
depends on KMEMCHECK
default KMEMCHECK_ONESHOT_BY_DEFAULT
help
This option controls the default behaviour of kmemcheck when the
kernel boots and no kmemcheck= parameter is given.
config KMEMCHECK_DISABLED_BY_DEFAULT
bool "disabled"
depends on KMEMCHECK
config KMEMCHECK_ENABLED_BY_DEFAULT
bool "enabled"
depends on KMEMCHECK
config KMEMCHECK_ONESHOT_BY_DEFAULT
bool "one-shot"
depends on KMEMCHECK
help
In one-shot mode, only the first error detected is reported before
kmemcheck is disabled.
endchoice
config KMEMCHECK_QUEUE_SIZE
int "kmemcheck: error queue size"
depends on KMEMCHECK
default 64
help
Select the maximum number of errors to store in the queue. Since
errors can occur virtually anywhere and in any context, we need a
temporary storage area which is guaranteed not to generate any
other faults. The queue will be emptied as soon as a tasklet may
be scheduled. If the queue is full, new error reports will be
lost.
config KMEMCHECK_SHADOW_COPY_SHIFT
int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
depends on KMEMCHECK
range 2 8
default 5
help
Select the number of shadow bytes to save along with each entry of
the queue. These bytes indicate what parts of an allocation are
initialized, uninitialized, etc. and will be displayed when an
error is detected to help the debugging of a particular problem.
config KMEMCHECK_PARTIAL_OK
bool "kmemcheck: allow partially uninitialized memory"
depends on KMEMCHECK
default y
help
This option works around certain GCC optimizations that produce
32-bit reads from 16-bit variables where the upper 16 bits are
thrown away afterwards. This may of course also hide some real
bugs.
config KMEMCHECK_BITOPS_OK
bool "kmemcheck: allow bit-field manipulation"
depends on KMEMCHECK
default n
help
This option silences warnings that would be generated for bit-field
accesses where not all the bits are initialized at the same time.
This may also hide some real bugs.
endif

lib/Makefile (new file, 198 lines)

@@ -0,0 +1,198 @@
#
# Makefile for some libs needed in the kernel.
#
ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o argv_split.o \
proportions.o flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
endif
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
GCOV_PROFILE_hweight.o := n
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
endif
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
obj-$(CONFIG_XZ_DEC) += xz/
obj-$(CONFIG_RAID6_PQ) += raid6/
lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
obj-$(CONFIG_NLATTR) += nlattr.o
obj-$(CONFIG_LRU_CACHE) += lru_cache.o
obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
obj-$(CONFIG_GENERIC_CSUM) += checksum.o
obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_AVERAGE) += average.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_SIGNATURE) += digsig.o
obj-$(CONFIG_CLZ_TAB) += clz_tab.o
obj-$(CONFIG_DDR) += jedec_ddr_data.o
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
obj-$(CONFIG_ASN1) += asn1_decoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
hostprogs-y := gen_crc32table
clean-files := crc32table.h
$(obj)/crc32.o: $(obj)/crc32table.h
quiet_cmd_crc32 = GEN $@
cmd_crc32 = $< > $@
$(obj)/crc32table.h: $(obj)/gen_crc32table
$(call cmd,crc32)
#
# Build a fast OID lookup registry from include/linux/oid_registry.h
#
obj-$(CONFIG_OID_REGISTRY) += oid_registry.o
$(obj)/oid_registry.o: $(obj)/oid_registry_data.c
$(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
$(src)/build_OID_registry
$(call cmd,build_OID_registry)
quiet_cmd_build_OID_registry = GEN $@
cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
clean-files += oid_registry_data.c
obj-$(CONFIG_UCS2_STRING) += ucs2_string.o

lib/argv_split.c (new file, 94 lines)

@@ -0,0 +1,94 @@
/*
* Helper function for splitting a string into an argv-like array.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/export.h>
static int count_argc(const char *str)
{
int count = 0;
bool was_space;
for (was_space = true; *str; str++) {
if (isspace(*str)) {
was_space = true;
} else if (was_space) {
was_space = false;
count++;
}
}
return count;
}
/**
* argv_free - free an argv
* @argv: the argument vector to be freed
*
* Frees an argv and the strings it points to.
*/
void argv_free(char **argv)
{
argv--;
kfree(argv[0]);
kfree(argv);
}
EXPORT_SYMBOL(argv_free);
/**
* argv_split - split a string at whitespace, returning an argv
* @gfp: the GFP mask used to allocate memory
* @str: the string to be split
* @argcp: returned argument count
*
* Returns an array of pointers to strings which are split out from
* @str. This is performed by strictly splitting on white-space; no
* quote processing is performed. Multiple whitespace characters are
* considered to be a single argument separator. The returned array
* is always NULL-terminated. Returns NULL on memory allocation
* failure.
*
* The source string at `str' may be undergoing concurrent alteration via
* userspace sysctl activity (at least). The argv_split() implementation
* attempts to handle this gracefully by taking a local copy to work on.
*/
char **argv_split(gfp_t gfp, const char *str, int *argcp)
{
char *argv_str;
bool was_space;
char **argv, **argv_ret;
int argc;
argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
if (!argv_str)
return NULL;
argc = count_argc(argv_str);
argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
if (!argv) {
kfree(argv_str);
return NULL;
}
*argv = argv_str;
argv_ret = ++argv;
for (was_space = true; *argv_str; argv_str++) {
if (isspace(*argv_str)) {
was_space = true;
*argv_str = 0;
} else if (was_space) {
was_space = false;
*argv++ = argv_str;
}
}
*argv = NULL;
if (argcp)
*argcp = argc;
return argv_ret;
}
EXPORT_SYMBOL(argv_split);
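A minimal usage sketch (illustrative, not part of this file): split a fixed command line, walk the NULL-terminated vector, then release the vector and its backing string copy with a single call.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static void demo_argv_split(void)
{
	int argc, i;
	char **argv = argv_split(GFP_KERNEL, "mount -o ro /dev/sda1", &argc);

	if (!argv)
		return;	/* allocation failure */
	for (i = 0; i < argc; i++)
		pr_info("argv[%d] = %s\n", i, argv[i]);
	argv_free(argv);	/* frees the vector and the string copy behind it */
}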

lib/asn1_decoder.c (new file, 489 lines)

@@ -0,0 +1,489 @@
/* Decoder for ASN.1 BER/DER/CER encoded bytestream
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/asn1_decoder.h>
#include <linux/asn1_ber_bytecode.h>
static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
/* OPC TAG JMP ACT */
[ASN1_OP_MATCH] = 1 + 1,
[ASN1_OP_MATCH_OR_SKIP] = 1 + 1,
[ASN1_OP_MATCH_ACT] = 1 + 1 + 1,
[ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_MATCH_JUMP] = 1 + 1 + 1,
[ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_MATCH_ANY] = 1,
[ASN1_OP_MATCH_ANY_ACT] = 1 + 1,
[ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_ANY] = 1,
[ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1,
[ASN1_OP_COND_FAIL] = 1,
[ASN1_OP_COMPLETE] = 1,
[ASN1_OP_ACT] = 1 + 1,
[ASN1_OP_RETURN] = 1,
[ASN1_OP_END_SEQ] = 1,
[ASN1_OP_END_SEQ_OF] = 1 + 1,
[ASN1_OP_END_SET] = 1,
[ASN1_OP_END_SET_OF] = 1 + 1,
[ASN1_OP_END_SEQ_ACT] = 1 + 1,
[ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1,
[ASN1_OP_END_SET_ACT] = 1 + 1,
[ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1,
};
/*
* Find the length of an indefinite length object
* @data: The data buffer
* @datalen: The end of the innermost containing element in the buffer
* @_dp: The data parse cursor (updated before returning)
* @_len: Where to return the size of the element.
* @_errmsg: Where to return a pointer to an error message on error
*/
static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen,
size_t *_dp, size_t *_len,
const char **_errmsg)
{
unsigned char tag, tmp;
size_t dp = *_dp, len, n;
int indef_level = 1;
next_tag:
if (unlikely(datalen - dp < 2)) {
if (datalen == dp)
goto missing_eoc;
goto data_overrun_error;
}
/* Extract a tag from the data */
tag = data[dp++];
if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
if (--indef_level <= 0) {
*_len = dp - *_dp;
*_dp = dp;
return 0;
}
goto next_tag;
}
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) {
do {
if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tmp = data[dp++];
} while (tmp & 0x80);
}
/* Extract the length */
len = data[dp++];
if (len <= 0x7f)
goto check_length;
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5))
goto indefinite_len_primitive;
indef_level++;
goto next_tag;
}
n = len - 0x80;
if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
len = 0;
for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
check_length:
if (len > datalen - dp)
goto data_overrun_error;
dp += len;
goto next_tag;
length_too_long:
*_errmsg = "Unsupported length";
goto error;
indefinite_len_primitive:
*_errmsg = "Indefinite len primitive not permitted";
goto error;
invalid_eoc:
*_errmsg = "Invalid length EOC";
goto error;
data_overrun_error:
*_errmsg = "Data overrun error";
goto error;
missing_eoc:
*_errmsg = "Missing EOC in indefinite len cons";
error:
*_dp = dp;
return -1;
}
/**
* asn1_ber_decoder - Decode BER/DER/CER-encoded ASN.1 according to a pattern
* @decoder: The decoder definition (produced by asn1_compiler)
* @context: The caller's context (to be passed to the action functions)
* @data: The encoded data
* @datalen: The size of the encoded data
*
* Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
* produced by asn1_compiler. Action functions are called on marked tags to
* allow the caller to retrieve significant data.
*
* LIMITATIONS:
*
* To keep down the amount of stack used by this function, the following limits
* have been imposed:
*
* (1) This won't handle datalen > 65535 without increasing the size of the
* cons stack elements and length_too_long checking.
*
* (2) The stack of constructed types is 10 deep. If the depth of non-leaf
* constructed types exceeds this, the decode will fail.
*
* (3) The SET type (not the SET OF type) isn't really supported as tracking
* what members of the set have been seen is a pain.
*/
int asn1_ber_decoder(const struct asn1_decoder *decoder,
void *context,
const unsigned char *data,
size_t datalen)
{
const unsigned char *machine = decoder->machine;
const asn1_action_t *actions = decoder->actions;
size_t machlen = decoder->machlen;
enum asn1_opcode op;
unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0;
const char *errmsg;
size_t pc = 0, dp = 0, tdp = 0, len = 0;
int ret;
unsigned char flags = 0;
#define FLAG_INDEFINITE_LENGTH 0x01
#define FLAG_MATCHED 0x02
#define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag
* - ie. whether or not we are going to parse
* a compound type.
*/
#define NR_CONS_STACK 10
unsigned short cons_dp_stack[NR_CONS_STACK];
unsigned short cons_datalen_stack[NR_CONS_STACK];
unsigned char cons_hdrlen_stack[NR_CONS_STACK];
#define NR_JUMP_STACK 10
unsigned char jump_stack[NR_JUMP_STACK];
if (datalen > 65535)
return -EMSGSIZE;
next_op:
pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n",
pc, machlen, dp, datalen, csp, jsp);
if (unlikely(pc >= machlen))
goto machine_overrun_error;
op = machine[pc];
if (unlikely(pc + asn1_op_lengths[op] > machlen))
goto machine_overrun_error;
/* If this command is meant to match a tag, then do that before
* evaluating the command.
*/
if (op <= ASN1_OP__MATCHES_TAG) {
unsigned char tmp;
/* Skip conditional matches if possible */
if ((op & ASN1_OP_MATCH__COND &&
flags & FLAG_MATCHED) ||
dp == datalen) {
pc += asn1_op_lengths[op];
goto next_op;
}
flags = 0;
hdr = 2;
/* Extract a tag from the data */
if (unlikely(dp >= datalen - 1))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
goto long_tag_not_supported;
if (op & ASN1_OP_MATCH__ANY) {
pr_debug("- any %02x\n", tag);
} else {
/* Extract the tag from the machine
* - Either CONS or PRIM are permitted in the data if
* CONS is not set in the op stream, otherwise CONS
* is mandatory.
*/
optag = machine[pc + 1];
flags |= optag & FLAG_CONS;
/* Determine whether the tag matched */
tmp = optag ^ tag;
tmp &= ~(optag & ASN1_CONS_BIT);
pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp);
if (tmp != 0) {
/* All odd-numbered tags are MATCH_OR_SKIP. */
if (op & ASN1_OP_MATCH__SKIP) {
pc += asn1_op_lengths[op];
dp--;
goto next_op;
}
goto tag_mismatch;
}
}
flags |= FLAG_MATCHED;
len = data[dp++];
if (len > 0x7f) {
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
if (unlikely(!(tag & ASN1_CONS_BIT)))
goto indefinite_len_primitive;
flags |= FLAG_INDEFINITE_LENGTH;
if (unlikely(2 > datalen - dp))
goto data_overrun_error;
} else {
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
if (unlikely(dp >= datalen - n))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
if (unlikely(len > datalen - dp))
goto data_overrun_error;
}
}
if (flags & FLAG_CONS) {
/* For expected compound forms, we stack the positions
* of the start and end of the data.
*/
if (unlikely(csp >= NR_CONS_STACK))
goto cons_stack_overflow;
cons_dp_stack[csp] = dp;
cons_hdrlen_stack[csp] = hdr;
if (!(flags & FLAG_INDEFINITE_LENGTH)) {
cons_datalen_stack[csp] = datalen;
datalen = dp + len;
} else {
cons_datalen_stack[csp] = 0;
}
csp++;
}
pr_debug("- TAG: %02x %zu%s\n",
tag, len, flags & FLAG_CONS ? " CONS" : "");
tdp = dp;
}
/* Decide how to handle the operation */
switch (op) {
case ASN1_OP_MATCH_ANY_ACT:
case ASN1_OP_COND_MATCH_ANY_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
if (ret < 0)
return ret;
goto skip_data;
case ASN1_OP_MATCH_ACT:
case ASN1_OP_MATCH_ACT_OR_SKIP:
case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
if (ret < 0)
return ret;
goto skip_data;
case ASN1_OP_MATCH:
case ASN1_OP_MATCH_OR_SKIP:
case ASN1_OP_MATCH_ANY:
case ASN1_OP_COND_MATCH_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY:
skip_data:
if (!(flags & FLAG_CONS)) {
if (flags & FLAG_INDEFINITE_LENGTH) {
ret = asn1_find_indefinite_length(
data, datalen, &dp, &len, &errmsg);
if (ret < 0)
goto error;
} else {
dp += len;
}
pr_debug("- LEAF: %zu\n", len);
}
pc += asn1_op_lengths[op];
goto next_op;
case ASN1_OP_MATCH_JUMP:
case ASN1_OP_MATCH_JUMP_OR_SKIP:
case ASN1_OP_COND_MATCH_JUMP_OR_SKIP:
pr_debug("- MATCH_JUMP\n");
if (unlikely(jsp == NR_JUMP_STACK))
goto jump_stack_overflow;
jump_stack[jsp++] = pc + asn1_op_lengths[op];
pc = machine[pc + 2];
goto next_op;
case ASN1_OP_COND_FAIL:
if (unlikely(!(flags & FLAG_MATCHED)))
goto tag_mismatch;
pc += asn1_op_lengths[op];
goto next_op;
case ASN1_OP_COMPLETE:
if (unlikely(jsp != 0 || csp != 0)) {
pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n",
jsp, csp);
return -EBADMSG;
}
return 0;
case ASN1_OP_END_SET:
case ASN1_OP_END_SET_ACT:
if (unlikely(!(flags & FLAG_MATCHED)))
goto tag_mismatch;
case ASN1_OP_END_SEQ:
case ASN1_OP_END_SET_OF:
case ASN1_OP_END_SEQ_OF:
case ASN1_OP_END_SEQ_ACT:
case ASN1_OP_END_SET_OF_ACT:
case ASN1_OP_END_SEQ_OF_ACT:
if (unlikely(csp <= 0))
goto cons_stack_underflow;
csp--;
tdp = cons_dp_stack[csp];
hdr = cons_hdrlen_stack[csp];
len = datalen;
datalen = cons_datalen_stack[csp];
pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n",
tdp, dp, len, datalen);
if (datalen == 0) {
/* Indefinite length - check for the EOC. */
datalen = len;
if (unlikely(datalen - dp < 2))
goto data_overrun_error;
if (data[dp++] != 0) {
if (op & ASN1_OP_END__OF) {
dp--;
csp++;
pc = machine[pc + 1];
pr_debug("- continue\n");
goto next_op;
}
goto missing_eoc;
}
if (data[dp++] != 0)
goto invalid_eoc;
len = dp - tdp - 2;
} else {
if (dp < len && (op & ASN1_OP_END__OF)) {
datalen = len;
csp++;
pc = machine[pc + 1];
pr_debug("- continue\n");
goto next_op;
}
if (dp != len)
goto cons_length_error;
len -= tdp;
pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp);
}
if (op & ASN1_OP_END__ACT) {
unsigned char act;
if (op & ASN1_OP_END__OF)
act = machine[pc + 2];
else
act = machine[pc + 1];
ret = actions[act](context, hdr, 0, data + tdp, len);
}
pc += asn1_op_lengths[op];
goto next_op;
case ASN1_OP_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
pc += asn1_op_lengths[op];
goto next_op;
case ASN1_OP_RETURN:
if (unlikely(jsp <= 0))
goto jump_stack_underflow;
pc = jump_stack[--jsp];
goto next_op;
default:
break;
}
/* Shouldn't reach here */
pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
return -EBADMSG;
data_overrun_error:
errmsg = "Data overrun error";
goto error;
machine_overrun_error:
errmsg = "Machine overrun error";
goto error;
jump_stack_underflow:
errmsg = "Jump stack underflow";
goto error;
jump_stack_overflow:
errmsg = "Jump stack overflow";
goto error;
cons_stack_underflow:
errmsg = "Cons stack underflow";
goto error;
cons_stack_overflow:
errmsg = "Cons stack overflow";
goto error;
cons_length_error:
errmsg = "Cons length error";
goto error;
missing_eoc:
errmsg = "Missing EOC in indefinite len cons";
goto error;
invalid_eoc:
errmsg = "Invalid length EOC";
goto error;
length_too_long:
errmsg = "Unsupported length";
goto error;
indefinite_len_primitive:
errmsg = "Indefinite len primitive not permitted";
goto error;
tag_mismatch:
errmsg = "Unexpected tag";
goto error;
long_tag_not_supported:
errmsg = "Long tag not supported";
error:
pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n",
errmsg, pc, dp, optag, tag, len);
return -EBADMSG;
}
EXPORT_SYMBOL_GPL(asn1_ber_decoder);
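To show the calling convention, a hypothetical sketch (the grammar name "foo", the generated header "foo.asn1.h", and the action foo_note_version are invented for illustration; in practice a .asn1 grammar is compiled by scripts/asn1_compiler, which emits the bytecode table and binds marked tags to asn1_action_t callbacks):

#include <linux/asn1_decoder.h>
#include <linux/errno.h>
#include <linux/types.h>
#include "foo.asn1.h"	/* generated: extern const struct asn1_decoder foo_decoder; */

struct foo_ctx {
	u8 version;
};

/* Action bound to a marked tag in the grammar; matches asn1_action_t. */
int foo_note_version(void *context, size_t hdrlen, unsigned char tag,
		     const void *value, size_t vlen)
{
	struct foo_ctx *ctx = context;

	if (vlen != 1)
		return -EBADMSG;
	ctx->version = *(const u8 *)value;
	return 0;
}

static int foo_parse(const void *data, size_t datalen)
{
	struct foo_ctx ctx = { 0 };

	/* Walk the bytecode over the BER data, firing actions on marked tags. */
	return asn1_ber_decoder(&foo_decoder, &ctx, data, datalen);
}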

lib/assoc_array.c (new file, 1748 lines)

Diff suppressed because it is too large.

lib/atomic64.c (new file, 168 lines)

@@ -0,0 +1,168 @@
/*
* Generic implementation of 64-bit atomics using spinlocks,
* useful on processors that don't have 64-bit atomic instructions.
*
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>
/*
* We use a hashed array of spinlocks to provide exclusive access
* to each atomic64_t variable. Since this is expected to be used on
* systems with small numbers of CPUs (<= 4 or so), we use a
* relatively small array of 16 spinlocks to avoid wasting too much
* memory on the spinlock array.
*/
#define NR_LOCKS 16
/*
* Ensure each lock is in a separate cacheline.
*/
static union {
raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
},
};
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
addr >>= L1_CACHE_SHIFT;
addr ^= (addr >> 8) ^ (addr >> 16);
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
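/*
 * Editor's note (assuming L1_CACHE_SHIFT = 6 and NR_LOCKS = 16 as
 * above): dividing the address by the cache-line size gives variables
 * in different lines distinct hash inputs, the xors fold higher-order
 * address bits into the low ones, and the mask picks one of 16 locks.
 */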
long long atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_read);
void atomic64_set(atomic64_t *v, long long i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
raw_spin_lock_irqsave(lock, flags);
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
#define ATOMIC64_OP(op, c_op) \
void atomic64_##op(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
\
raw_spin_lock_irqsave(lock, flags); \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
long long atomic64_##op##_return(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
long long val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = (v->counter c_op a); \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op)
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
long long atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val == o)
v->counter = n;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
long long atomic64_xchg(atomic64_t *v, long long new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
v->counter = new;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
int ret = 0;
raw_spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
ret = 1;
}
raw_spin_unlock_irqrestore(lock, flags);
return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);

lib/atomic64_test.c (new file, 169 lines)

@@ -0,0 +1,169 @@
/*
* Testsuite for atomic64_t functions
*
* Copyright © 2010 Luca Barbieri
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
static __init int test_atomic64(void)
{
long long v0 = 0xaaa31337c001d00dLL;
long long v1 = 0xdeadbeefdeafcafeLL;
long long v2 = 0xfaceabadf00df001LL;
long long onestwos = 0x1111111122222222LL;
long long one = 1LL;
atomic64_t v = ATOMIC64_INIT(v0);
long long r = v0;
BUG_ON(v.counter != r);
atomic64_set(&v, v1);
r = v1;
BUG_ON(v.counter != r);
BUG_ON(atomic64_read(&v) != r);
INIT(v0);
atomic64_add(onestwos, &v);
r += onestwos;
BUG_ON(v.counter != r);
INIT(v0);
atomic64_add(-one, &v);
r += -one;
BUG_ON(v.counter != r);
INIT(v0);
r += onestwos;
BUG_ON(atomic64_add_return(onestwos, &v) != r);
BUG_ON(v.counter != r);
INIT(v0);
r += -one;
BUG_ON(atomic64_add_return(-one, &v) != r);
BUG_ON(v.counter != r);
INIT(v0);
atomic64_sub(onestwos, &v);
r -= onestwos;
BUG_ON(v.counter != r);
INIT(v0);
atomic64_sub(-one, &v);
r -= -one;
BUG_ON(v.counter != r);
INIT(v0);
r -= onestwos;
BUG_ON(atomic64_sub_return(onestwos, &v) != r);
BUG_ON(v.counter != r);
INIT(v0);
r -= -one;
BUG_ON(atomic64_sub_return(-one, &v) != r);
BUG_ON(v.counter != r);
INIT(v0);
atomic64_inc(&v);
r += one;
BUG_ON(v.counter != r);
INIT(v0);
r += one;
BUG_ON(atomic64_inc_return(&v) != r);
BUG_ON(v.counter != r);
INIT(v0);
atomic64_dec(&v);
r -= one;
BUG_ON(v.counter != r);
INIT(v0);
r -= one;
BUG_ON(atomic64_dec_return(&v) != r);
BUG_ON(v.counter != r);
INIT(v0);
BUG_ON(atomic64_xchg(&v, v1) != v0);
r = v1;
BUG_ON(v.counter != r);
INIT(v0);
BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
r = v1;
BUG_ON(v.counter != r);
INIT(v0);
BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
BUG_ON(v.counter != r);
INIT(v0);
BUG_ON(atomic64_add_unless(&v, one, v0));
BUG_ON(v.counter != r);
INIT(v0);
BUG_ON(!atomic64_add_unless(&v, one, v1));
r += one;
BUG_ON(v.counter != r);
#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
INIT(onestwos);
BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
r -= one;
BUG_ON(v.counter != r);
INIT(0);
BUG_ON(atomic64_dec_if_positive(&v) != -one);
BUG_ON(v.counter != r);
INIT(-one);
BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
#endif
INIT(onestwos);
BUG_ON(!atomic64_inc_not_zero(&v));
r += one;
BUG_ON(v.counter != r);
INIT(0);
BUG_ON(atomic64_inc_not_zero(&v));
BUG_ON(v.counter != r);
INIT(-one);
BUG_ON(!atomic64_inc_not_zero(&v));
r += one;
BUG_ON(v.counter != r);
#ifdef CONFIG_X86
pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
"x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
"i586+",
#else
"i386+",
#endif
boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
pr_info("passed\n");
#endif
return 0;
}
core_initcall(test_atomic64);

lib/audit.c (new file, 81 lines)

@@ -0,0 +1,81 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
if (audit_is_compat(arch))
return 1;
else
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
if (audit_is_compat(abi))
return audit_classify_compat_syscall(abi, syscall);
switch(syscall) {
#ifdef __NR_open
case __NR_open:
return 2;
#endif
#ifdef __NR_openat
case __NR_openat:
return 3;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
return 4;
#endif
case __NR_execve:
return 5;
default:
return 0;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);

lib/average.c (new file, 64 lines)

@@ -0,0 +1,64 @@
/*
* lib/average.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/export.h>
#include <linux/average.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/log2.h>
/**
* DOC: Exponentially Weighted Moving Average (EWMA)
*
* These are generic functions for calculating Exponentially Weighted Moving
* Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
* up internal representation of the average value to prevent rounding errors.
* The factor for scaling up and the exponential weight (or decay rate) have to
* be specified through the init function. The structure should not be accessed
* directly but only through the helper functions.
*/
/**
* ewma_init() - Initialize EWMA parameters
* @avg: Average structure
* @factor: Factor to use for the scaled up internal value. The maximum value
* of averages can be ULONG_MAX/(factor*weight). For performance reasons
* factor has to be a power of 2.
* @weight: Exponential weight, or decay rate. This defines how fast the
* influence of older values decreases. For performance reasons weight has
* to be a power of 2.
*
* Initialize the EWMA parameters for a given struct ewma @avg.
*/
void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
{
WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
avg->weight = ilog2(weight);
avg->factor = ilog2(factor);
avg->internal = 0;
}
EXPORT_SYMBOL(ewma_init);
/**
* ewma_add() - Exponentially weighted moving average (EWMA)
* @avg: Average structure
* @val: Current value
*
* Add a sample to the average.
*/
struct ewma *ewma_add(struct ewma *avg, unsigned long val)
{
unsigned long internal = ACCESS_ONCE(avg->internal);
ACCESS_ONCE(avg->internal) = internal ?
(((internal << avg->weight) - internal) +
(val << avg->factor)) >> avg->weight :
(val << avg->factor);
return avg;
}
EXPORT_SYMBOL(ewma_add);
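A short usage sketch (illustrative; it assumes the ewma_read() helper from <linux/average.h> of the same vintage, which returns internal >> factor): factor 1024 keeps ten fractional bits in the internal value, and weight 8 makes each new sample contribute one eighth of the new average.

#include <linux/average.h>
#include <linux/kernel.h>

static void demo_ewma(void)
{
	struct ewma avg;

	ewma_init(&avg, 1024, 8);	/* scale by 2^10, decay rate 1/8 */
	ewma_add(&avg, 100);		/* first sample: average = 100 */
	ewma_add(&avg, 200);		/* (100 * 7 + 200) / 8 = 112.5 */
	pr_info("ewma = %lu\n", ewma_read(&avg));	/* prints 112 */
}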

lib/bcd.c (new file, 14 lines)

@@ -0,0 +1,14 @@
#include <linux/bcd.h>
#include <linux/export.h>
unsigned _bcd2bin(unsigned char val)
{
return (val & 0x0f) + (val >> 4) * 10;
}
EXPORT_SYMBOL(_bcd2bin);
unsigned char _bin2bcd(unsigned val)
{
return ((val / 10) << 4) + val % 10;
}
EXPORT_SYMBOL(_bin2bcd);
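Worked example: _bcd2bin(0x42) = (0x42 & 0x0f) + (0x42 >> 4) * 10 = 2 + 4 * 10 = 42, and _bin2bcd(42) = ((42 / 10) << 4) + 42 % 10 = 0x40 + 2 = 0x42, so the two helpers are exact inverses for values 0-99.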

lib/bch.c (new file, 1368 lines)

Diff suppressed because it is too large.

lib/bitmap.c (new file, 1191 lines)

Diff suppressed because it is too large.

lib/bitrev.c (new file, 59 lines)

@@ -0,0 +1,59 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/bitrev.h>
MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
MODULE_DESCRIPTION("Bit ordering reversal functions");
MODULE_LICENSE("GPL");
const u8 byte_rev_table[256] = {
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
};
EXPORT_SYMBOL_GPL(byte_rev_table);
u16 bitrev16(u16 x)
{
return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
}
EXPORT_SYMBOL(bitrev16);
/**
* bitrev32 - reverse the order of bits in a u32 value
* @x: value to be bit-reversed
*/
u32 bitrev32(u32 x)
{
return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16);
}
EXPORT_SYMBOL(bitrev32);
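Worked example: bitrev16(0x0001) = 0x8000. The low byte 0x01 reverses through byte_rev_table to 0x80 and is shifted into the high byte, while the former high byte 0x00 reverses to 0x00.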

lib/bsearch.c (new file, 53 lines)

@@ -0,0 +1,53 @@
/*
* A generic implementation of binary search for the Linux kernel
*
* Copyright (C) 2008-2009 Ksplice, Inc.
* Author: Tim Abbott <tabbott@ksplice.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2.
*/
#include <linux/export.h>
#include <linux/bsearch.h>
/*
* bsearch - binary search an array of elements
* @key: pointer to item being searched for
* @base: pointer to first element to search
* @num: number of elements
* @size: size of each element
* @cmp: pointer to comparison function
*
* This function does a binary search on the given array. The
* contents of the array should already be in ascending sorted order
* under the provided comparison function.
*
* Note that the key need not have the same type as the elements in
* the array, e.g. key could be a string and the comparison function
* could compare the string with the struct's name field. However, if
* the key and elements in the array are of the same type, you can use
* the same comparison function for both sort() and bsearch().
*/
void *bsearch(const void *key, const void *base, size_t num, size_t size,
int (*cmp)(const void *key, const void *elt))
{
size_t start = 0, end = num;
int result;
while (start < end) {
size_t mid = start + (end - start) / 2;
result = cmp(key, base + mid * size);
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
return (void *)base + mid * size;
}
return NULL;
}
EXPORT_SYMBOL(bsearch);
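An illustrative caller (not part of this file): search a sorted int array with a three-way comparison callback.

#include <linux/bsearch.h>
#include <linux/kernel.h>

static int cmp_int(const void *key, const void *elt)
{
	int a = *(const int *)key, b = *(const int *)elt;

	return (a > b) - (a < b);
}

static const int primes[] = { 2, 3, 5, 7, 11, 13 };

static bool demo_bsearch(void)
{
	int key = 7;
	const int *hit = bsearch(&key, primes, ARRAY_SIZE(primes),
				 sizeof(primes[0]), cmp_int);

	/* hit points at primes[3]; a NULL return would mean "not found". */
	return hit != NULL;
}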

lib/btree.c (new file, 801 lines)

@@ -0,0 +1,801 @@
/*
* lib/btree.c - Simple In-memory B+Tree
*
* As should be obvious for Linux kernel code, license is GPLv2
*
* Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
* Bits and pieces stolen from Peter Zijlstra's code, which is
* Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
* GPLv2
*
* see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
*
* A relatively simple B+Tree implementation. I have written it as a learning
* exercise to understand how B+Trees work. Turned out to be useful as well.
*
* B+Trees can be used much like Linux radix trees (which don't have anything
* in common with textbook radix trees, beware). Prerequisite for them working
* well is that access to a random tree node is much faster than a large number
* of operations within each node.
*
* Disks have fulfilled the prerequisite for a long time. More recently DRAM
* has gained similar properties, as memory access times, when measured in cpu
* cycles, have increased. Cacheline sizes have increased as well, which also
* helps B+Trees.
*
* Compared to radix trees, B+Trees are more efficient when dealing with a
* sparsely populated address space. Between 25% and 50% of the memory is
* occupied with valid pointers. When densely populated, radix trees contain
* ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
* pointers.
*
* This particular implementation stores pointers identified by a long value.
* Storing NULL pointers is illegal, lookup will return NULL when no entry
* was found.
*
* A trick is used here that is not commonly found in textbooks. The lowest
* values are to the right, not to the left. All used slots within a node
* are on the left, all unused slots contain NUL values. Most operations
* simply loop once over all slots and terminate on the first NUL.
*/
#include <linux/btree.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NODESIZE MAX(L1_CACHE_BYTES, 128)
struct btree_geo {
int keylen;
int no_pairs;
int no_longs;
};
struct btree_geo btree_geo32 = {
.keylen = 1,
.no_pairs = NODESIZE / sizeof(long) / 2,
.no_longs = NODESIZE / sizeof(long) / 2,
};
EXPORT_SYMBOL_GPL(btree_geo32);
#define LONG_PER_U64 (64 / BITS_PER_LONG)
struct btree_geo btree_geo64 = {
.keylen = LONG_PER_U64,
.no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
.no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo64);
struct btree_geo btree_geo128 = {
.keylen = 2 * LONG_PER_U64,
.no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
.no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo128);
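/*
 * Editor's note (illustrative numbers): on a 64-bit machine with
 * 64-byte cache lines, NODESIZE = MAX(64, 128) = 128 bytes, i.e. 16
 * longs per node, so btree_geo64 (keylen 1) packs 16 / 2 = 8 key/value
 * pairs per node and btree_geo128 (keylen 2) packs 16 / 3 = 5 pairs.
 */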
static struct kmem_cache *btree_cachep;
void *btree_alloc(gfp_t gfp_mask, void *pool_data)
{
return kmem_cache_alloc(btree_cachep, gfp_mask);
}
EXPORT_SYMBOL_GPL(btree_alloc);
void btree_free(void *element, void *pool_data)
{
kmem_cache_free(btree_cachep, element);
}
EXPORT_SYMBOL_GPL(btree_free);
static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
{
unsigned long *node;
node = mempool_alloc(head->mempool, gfp);
if (likely(node))
memset(node, 0, NODESIZE);
return node;
}
static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
{
size_t i;
for (i = 0; i < n; i++) {
if (l1[i] < l2[i])
return -1;
if (l1[i] > l2[i])
return 1;
}
return 0;
}
static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
size_t n)
{
size_t i;
for (i = 0; i < n; i++)
dest[i] = src[i];
return dest;
}
static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
{
size_t i;
for (i = 0; i < n; i++)
s[i] = c;
return s;
}
static void dec_key(struct btree_geo *geo, unsigned long *key)
{
unsigned long val;
int i;
for (i = geo->keylen - 1; i >= 0; i--) {
val = key[i];
key[i] = val - 1;
if (val)
break;
}
}
static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
{
return &node[n * geo->keylen];
}
static void *bval(struct btree_geo *geo, unsigned long *node, int n)
{
return (void *)node[geo->no_longs + n];
}
static void setkey(struct btree_geo *geo, unsigned long *node, int n,
unsigned long *key)
{
longcpy(bkey(geo, node, n), key, geo->keylen);
}
static void setval(struct btree_geo *geo, unsigned long *node, int n,
void *val)
{
node[geo->no_longs + n] = (unsigned long) val;
}
static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
{
longset(bkey(geo, node, n), 0, geo->keylen);
node[geo->no_longs + n] = 0;
}
static inline void __btree_init(struct btree_head *head)
{
head->node = NULL;
head->height = 0;
}
void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
{
__btree_init(head);
head->mempool = mempool;
}
EXPORT_SYMBOL_GPL(btree_init_mempool);
int btree_init(struct btree_head *head)
{
__btree_init(head);
head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
if (!head->mempool)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(btree_init);
void btree_destroy(struct btree_head *head)
{
mempool_free(head->node, head->mempool);
mempool_destroy(head->mempool);
head->mempool = NULL;
}
EXPORT_SYMBOL_GPL(btree_destroy);
void *btree_last(struct btree_head *head, struct btree_geo *geo,
unsigned long *key)
{
int height = head->height;
unsigned long *node = head->node;
if (height == 0)
return NULL;
for ( ; height > 1; height--)
node = bval(geo, node, 0);
longcpy(key, bkey(geo, node, 0), geo->keylen);
return bval(geo, node, 0);
}
EXPORT_SYMBOL_GPL(btree_last);
static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
unsigned long *key)
{
return longcmp(bkey(geo, node, pos), key, geo->keylen);
}
static int keyzero(struct btree_geo *geo, unsigned long *key)
{
int i;
for (i = 0; i < geo->keylen; i++)
if (key[i])
return 0;
return 1;
}
void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
unsigned long *key)
{
int i, height = head->height;
unsigned long *node = head->node;
if (height == 0)
return NULL;
for ( ; height > 1; height--) {
for (i = 0; i < geo->no_pairs; i++)
if (keycmp(geo, node, i, key) <= 0)
break;
if (i == geo->no_pairs)
return NULL;
node = bval(geo, node, i);
if (!node)
return NULL;
}
if (!node)
return NULL;
for (i = 0; i < geo->no_pairs; i++)
if (keycmp(geo, node, i, key) == 0)
return bval(geo, node, i);
return NULL;
}
EXPORT_SYMBOL_GPL(btree_lookup);
int btree_update(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, void *val)
{
int i, height = head->height;
unsigned long *node = head->node;
if (height == 0)
return -ENOENT;
for ( ; height > 1; height--) {
for (i = 0; i < geo->no_pairs; i++)
if (keycmp(geo, node, i, key) <= 0)
break;
if (i == geo->no_pairs)
return -ENOENT;
node = bval(geo, node, i);
if (!node)
return -ENOENT;
}
if (!node)
return -ENOENT;
for (i = 0; i < geo->no_pairs; i++)
if (keycmp(geo, node, i, key) == 0) {
setval(geo, node, i, val);
return 0;
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(btree_update);
/*
* Usually this function is quite similar to normal lookup. But the key of
* a parent node may be smaller than the smallest key of all its siblings.
 * In such a case we cannot just return NULL: we have only proven that no
 * key exists that is smaller than __key but larger than this parent key.
* So we set __key to the parent key and retry. We have to use the smallest
* such parent key, which is the last parent key we encountered.
*/
void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
unsigned long *__key)
{
int i, height;
unsigned long *node, *oldnode;
unsigned long *retry_key = NULL, key[geo->keylen];
if (keyzero(geo, __key))
return NULL;
if (head->height == 0)
return NULL;
longcpy(key, __key, geo->keylen);
retry:
dec_key(geo, key);
node = head->node;
for (height = head->height ; height > 1; height--) {
for (i = 0; i < geo->no_pairs; i++)
if (keycmp(geo, node, i, key) <= 0)
break;
if (i == geo->no_pairs)
goto miss;
oldnode = node;
node = bval(geo, node, i);
if (!node)
goto miss;
retry_key = bkey(geo, oldnode, i);
}
if (!node)
goto miss;
for (i = 0; i < geo->no_pairs; i++) {
if (keycmp(geo, node, i, key) <= 0) {
if (bval(geo, node, i)) {
longcpy(__key, bkey(geo, node, i), geo->keylen);
return bval(geo, node, i);
} else
goto miss;
}
}
miss:
if (retry_key) {
longcpy(key, retry_key, geo->keylen);
retry_key = NULL;
goto retry;
}
return NULL;
}
EXPORT_SYMBOL_GPL(btree_get_prev);
static int getpos(struct btree_geo *geo, unsigned long *node,
unsigned long *key)
{
int i;
for (i = 0; i < geo->no_pairs; i++) {
if (keycmp(geo, node, i, key) <= 0)
break;
}
return i;
}
static int getfill(struct btree_geo *geo, unsigned long *node, int start)
{
int i;
for (i = start; i < geo->no_pairs; i++)
if (!bval(geo, node, i))
break;
return i;
}
/*
* locate the correct leaf node in the btree
*/
static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, int level)
{
unsigned long *node = head->node;
int i, height;
for (height = head->height; height > level; height--) {
for (i = 0; i < geo->no_pairs; i++)
if (keycmp(geo, node, i, key) <= 0)
break;
if ((i == geo->no_pairs) || !bval(geo, node, i)) {
/* right-most key is too large, update it */
/* FIXME: If the right-most key on higher levels is
* always zero, this wouldn't be necessary. */
i--;
setkey(geo, node, i, key);
}
BUG_ON(i < 0);
node = bval(geo, node, i);
}
BUG_ON(!node);
return node;
}
static int btree_grow(struct btree_head *head, struct btree_geo *geo,
gfp_t gfp)
{
unsigned long *node;
int fill;
node = btree_node_alloc(head, gfp);
if (!node)
return -ENOMEM;
if (head->node) {
fill = getfill(geo, head->node, 0);
setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
setval(geo, node, 0, head->node);
}
head->node = node;
head->height++;
return 0;
}
static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
{
unsigned long *node;
int fill;
if (head->height <= 1)
return;
node = head->node;
fill = getfill(geo, node, 0);
BUG_ON(fill > 1);
head->node = bval(geo, node, 0);
head->height--;
mempool_free(node, head->mempool);
}
static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, void *val, int level,
gfp_t gfp)
{
unsigned long *node;
int i, pos, fill, err;
BUG_ON(!val);
if (head->height < level) {
err = btree_grow(head, geo, gfp);
if (err)
return err;
}
retry:
node = find_level(head, geo, key, level);
pos = getpos(geo, node, key);
fill = getfill(geo, node, pos);
/* two identical keys are not allowed */
BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
if (fill == geo->no_pairs) {
/* need to split node */
unsigned long *new;
new = btree_node_alloc(head, gfp);
if (!new)
return -ENOMEM;
err = btree_insert_level(head, geo,
bkey(geo, node, fill / 2 - 1),
new, level + 1, gfp);
if (err) {
mempool_free(new, head->mempool);
return err;
}
for (i = 0; i < fill / 2; i++) {
setkey(geo, new, i, bkey(geo, node, i));
setval(geo, new, i, bval(geo, node, i));
setkey(geo, node, i, bkey(geo, node, i + fill / 2));
setval(geo, node, i, bval(geo, node, i + fill / 2));
clearpair(geo, node, i + fill / 2);
}
if (fill & 1) {
setkey(geo, node, i, bkey(geo, node, fill - 1));
setval(geo, node, i, bval(geo, node, fill - 1));
clearpair(geo, node, fill - 1);
}
goto retry;
}
BUG_ON(fill >= geo->no_pairs);
/* shift and insert */
for (i = fill; i > pos; i--) {
setkey(geo, node, i, bkey(geo, node, i - 1));
setval(geo, node, i, bval(geo, node, i - 1));
}
setkey(geo, node, pos, key);
setval(geo, node, pos, val);
return 0;
}
int btree_insert(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, void *val, gfp_t gfp)
{
BUG_ON(!val);
return btree_insert_level(head, geo, key, val, 1, gfp);
}
EXPORT_SYMBOL_GPL(btree_insert);
static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, int level);
static void merge(struct btree_head *head, struct btree_geo *geo, int level,
unsigned long *left, int lfill,
unsigned long *right, int rfill,
unsigned long *parent, int lpos)
{
int i;
for (i = 0; i < rfill; i++) {
/* Move all keys to the left */
setkey(geo, left, lfill + i, bkey(geo, right, i));
setval(geo, left, lfill + i, bval(geo, right, i));
}
/* Exchange left and right child in parent */
setval(geo, parent, lpos, right);
setval(geo, parent, lpos + 1, left);
/* Remove left (formerly right) child from parent */
btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
mempool_free(right, head->mempool);
}
static void rebalance(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, int level, unsigned long *child, int fill)
{
unsigned long *parent, *left = NULL, *right = NULL;
int i, no_left, no_right;
if (fill == 0) {
/* Because we don't steal entries from a neighbour, this case
 * can happen. The parent node contains only a single child
 * (this node), so merging with a sibling never happens.
 */
btree_remove_level(head, geo, key, level + 1);
mempool_free(child, head->mempool);
return;
}
parent = find_level(head, geo, key, level + 1);
i = getpos(geo, parent, key);
BUG_ON(bval(geo, parent, i) != child);
if (i > 0) {
left = bval(geo, parent, i - 1);
no_left = getfill(geo, left, 0);
if (fill + no_left <= geo->no_pairs) {
merge(head, geo, level,
left, no_left,
child, fill,
parent, i - 1);
return;
}
}
if (i + 1 < getfill(geo, parent, i)) {
right = bval(geo, parent, i + 1);
no_right = getfill(geo, right, 0);
if (fill + no_right <= geo->no_pairs) {
merge(head, geo, level,
child, fill,
right, no_right,
parent, i);
return;
}
}
/*
 * We could also try to steal one entry from the left or right
 * neighbor. By not doing so we changed the invariant from
 * "all nodes are at least half full" to "no two neighboring
 * nodes can be merged". This means that the average fill of
 * all nodes is still half or better.
 */
}
static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
unsigned long *key, int level)
{
unsigned long *node;
int i, pos, fill;
void *ret;
if (level > head->height) {
/* we recursed all the way up */
head->height = 0;
head->node = NULL;
return NULL;
}
node = find_level(head, geo, key, level);
pos = getpos(geo, node, key);
fill = getfill(geo, node, pos);
if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
return NULL;
ret = bval(geo, node, pos);
/* remove and shift */
for (i = pos; i < fill - 1; i++) {
setkey(geo, node, i, bkey(geo, node, i + 1));
setval(geo, node, i, bval(geo, node, i + 1));
}
clearpair(geo, node, fill - 1);
if (fill - 1 < geo->no_pairs / 2) {
if (level < head->height)
rebalance(head, geo, key, level, node, fill - 1);
else if (fill - 1 == 1)
btree_shrink(head, geo);
}
return ret;
}
void *btree_remove(struct btree_head *head, struct btree_geo *geo,
unsigned long *key)
{
if (head->height == 0)
return NULL;
return btree_remove_level(head, geo, key, 1);
}
EXPORT_SYMBOL_GPL(btree_remove);
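/*
 * Usage sketch for the API above: insert a value under a 32-bit key, look
 * it up, then remove it again. Illustrative only: the function itself is
 * hypothetical, assumes process context (GFP_KERNEL), and @val must not be
 * NULL since storing NULL pointers is illegal (see the header comment).
 */
static int __maybe_unused btree_usage_sketch(void *val)
{
	struct btree_head head;
	unsigned long key = 42;
	int err;

	err = btree_init(&head);
	if (err)
		return err;
	err = btree_insert(&head, &btree_geo32, &key, val, GFP_KERNEL);
	if (!err) {
		WARN_ON(btree_lookup(&head, &btree_geo32, &key) != val);
		WARN_ON(btree_remove(&head, &btree_geo32, &key) != val);
	}
	btree_destroy(&head);
	return err;
}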
int btree_merge(struct btree_head *target, struct btree_head *victim,
struct btree_geo *geo, gfp_t gfp)
{
unsigned long key[geo->keylen];
unsigned long dup[geo->keylen];
void *val;
int err;
BUG_ON(target == victim);
if (!(target->node)) {
/* target is empty, just copy fields over */
target->node = victim->node;
target->height = victim->height;
__btree_init(victim);
return 0;
}
/* TODO: This needs some optimizations. Currently we do three tree
* walks to remove a single object from the victim.
*/
for (;;) {
if (!btree_last(victim, geo, key))
break;
val = btree_lookup(victim, geo, key);
err = btree_insert(target, geo, key, val, gfp);
if (err)
return err;
/* We must make a copy of the key, as the original will get
* mangled inside btree_remove. */
longcpy(dup, key, geo->keylen);
btree_remove(victim, geo, dup);
}
return 0;
}
EXPORT_SYMBOL_GPL(btree_merge);
static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
unsigned long *node, unsigned long opaque,
void (*func)(void *elem, unsigned long opaque,
unsigned long *key, size_t index,
void *func2),
void *func2, int reap, int height, size_t count)
{
int i;
unsigned long *child;
for (i = 0; i < geo->no_pairs; i++) {
child = bval(geo, node, i);
if (!child)
break;
if (height > 1)
count = __btree_for_each(head, geo, child, opaque,
func, func2, reap, height - 1, count);
else
func(child, opaque, bkey(geo, node, i), count++,
func2);
}
if (reap)
mempool_free(node, head->mempool);
return count;
}
static void empty(void *elem, unsigned long opaque, unsigned long *key,
size_t index, void *func2)
{
}
void visitorl(void *elem, unsigned long opaque, unsigned long *key,
size_t index, void *__func)
{
visitorl_t func = __func;
func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitorl);
void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
size_t index, void *__func)
{
visitor32_t func = __func;
u32 *key = (void *)__key;
func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor32);
void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
size_t index, void *__func)
{
visitor64_t func = __func;
u64 *key = (void *)__key;
func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor64);
void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
size_t index, void *__func)
{
visitor128_t func = __func;
u64 *key = (void *)__key;
func(elem, opaque, key[0], key[1], index);
}
EXPORT_SYMBOL_GPL(visitor128);
size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
unsigned long opaque,
void (*func)(void *elem, unsigned long opaque,
unsigned long *key,
size_t index, void *func2),
void *func2)
{
size_t count = 0;
if (!func2)
func = empty;
if (head->node)
count = __btree_for_each(head, geo, head->node, opaque, func,
func2, 0, head->height, 0);
return count;
}
EXPORT_SYMBOL_GPL(btree_visitor);
size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
unsigned long opaque,
void (*func)(void *elem, unsigned long opaque,
unsigned long *key,
size_t index, void *func2),
void *func2)
{
size_t count = 0;
if (!func2)
func = empty;
if (head->node)
count = __btree_for_each(head, geo, head->node, opaque, func,
func2, 1, head->height, 0);
__btree_init(head);
return count;
}
EXPORT_SYMBOL_GPL(btree_grim_visitor);
static int __init btree_module_init(void)
{
btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
SLAB_HWCACHE_ALIGN, NULL);
return 0;
}
static void __exit btree_module_exit(void)
{
kmem_cache_destroy(btree_cachep);
}
/* If core code starts using btree, initialization should happen even earlier */
module_init(btree_module_init);
module_exit(btree_module_exit);
MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");

184
lib/bug.c Normal file
View file

@@ -0,0 +1,184 @@
/*
Generic support for BUG()
This respects the following config options:
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
the containing struct bug_entry for bug_addr and file.
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
(though they're generally always on).
CONFIG_GENERIC_BUG is set by each architecture using this code.
To use this, your architecture must:
1. Set up the config options:
- Enable CONFIG_GENERIC_BUG if CONFIG_BUG
2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
- Define HAVE_ARCH_BUG
- Implement BUG() to generate a faulting instruction
- NOTE: struct bug_entry does not have "file" or "line" entries
when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
the values accordingly.
3. Implement the trap
- In the illegal instruction trap handler (typically), verify
that the fault was in kernel mode, and call report_bug()
- report_bug() will return whether it was a false alarm, a warning,
or an actual bug.
- You must implement the is_valid_bugaddr(bugaddr) callback which
returns true if the eip is a real kernel address, and it points
to the expected BUG trap instruction.
Jeremy Fitzhardinge <jeremy@goop.org> 2006
*/
#define pr_fmt(fmt) fmt
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
return bug->bug_addr;
#else
return (unsigned long)bug + bug->bug_addr_disp;
#endif
}
#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);
static const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct module *mod;
list_for_each_entry(mod, &module_bug_list, bug_list) {
const struct bug_entry *bug = mod->bug_table;
unsigned i;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug_addr(bug))
return bug;
}
return NULL;
}
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
char *secstrings;
unsigned int i;
mod->bug_table = NULL;
mod->num_bugs = 0;
/* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
continue;
mod->bug_table = (void *) sechdrs[i].sh_addr;
mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
break;
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&mod->bug_list, &module_bug_list);
}
void module_bug_cleanup(struct module *mod)
{
list_del(&mod->bug_list);
}
#else
static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
return NULL;
}
#endif
const struct bug_entry *find_bug(unsigned long bugaddr)
{
const struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug_addr(bug))
return bug;
return module_find_bug(bugaddr);
}
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
const struct bug_entry *bug;
const char *file;
unsigned line, warning;
if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
bug = find_bug(bugaddr);
file = NULL;
line = 0;
warning = 0;
if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
file = bug->file;
#else
file = (const char *)bug + bug->file_disp;
#endif
line = bug->line;
#endif
warning = (bug->flags & BUGFLAG_WARNING) != 0;
}
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
pr_warn("------------[ cut here ]------------\n");
if (file)
pr_warn("WARNING: at %s:%u\n", file, line);
else
pr_warn("WARNING: at %p [verbose debug info unavailable]\n",
(void *)bugaddr);
print_modules();
show_regs(regs);
print_oops_end_marker();
/* Just a warning, don't kill lockdep. */
add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK);
return BUG_TRAP_TYPE_WARN;
}
printk(KERN_DEFAULT "------------[ cut here ]------------\n");
if (file)
pr_crit("kernel BUG at %s:%u!\n", file, line);
else
pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
(void *)bugaddr);
return BUG_TRAP_TYPE_BUG;
}
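/*
 * Sketch of the arch-side flow described in the header comment above.
 * Illustrative only: the function name is hypothetical, and a real
 * architecture would also advance the instruction pointer so that a
 * warning resumes after the trapping instruction.
 */
static void __maybe_unused example_bug_trap(struct pt_regs *regs,
					    unsigned long bugaddr)
{
	switch (report_bug(bugaddr, regs)) {
	case BUG_TRAP_TYPE_WARN:
		/* just a WARN_ON; continue after the trap instruction */
		break;
	case BUG_TRAP_TYPE_NONE:
		/* not one of ours; fall back to normal die() handling */
		break;
	case BUG_TRAP_TYPE_BUG:
		panic("Fatal exception: kernel BUG");
	}
}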

207
lib/build_OID_registry Executable file
View file

@@ -0,0 +1,207 @@
#!/usr/bin/perl -w
#
# Build a static ASN.1 Object Identifier (OID) registry
#
# Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
# Written by David Howells (dhowells@redhat.com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence
# as published by the Free Software Foundation; either version
# 2 of the Licence, or (at your option) any later version.
#
use strict;
my @names = ();
my @oids = ();
if ($#ARGV != 1) {
print STDERR "Format: ", $0, " <in-h-file> <out-c-file>\n";
exit(2);
}
#
# Open the file to read from
#
open IN_FILE, "<$ARGV[0]" or die;
while (<IN_FILE>) {
chomp;
if (m!\s+OID_([a-zA-Z][a-zA-Z0-9_]+),\s+/[*]\s+([012][.0-9]*)\s+[*]/!) {
push @names, $1;
push @oids, $2;
}
}
close IN_FILE or die;
#
# Open the files to write into
#
open C_FILE, ">$ARGV[1]" or die;
print C_FILE "/*\n";
print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
print C_FILE " */\n";
#
# Split the data up into separate lists and also determine the lengths of the
# encoded data arrays.
#
my @indices = ();
my @lengths = ();
my $total_length = 0;
for (my $i = 0; $i <= $#names; $i++) {
my $name = $names[$i];
my $oid = $oids[$i];
my @components = split(/[.]/, $oid);
# Determine the encoded length of this OID
my $size = $#components;
for (my $loop = 2; $loop <= $#components; $loop++) {
my $c = $components[$loop];
# We will base128 encode the number
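# e.g. 840 encodes as the two base128 digits 0x86 0x48
# (the high bit is set on every digit except the last)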
my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
$tmp = int($tmp / 7);
$size += $tmp;
}
push @lengths, $size;
push @indices, $total_length;
$total_length += $size;
}
#
# Emit the look-up-by-OID index table
#
print C_FILE "\n";
if ($total_length <= 255) {
print C_FILE "static const unsigned char oid_index[OID__NR + 1] = {\n";
} else {
print C_FILE "static const unsigned short oid_index[OID__NR + 1] = {\n";
}
for (my $i = 0; $i <= $#names; $i++) {
print C_FILE "\t[OID_", $names[$i], "] = ", $indices[$i], ",\n"
}
print C_FILE "\t[OID__NR] = ", $total_length, "\n";
print C_FILE "};\n";
#
# Encode the OIDs
#
my @encoded_oids = ();
for (my $i = 0; $i <= $#names; $i++) {
my @octets = ();
my @components = split(/[.]/, $oids[$i]);
push @octets, $components[0] * 40 + $components[1];
for (my $loop = 2; $loop <= $#components; $loop++) {
my $c = $components[$loop];
# Base128 encode the number
my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
$tmp = int($tmp / 7);
for (; $tmp > 0; $tmp--) {
push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80;
}
push @octets, $c & 0x7f;
}
push @encoded_oids, \@octets;
}
#
# Create a hash value for each OID
#
my @hash_values = ();
for (my $i = 0; $i <= $#names; $i++) {
my @octets = @{$encoded_oids[$i]};
my $hash = $#octets;
foreach (@octets) {
$hash += $_ * 33;
}
$hash = ($hash >> 24) ^ ($hash >> 16) ^ ($hash >> 8) ^ ($hash);
push @hash_values, $hash & 0xff;
}
#
# Emit the OID data
#
print C_FILE "\n";
print C_FILE "static const unsigned char oid_data[", $total_length, "] = {\n";
for (my $i = 0; $i <= $#names; $i++) {
my @octets = @{$encoded_oids[$i]};
print C_FILE "\t";
print C_FILE $_, ", " foreach (@octets);
print C_FILE "\t// ", $names[$i];
print C_FILE "\n";
}
print C_FILE "};\n";
#
# Build the search index table (ordered by length then hash then content)
#
my @index_table = ( 0 .. $#names );
@index_table = sort {
my @octets_a = @{$encoded_oids[$a]};
my @octets_b = @{$encoded_oids[$b]};
return $hash_values[$a] <=> $hash_values[$b]
if ($hash_values[$a] != $hash_values[$b]);
return $#octets_a <=> $#octets_b
if ($#octets_a != $#octets_b);
for (my $i = $#octets_a; $i >= 0; $i--) {
return $octets_a[$i] <=> $octets_b[$i]
if ($octets_a[$i] != $octets_b[$i]);
}
return 0;
} @index_table;
#
# Emit the search index and hash value table
#
print C_FILE "\n";
print C_FILE "static const struct {\n";
print C_FILE "\tunsigned char hash;\n";
if ($#names <= 255) {
print C_FILE "\tenum OID oid : 8;\n";
} else {
print C_FILE "\tenum OID oid : 16;\n";
}
print C_FILE "} oid_search_table[OID__NR] = {\n";
for (my $i = 0; $i <= $#names; $i++) {
my @octets = @{$encoded_oids[$index_table[$i]]};
printf(C_FILE "\t[%3u] = { %3u, OID_%-35s }, // ",
$i,
$hash_values[$index_table[$i]],
$names[$index_table[$i]]);
printf C_FILE "%02x", $_ foreach (@octets);
print C_FILE "\n";
}
print C_FILE "};\n";
#
# Emit the OID debugging name table
#
#print C_FILE "\n";
#print C_FILE "const char *const oid_name_table[OID__NR + 1] = {\n";
#
#for (my $i = 0; $i <= $#names; $i++) {
# print C_FILE "\t\"", $names[$i], "\",\n"
#}
#print C_FILE "\t\"Unknown-OID\"\n";
#print C_FILE "};\n";
#
# Polish off
#
close C_FILE or die;

31
lib/bust_spinlocks.c Normal file
View file

@@ -0,0 +1,31 @@
/*
* lib/bust_spinlocks.c
*
* Provides a minimal bust_spinlocks for architectures which don't have one of their own.
*
* bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
* and panic() information from reaching the user.
*/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
void __attribute__((weak)) bust_spinlocks(int yes)
{
if (yes) {
++oops_in_progress;
} else {
#ifdef CONFIG_VT
unblank_screen();
#endif
console_unblank();
if (--oops_in_progress == 0)
wake_up_klogd();
}
}

26
lib/check_signature.c Normal file
View file

@@ -0,0 +1,26 @@
#include <linux/io.h>
#include <linux/export.h>
/**
* check_signature - find BIOS signatures
* @io_addr: mmio address to check
* @signature: signature block
* @length: length of signature
*
* Perform a signature comparison with the mmio address io_addr. This
* address should have been obtained by ioremap.
* Returns 1 on a match.
*/
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length)
{
while (length--) {
if (readb(io_addr) != *signature)
return 0;
io_addr++;
signature++;
}
return 1;
}
EXPORT_SYMBOL(check_signature);
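/*
 * Usage sketch (illustrative only: the physical address, length and
 * signature are made-up values, not a known BIOS region):
 */
static bool __maybe_unused example_find_signature(void)
{
	static const unsigned char sig[] = "FACP";
	void __iomem *p = ioremap(0xe0000, 0x100);
	bool found = false;

	if (p) {
		found = check_signature(p, sig, sizeof(sig) - 1);
		iounmap(p);
	}
	return found;
}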

210
lib/checksum.c Normal file
View file

@@ -0,0 +1,210 @@
/*
*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
* Fixed some nasty bugs, causing some horrible crashes.
* A: At some points, the sum (%0) was used as
* length-counter instead of the length counter
* (%1). Thanks to Roman Hodek for pointing this out.
* B: GCC seems to mess up if one uses too many
* data-registers to hold input values and one tries to
* specify d0 and d1 as scratch registers. Letting gcc
* choose these registers itself solves the problem.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
kills, so most of the assembly has to go. */
#include <linux/export.h>
#include <net/checksum.h>
#include <asm/byteorder.h>
#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
/* add up the two 16-bit halves; the result is 16 bits plus a carry */
x = (x & 0xffff) + (x >> 16);
/* add up carry.. */
x = (x & 0xffff) + (x >> 16);
return x;
}
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd;
unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
#ifdef __LITTLE_ENDIAN
result += (*buff << 8);
#else
result = *buff;
#endif
len--;
buff++;
}
if (len >= 2) {
if (2 & (unsigned long) buff) {
result += *(unsigned short *) buff;
len -= 2;
buff += 2;
}
if (len >= 4) {
const unsigned char *end = buff + ((unsigned)len & ~3);
unsigned int carry = 0;
do {
unsigned int w = *(unsigned int *) buff;
buff += 4;
result += carry;
result += w;
carry = (w > result);
} while (buff < end);
result += carry;
result = (result & 0xffff) + (result >> 16);
}
if (len & 2) {
result += *(unsigned short *) buff;
buff += 2;
}
}
if (len & 1)
#ifdef __LITTLE_ENDIAN
result += *buff;
#else
result += (*buff << 8);
#endif
result = from32to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
return result;
}
#endif
#ifndef ip_fast_csum
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
*/
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
unsigned int sum = (__force unsigned int)wsum;
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
result += sum;
if (sum > result)
result += 1;
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
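/*
 * Sketch of the "feeding into itself" property documented above: a buffer
 * checksummed in two fragments folds to the same 16-bit result as a single
 * pass, provided every fragment but the last has even length. The function
 * is illustrative only.
 */
static __sum16 __maybe_unused csum_in_two_fragments(const void *buff, int len)
{
	int half = (len / 2) & ~1;	/* split point must be even */
	__wsum sum;

	sum = csum_partial(buff, half, 0);
	sum = csum_partial(buff + half, len - half, sum);
	return csum_fold(sum);	/* fold down to the final 16-bit checksum */
}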
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
/*
 * copy from user space while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *csum_err)
{
int missing;
missing = __copy_from_user(dst, src, len);
if (missing) {
memset(dst + len - missing, 0, missing);
*csum_err = -EFAULT;
} else
*csum_err = 0;
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
 * copy from kernel space while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy);
#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
/* add up the two 32-bit halves; the result is 32 bits plus a carry */
x = (x & 0xffffffff) + (x >> 32);
/* add up carry.. */
x = (x & 0xffffffff) + (x >> 32);
return (u32)x;
}
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
unsigned long long s = (__force u32)sum;
s += (__force u32)saddr;
s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
s += proto + len;
#else
s += (proto + len) << 8;
#endif
return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif

65
lib/clz_ctz.c Normal file
View file

@@ -0,0 +1,65 @@
/*
* lib/clz_ctz.c
*
* Copyright (C) 2013 Chanho Min <chanho.min@lge.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
* The functions in this file aren't called directly, but are required by
* GCC builtins such as __builtin_ctz, and therefore they can't be removed
* despite appearing unreferenced in kernel source.
*
* __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
*/
#include <linux/export.h>
#include <linux/kernel.h>
int __weak __ctzsi2(int val);
int __weak __ctzsi2(int val)
{
return __ffs(val);
}
EXPORT_SYMBOL(__ctzsi2);
int __weak __clzsi2(int val);
int __weak __clzsi2(int val)
{
return 32 - fls(val);
}
EXPORT_SYMBOL(__clzsi2);
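/* e.g. __ctzsi2(0x10) == 4 and __clzsi2(0x10) == 27 for a 32-bit int */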
int __weak __clzdi2(long val);
int __weak __ctzdi2(long val);
#if BITS_PER_LONG == 32
int __weak __clzdi2(long val)
{
return 32 - fls((int)val);
}
EXPORT_SYMBOL(__clzdi2);
int __weak __ctzdi2(long val)
{
return __ffs((u32)val);
}
EXPORT_SYMBOL(__ctzdi2);
#elif BITS_PER_LONG == 64
int __weak __clzdi2(long val)
{
return 64 - fls64((u64)val);
}
EXPORT_SYMBOL(__clzdi2);
int __weak __ctzdi2(long val)
{
return __ffs64((u64)val);
}
EXPORT_SYMBOL(__ctzdi2);
#else
#error BITS_PER_LONG not 32 or 64
#endif

18
lib/clz_tab.c Normal file
View file

@@ -0,0 +1,18 @@
const unsigned char __clz_tab[] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
};

191
lib/cmdline.c Normal file
View file

@@ -0,0 +1,191 @@
/*
* linux/lib/cmdline.c
* Helper functions generally used for parsing kernel command line
* and module options.
*
* Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*
* GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
*
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
/*
* If a hyphen was found in get_option, this will handle the
* range of numbers, M-N. This will expand the range and insert
 * the values [M, M+1, ..., N] into the ints array in get_options.
*/
static int get_range(char **str, int *pint)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
for (x = *pint; x < upper_range; x++)
*pint++ = x;
return inc_counter;
}
/**
* get_option - Parse integer from an option string
* @str: option string
* @pint: (output) integer value parsed from @str
*
* Read an int from an option string; if available accept a subsequent
* comma as well.
*
* Return values:
* 0 - no int in string
* 1 - int found, no subsequent comma
* 2 - int found including a subsequent comma
* 3 - hyphen found to denote a range
*/
int get_option(char **str, int *pint)
{
char *cur = *str;
if (!cur || !(*cur))
return 0;
*pint = simple_strtol(cur, str, 0);
if (cur == *str)
return 0;
if (**str == ',') {
(*str)++;
return 2;
}
if (**str == '-')
return 3;
return 1;
}
EXPORT_SYMBOL(get_option);
/**
* get_options - Parse a string into a list of integers
* @str: String to be parsed
* @nints: size of integer array
* @ints: integer array
*
* This function parses a string containing a comma-separated
* list of integers, a hyphen-separated range of _positive_ integers,
* or a combination of both. The parse halts when the array is
* full, or when no more numbers can be retrieved from the
* string.
*
 * Return value is a pointer to the character in the string which
 * caused the parse to end (typically a null terminator, if @str is
 * completely parseable).
*/
char *get_options(const char *str, int nints, int *ints)
{
int res, i = 1;
while (i < nints) {
res = get_option((char **)&str, ints + i);
if (res == 0)
break;
if (res == 3) {
int range_nums;
range_nums = get_range((char **)&str, ints + i);
if (range_nums < 0)
break;
/*
* Decrement the result by one to leave out the
* last number in the range. The next iteration
* will handle the upper number in the range
*/
i += (range_nums - 1);
}
i++;
if (res == 1)
break;
}
ints[0] = i - 1;
return (char *)str;
}
EXPORT_SYMBOL(get_options);
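/*
 * Example (illustrative values): parsing "1-3,5" stores the count in
 * ints[0] and the expanded list after it:
 *
 *	int ints[6];
 *	get_options("1-3,5", ARRAY_SIZE(ints), ints);
 *	now ints[0] == 4 and ints[1..4] == 1, 2, 3, 5
 */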
/**
* memparse - parse a string with mem suffixes into a number
* @ptr: Where parse begins
* @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
* potentially suffixed with K, M, G, T, P, E.
*/
unsigned long long memparse(const char *ptr, char **retptr)
{
char *endptr; /* local pointer to end of parsed string */
unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
switch (*endptr) {
case 'E':
case 'e':
ret <<= 10;
case 'P':
case 'p':
ret <<= 10;
case 'T':
case 't':
ret <<= 10;
case 'G':
case 'g':
ret <<= 10;
case 'M':
case 'm':
ret <<= 10;
case 'K':
case 'k':
ret <<= 10;
endptr++;
default:
break;
}
if (retptr)
*retptr = endptr;
return ret;
}
EXPORT_SYMBOL(memparse);
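/*
 * Example (illustrative): each suffix falls through the cases above, so
 * "K" scales by 1 << 10, "M" by 1 << 20, and so on up to "E" (1 << 60):
 *
 *	memparse("64K", NULL) == 65536
 *	memparse("2M", NULL) == 2097152
 */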
/**
* parse_option_str - Parse a string and check an option is set or not
* @str: String to be parsed
* @option: option name
*
* This function parses a string containing a comma-separated list of
* strings like a=b,c.
*
 * Returns true if the option is present in the string, false otherwise.
*/
bool parse_option_str(const char *str, const char *option)
{
while (*str) {
if (!strncmp(str, option, strlen(option))) {
str += strlen(option);
if (!*str || *str == ',')
return true;
}
while (*str && *str != ',')
str++;
if (*str == ',')
str++;
}
return false;
}

50
lib/compat_audit.c Normal file
View file

@@ -0,0 +1,50 @@
#include <linux/init.h>
#include <linux/types.h>
#include <asm/unistd32.h>
unsigned compat_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned compat_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned compat_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned compat_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned compat_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_compat_syscall(int abi, unsigned syscall)
{
switch (syscall) {
#ifdef __NR_open
case __NR_open:
return 2;
#endif
#ifdef __NR_openat
case __NR_openat:
return 3;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
return 4;
#endif
case __NR_execve:
return 5;
default:
return 1;
}
}

101
lib/cordic.c Normal file
View file

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2011 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/cordic.h>
#define CORDIC_ANGLE_GEN 39797
#define CORDIC_PRECISION_SHIFT 16
#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
#define FLOAT(X) (((X) >= 0) \
? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
static const s32 arctan_table[] = {
2949120,
1740967,
919879,
466945,
234379,
117304,
58666,
29335,
14668,
7334,
3667,
1833,
917,
458,
229,
115,
57,
29
};
/*
* cordic_calc_iq() - calculates the i/q coordinate for given angle
*
* theta: angle in degrees for which i/q coordinate is to be calculated
* coord: function output parameter holding the i/q coordinate
*/
struct cordic_iq cordic_calc_iq(s32 theta)
{
struct cordic_iq coord;
s32 angle, valtmp;
unsigned iter;
int signx = 1;
int signtheta;
coord.i = CORDIC_ANGLE_GEN;
coord.q = 0;
angle = 0;
theta = FIXED(theta);
signtheta = (theta < 0) ? -1 : 1;
theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
FIXED(180) * signtheta;
if (FLOAT(theta) > 90) {
theta -= FIXED(180);
signx = -1;
} else if (FLOAT(theta) < -90) {
theta += FIXED(180);
signx = -1;
}
for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
if (theta > angle) {
valtmp = coord.i - (coord.q >> iter);
coord.q += (coord.i >> iter);
angle += arctan_table[iter];
} else {
valtmp = coord.i + (coord.q >> iter);
coord.q -= (coord.i >> iter);
angle -= arctan_table[iter];
}
coord.i = valtmp;
}
coord.i *= signx;
coord.q *= signx;
return coord;
}
EXPORT_SYMBOL(cordic_calc_iq);
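/*
 * Example (illustrative): at 45 degrees cos and sin are equal, so the two
 * outputs of the fixed-point iteration match up to rounding:
 *
 *	struct cordic_iq xy = cordic_calc_iq(45);
 *	xy.i and xy.q are then approximately equal
 */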
MODULE_DESCRIPTION("CORDIC algorithm");
MODULE_AUTHOR("Broadcom Corporation");
MODULE_LICENSE("Dual BSD/GPL");

50
lib/cpu-notifier-error-inject.c Normal file
View file

@@ -0,0 +1,50 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include "notifier-error-inject.h"
static int priority;
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "specify cpu notifier priority");
static struct notifier_err_inject cpu_notifier_err_inject = {
.actions = {
{ NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE) },
{ NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE_FROZEN) },
{ NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE) },
{ NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE_FROZEN) },
{}
}
};
static struct dentry *dir;
static int err_inject_init(void)
{
int err;
dir = notifier_err_inject_init("cpu", notifier_err_inject_dir,
&cpu_notifier_err_inject, priority);
if (IS_ERR(dir))
return PTR_ERR(dir);
err = register_hotcpu_notifier(&cpu_notifier_err_inject.nb);
if (err)
debugfs_remove_recursive(dir);
return err;
}
static void err_inject_exit(void)
{
unregister_hotcpu_notifier(&cpu_notifier_err_inject.nb);
debugfs_remove_recursive(dir);
}
module_init(err_inject_init);
module_exit(err_inject_exit);
MODULE_DESCRIPTION("CPU notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");

307
lib/cpu_rmap.c Normal file
View file

@@ -0,0 +1,307 @@
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>
#include <linux/export.h>
/*
* These functions maintain a mapping from CPUs to some ordered set of
* objects with CPU affinities. This can be seen as a reverse-map of
* CPU affinity. However, we do not assume that the object affinities
* cover all CPUs in the system. For those CPUs not directly covered
* by object affinities, we attempt to find a nearest object based on
* CPU topology.
*/
/**
* alloc_cpu_rmap - allocate CPU affinity reverse-map
* @size: Number of objects to be mapped
* @flags: Allocation flags e.g. %GFP_KERNEL
*/
struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
{
struct cpu_rmap *rmap;
unsigned int cpu;
size_t obj_offset;
/* This is a silly number of objects, and we use u16 indices. */
if (size > 0xffff)
return NULL;
/* Offset of object pointer array from base structure */
obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
sizeof(void *));
rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
if (!rmap)
return NULL;
kref_init(&rmap->refcount);
rmap->obj = (void **)((char *)rmap + obj_offset);
/* Initially assign CPUs to objects on a rota, since we have
* no idea where the objects are. Use infinite distance, so
* any object with known distance is preferable. Include the
* CPUs that are not present/online, since we definitely want
* any newly-hotplugged CPUs to have some object assigned.
*/
for_each_possible_cpu(cpu) {
rmap->near[cpu].index = cpu % size;
rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
}
rmap->size = size;
return rmap;
}
EXPORT_SYMBOL(alloc_cpu_rmap);
/**
* cpu_rmap_release - internal reclaiming helper called from kref_put
* @ref: kref to struct cpu_rmap
*/
static void cpu_rmap_release(struct kref *ref)
{
struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
kfree(rmap);
}
/**
* cpu_rmap_get - internal helper to get new ref on a cpu_rmap
* @rmap: reverse-map allocated with alloc_cpu_rmap()
*/
static inline void cpu_rmap_get(struct cpu_rmap *rmap)
{
kref_get(&rmap->refcount);
}
/**
* cpu_rmap_put - release ref on a cpu_rmap
* @rmap: reverse-map allocated with alloc_cpu_rmap()
*/
int cpu_rmap_put(struct cpu_rmap *rmap)
{
return kref_put(&rmap->refcount, cpu_rmap_release);
}
EXPORT_SYMBOL(cpu_rmap_put);
/* Reevaluate nearest object for given CPU, comparing with the given
* neighbours at the given distance.
*/
static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
const struct cpumask *mask, u16 dist)
{
int neigh;
for_each_cpu(neigh, mask) {
if (rmap->near[cpu].dist > dist &&
rmap->near[neigh].dist <= dist) {
rmap->near[cpu].index = rmap->near[neigh].index;
rmap->near[cpu].dist = dist;
return true;
}
}
return false;
}
#ifdef DEBUG
static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
unsigned index;
unsigned int cpu;
pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
for_each_possible_cpu(cpu) {
index = rmap->near[cpu].index;
pr_info("cpu %d -> obj %u (distance %u)\n",
cpu, index, rmap->near[cpu].dist);
}
}
#else
static inline void
debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
}
#endif
/**
* cpu_rmap_add - add object to a rmap
* @rmap: CPU rmap allocated with alloc_cpu_rmap()
* @obj: Object to add to rmap
*
* Return index of object.
*/
int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
{
u16 index;
BUG_ON(rmap->used >= rmap->size);
index = rmap->used++;
rmap->obj[index] = obj;
return index;
}
EXPORT_SYMBOL(cpu_rmap_add);
/**
* cpu_rmap_update - update CPU rmap following a change of object affinity
* @rmap: CPU rmap to update
* @index: Index of object whose affinity changed
* @affinity: New CPU affinity of object
*/
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
const struct cpumask *affinity)
{
cpumask_var_t update_mask;
unsigned int cpu;
if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
return -ENOMEM;
/* Invalidate distance for all CPUs for which this used to be
* the nearest object. Mark those CPUs for update.
*/
for_each_online_cpu(cpu) {
if (rmap->near[cpu].index == index) {
rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
cpumask_set_cpu(cpu, update_mask);
}
}
debug_print_rmap(rmap, "after invalidating old distances");
/* Set distance to 0 for all CPUs in the new affinity mask.
* Mark all CPUs within their NUMA nodes for update.
*/
for_each_cpu(cpu, affinity) {
rmap->near[cpu].index = index;
rmap->near[cpu].dist = 0;
cpumask_or(update_mask, update_mask,
cpumask_of_node(cpu_to_node(cpu)));
}
debug_print_rmap(rmap, "after updating neighbours");
/* Update distances based on topology */
for_each_cpu(cpu, update_mask) {
if (cpu_rmap_copy_neigh(rmap, cpu,
topology_thread_cpumask(cpu), 1))
continue;
if (cpu_rmap_copy_neigh(rmap, cpu,
topology_core_cpumask(cpu), 2))
continue;
if (cpu_rmap_copy_neigh(rmap, cpu,
cpumask_of_node(cpu_to_node(cpu)), 3))
continue;
/* We could continue into NUMA node distances, but for now
* we give up.
*/
}
debug_print_rmap(rmap, "after copying neighbours");
free_cpumask_var(update_mask);
return 0;
}
EXPORT_SYMBOL(cpu_rmap_update);
/* Glue between IRQ affinity notifiers and CPU rmaps */
struct irq_glue {
struct irq_affinity_notify notify;
struct cpu_rmap *rmap;
u16 index;
};
/**
* free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
 * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
*
* Must be called in process context, before freeing the IRQs.
*/
void free_irq_cpu_rmap(struct cpu_rmap *rmap)
{
struct irq_glue *glue;
u16 index;
if (!rmap)
return;
for (index = 0; index < rmap->used; index++) {
glue = rmap->obj[index];
irq_set_affinity_notifier(glue->notify.irq, NULL);
}
cpu_rmap_put(rmap);
}
EXPORT_SYMBOL(free_irq_cpu_rmap);
/**
* irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
* @notify: struct irq_affinity_notify passed by irq/manage.c
* @mask: cpu mask for new SMP affinity
*
* This is executed in workqueue context.
*/
static void
irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
{
struct irq_glue *glue =
container_of(notify, struct irq_glue, notify);
int rc;
rc = cpu_rmap_update(glue->rmap, glue->index, mask);
if (rc)
pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
}
/**
* irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
* @ref: kref to struct irq_affinity_notify passed by irq/manage.c
*/
static void irq_cpu_rmap_release(struct kref *ref)
{
struct irq_glue *glue =
container_of(ref, struct irq_glue, notify.kref);
cpu_rmap_put(glue->rmap);
kfree(glue);
}
/**
* irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
* @rmap: The reverse-map
* @irq: The IRQ number
*
* This adds an IRQ affinity notifier that will update the reverse-map
* automatically.
*
* Must be called in process context, after the IRQ is allocated but
* before it is bound with request_irq().
*/
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
int rc;
if (!glue)
return -ENOMEM;
glue->notify.notify = irq_cpu_rmap_notify;
glue->notify.release = irq_cpu_rmap_release;
glue->rmap = rmap;
cpu_rmap_get(rmap);
glue->index = cpu_rmap_add(rmap, glue);
rc = irq_set_affinity_notifier(irq, &glue->notify);
if (rc) {
cpu_rmap_put(glue->rmap);
kfree(glue);
}
return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
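/*
 * Driver-side usage sketch for the IRQ glue above. Illustrative only:
 * error unwinding is trimmed and the irq numbers come from the caller.
 */
static struct cpu_rmap * __maybe_unused example_rmap_setup(int *irqs,
							   unsigned int n)
{
	struct cpu_rmap *rmap = alloc_cpu_rmap(n, GFP_KERNEL);
	unsigned int i;
	int rc;

	if (!rmap)
		return NULL;
	/* add each IRQ after allocation but before request_irq() */
	for (i = 0; i < n; i++) {
		rc = irq_cpu_rmap_add(rmap, irqs[i]);
		if (rc)
			pr_warning("rmap add for irq %d failed: %d\n",
				   irqs[i], rc);
	}
	return rmap;
}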

229
lib/cpumask.c Normal file
View file

@@ -0,0 +1,229 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>
int __first_cpu(const cpumask_t *srcp)
{
return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
}
EXPORT_SYMBOL(__first_cpu);
int __next_cpu(int n, const cpumask_t *srcp)
{
return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
}
EXPORT_SYMBOL(__next_cpu);
#if NR_CPUS > 64
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
return min_t(int, nr_cpu_ids,
find_next_bit(srcp->bits, nr_cpu_ids, n+1));
}
EXPORT_SYMBOL(__next_cpu_nr);
#endif
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
* @n: the cpu prior to the place to search (ie. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
* Returns >= nr_cpu_ids if no further cpus set in both.
*/
int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
if (cpumask_test_cpu(n, src2p))
break;
return n;
}
EXPORT_SYMBOL(cpumask_next_and);
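/*
 * Example (illustrative): walk every cpu set in both masks, in the style
 * of the for_each_cpu_and() helper:
 *
 *	for (n = -1; (n = cpumask_next_and(n, src1p, src2p)) < nr_cpu_ids; )
 *		do_something(n);
 */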
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
* Returns >= nr_cpu_ids if no cpus set.
*/
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
cpumask_check(cpu);
for_each_cpu(i, mask)
if (i != cpu)
break;
return i;
}
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
* alloc_cpumask_var_node - allocate a struct cpumask on a given node
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>)
* Returns TRUE if memory allocation succeeded, FALSE otherwise.
*
* In addition, mask will be NULL if this fails. Note that gcc is
* usually smart enough to know that mask can never be NULL if
* CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
* too.
*/
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
*mask = kmalloc_node(cpumask_size(), flags, node);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (!*mask) {
printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
dump_stack();
}
#endif
/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
if (*mask) {
unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
unsigned int tail;
tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
memset(ptr + cpumask_size() - tail, 0, tail);
}
return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
/**
* alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
*/
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
/**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop (in <linux/cpumask.h>).
* Either returns an allocated (zero-filled) cpumask, or causes the
* system to panic.
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
*mask = memblock_virt_alloc(cpumask_size(), 0);
}
/**
* free_cpumask_var - frees memory allocated for a struct cpumask.
* @mask: cpumask to free
*
* This is safe on a NULL mask.
*/
void free_cpumask_var(cpumask_var_t mask)
{
kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);
/**
* free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
* @mask: cpumask to free
*/
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
memblock_free_early(__pa(mask), cpumask_size());
}
#endif
/**
 * cpumask_set_cpu_local_first - set the i'th cpu, local NUMA cpus first
*
* @i: index number
* @numa_node: local numa_node
* @dstp: cpumask with the relevant cpu bit set according to the policy
*
 * This function sets the cpumask according to a NUMA-aware policy.
 * The resulting cpumask can be used as an affinity hint for the IRQ
 * related to a queue; the policy is to spread queues across cores,
 * local cores first.
 *
 * Returns 0 on success, -ENOMEM if no memory is available, and -EAGAIN
 * when it failed to set the cpu bit and the function must be re-called.
*/
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
{
cpumask_var_t mask;
int cpu;
int ret = 0;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
i %= num_online_cpus();
if (numa_node == -1 || !cpumask_of_node(numa_node)) {
/* Use all online cpu's for non numa aware system */
cpumask_copy(mask, cpu_online_mask);
} else {
int n;
cpumask_and(mask,
cpumask_of_node(numa_node), cpu_online_mask);
n = cpumask_weight(mask);
if (i >= n) {
i -= n;
/* If index > number of local cpu's, mask out local
* cpu's
*/
cpumask_andnot(mask, cpu_online_mask, mask);
}
}
for_each_cpu(cpu, mask) {
if (--i < 0)
goto out;
}
ret = -EAGAIN;
out:
free_cpumask_var(mask);
if (!ret)
cpumask_set_cpu(cpu, dstp);
return ret;
}
EXPORT_SYMBOL(cpumask_set_cpu_local_first);

69
lib/crc-ccitt.c Normal file
View file

@@ -0,0 +1,69 @@
/*
* linux/lib/crc-ccitt.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-ccitt.h>
/*
* This mysterious table is just the CRC of each possible byte. It can be
* computed using the standard bit-at-a-time methods. The polynomial can
* be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
* Add the implicit x^16, and you have the standard CRC-CCITT.
*/
u16 const crc_ccitt_table[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);
/**
* crc_ccitt - recompute the CRC for the data buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*/
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc_ccitt_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc_ccitt);
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");

69
lib/crc-itu-t.c Normal file

@ -0,0 +1,69 @@
/*
* crc-itu-t.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-itu-t.h>
/** CRC table for the CRC ITU-T V.41 polynomial 0x1021 (x^16 + x^12 + x^5 + 1) */
const u16 crc_itu_t_table[256] = {
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
};
EXPORT_SYMBOL(crc_itu_t_table);
/**
* crc_itu_t - Compute the CRC-ITU-T for the data buffer
*
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*
* Returns the updated CRC value
*/
u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len)
{
while (len--)
crc = crc_itu_t_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc_itu_t);
MODULE_DESCRIPTION("CRC ITU-T V.41 calculations");
MODULE_LICENSE("GPL");

64
lib/crc-t10dif.c Normal file

@ -0,0 +1,64 @@
/*
* T10 Data Integrity Field CRC16 calculation
*
* Copyright (c) 2007 Oracle Corporation. All rights reserved.
* Written by Martin K. Petersen <martin.petersen@oracle.com>
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
#include <linux/static_key.h>
static struct crypto_shash *crct10dif_tfm;
static struct static_key crct10dif_fallback __read_mostly;
__u16 crc_t10dif(const unsigned char *buffer, size_t len)
{
struct {
struct shash_desc shash;
char ctx[2];
} desc;
int err;
if (static_key_false(&crct10dif_fallback))
return crc_t10dif_generic(0, buffer, len);
desc.shash.tfm = crct10dif_tfm;
desc.shash.flags = 0;
*(__u16 *)desc.ctx = 0;
err = crypto_shash_update(&desc.shash, buffer, len);
BUG_ON(err);
return *(__u16 *)desc.ctx;
}
EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(crct10dif_tfm)) {
static_key_slow_inc(&crct10dif_fallback);
crct10dif_tfm = NULL;
}
return 0;
}
static void __exit crc_t10dif_mod_fini(void)
{
crypto_free_shash(crct10dif_tfm);
}
module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_fini);
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");

67
lib/crc16.c Normal file

@ -0,0 +1,67 @@
/*
* crc16.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc16.h>
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
u16 const crc16_table[256] = {
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};
EXPORT_SYMBOL(crc16_table);
/**
* crc16 - compute the CRC-16 for the data buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*
* Returns the updated CRC value.
*/
u16 crc16(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc16_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc16);
MODULE_DESCRIPTION("CRC16 calculations");
MODULE_LICENSE("GPL");

1174
lib/crc32.c Normal file

File diff suppressed because it is too large

72
lib/crc32defs.h Normal file

@ -0,0 +1,72 @@
/*
 * There are multiple CRC polynomials in common use, but this is
* *the* standard CRC-32 polynomial, first popularized by Ethernet.
* x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
*/
#define CRCPOLY_LE 0xedb88320
#define CRCPOLY_BE 0x04c11db7
/*
* This is the CRC32c polynomial, as outlined by Castagnoli.
* x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
* x^8+x^6+x^0
*/
#define CRC32C_POLY_LE 0x82F63B78
/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
# define CRC_LE_BITS 64
# define CRC_BE_BITS 64
#endif
#ifdef CONFIG_CRC32_SLICEBY4
# define CRC_LE_BITS 32
# define CRC_BE_BITS 32
#endif
#ifdef CONFIG_CRC32_SARWATE
# define CRC_LE_BITS 8
# define CRC_BE_BITS 8
#endif
#ifdef CONFIG_CRC32_BIT
# define CRC_LE_BITS 1
# define CRC_BE_BITS 1
#endif
/*
* How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
 * For less performance-sensitive code, use 4 or 8 to save table size.
 * For larger systems, the CPU architecture's word size is a good default;
 * this works well on X86_64 and SPARC64 systems. It may require some
 * elaboration after experiments with other architectures.
*/
#ifndef CRC_LE_BITS
# ifdef CONFIG_64BIT
# define CRC_LE_BITS 64
# else
# define CRC_LE_BITS 32
# endif
#endif
#ifndef CRC_BE_BITS
# ifdef CONFIG_64BIT
# define CRC_BE_BITS 64
# else
# define CRC_BE_BITS 32
# endif
#endif
/*
* Little-endian CRC computation. Used with serial bit streams sent
* lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
*/
#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
CRC_LE_BITS & CRC_LE_BITS-1
# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
#endif
/*
* Big-endian CRC computation. Used with serial bit streams sent
* msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
*/
#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
CRC_BE_BITS & CRC_BE_BITS-1
# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
#endif
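Both #error guards rely on the classic x & (x - 1) power-of-two test (with 16 then excluded explicitly); spelled out as a sketch:

/* Sketch: x & (x - 1) clears the lowest set bit, so for x > 0 it is
 * zero exactly when x is a power of two. */
#define SKETCH_IS_POW2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0)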

76
lib/crc7.c Normal file

@ -0,0 +1,76 @@
/*
* crc7.c
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc7.h>
/*
* Table for CRC-7 (polynomial x^7 + x^3 + 1).
* This is a big-endian CRC (msbit is highest power of x),
* aligned so the msbit of the byte is the x^6 coefficient
* and the lsbit is not used.
*/
const u8 crc7_be_syndrome_table[256] = {
0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
};
EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
* crc7 - update the CRC7 for the data buffer
* @crc: previous CRC7 value
* @buffer: data pointer
* @len: number of bytes in the buffer
* Context: any
*
* Returns the updated CRC7 value.
* The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
* makes the computation easier, and all callers want it in that form.
*
*/
u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
{
while (len--)
crc = crc7_be_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc7_be);
MODULE_DESCRIPTION("CRC7 calculations");
MODULE_LICENSE("GPL");

86
lib/crc8.c Normal file

@ -0,0 +1,86 @@
/*
* Copyright (c) 2011 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/crc8.h>
#include <linux/printk.h>
/*
* crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
*
* table: table to be filled.
* polynomial: polynomial for which table is to be filled.
*/
void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
int i, j;
const u8 msbit = 0x80;
u8 t = msbit;
table[0] = 0;
for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) {
t = (t << 1) ^ (t & msbit ? polynomial : 0);
for (j = 0; j < i; j++)
table[i+j] = table[j] ^ t;
}
}
EXPORT_SYMBOL(crc8_populate_msb);
/*
* crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
*
* table: table to be filled.
* polynomial: polynomial for which table is to be filled.
*/
void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
int i, j;
u8 t = 1;
table[0] = 0;
for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) {
t = (t >> 1) ^ (t & 1 ? polynomial : 0);
for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i)
table[i+j] = table[j] ^ t;
}
}
EXPORT_SYMBOL(crc8_populate_lsb);
/*
* crc8 - calculate a crc8 over the given input data.
*
* table: crc table used for calculation.
* pdata: pointer to data buffer.
* nbytes: number of bytes in data buffer.
* crc: previous returned crc8 value.
*/
u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
{
/* loop over the buffer data */
while (nbytes-- > 0)
crc = table[(crc ^ *pdata++) & 0xff];
return crc;
}
EXPORT_SYMBOL(crc8);
MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function");
MODULE_AUTHOR("Broadcom Corporation");
MODULE_LICENSE("Dual BSD/GPL");

37
lib/ctype.c Normal file

@ -0,0 +1,37 @@
/*
* linux/lib/ctype.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/ctype.h>
#include <linux/compiler.h>
#include <linux/export.h>
const unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);

48
lib/debug_locks.c Normal file

@ -0,0 +1,48 @@
/*
* lib/debug_locks.c
*
* Generic place for common debugging facilities for various locks:
* spinlocks, rwlocks, mutexes and rwsems.
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
/*
* We want to turn all lock-debugging facilities on/off at once,
* via a global flag. The reason is that once a single bug has been
* detected and reported, there might be cascade of followup bugs
* that would just muddy the log. So we report the first one and
* shut up after that.
*/
int debug_locks = 1;
EXPORT_SYMBOL_GPL(debug_locks);
/*
* The locking-testsuite uses <debug_locks_silent> to get a
* 'silent failure': nothing is printed to the console when
* a locking bug is detected.
*/
int debug_locks_silent;
EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
* Generic 'turn off all lock debugging' function:
*/
int debug_locks_off(void)
{
if (__debug_locks_off()) {
if (!debug_locks_silent) {
console_verbose();
return 1;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(debug_locks_off);
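A typical caller pattern, sketched (the message is invented): combine the switch-off with a one-time report so only the first detected bug reaches the log.

/* Sketch: debug_locks_off() returns 1 only the first time (and only
 * when not in silent mode), so the report fires once. */
static void sketch_report_bad_unlock(void)
{
	if (debug_locks_off())
		pr_err("bad unlock detected\n");
}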

1097
lib/debugobjects.c Normal file

File diff suppressed because it is too large

34
lib/dec_and_lock.c Normal file

@ -0,0 +1,34 @@
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* NOTE NOTE NOTE! This is _not_ equivalent to
*
* if (atomic_dec_and_test(&atomic)) {
* spin_lock(&lock);
* return 1;
* }
* return 0;
*
* because the spin-lock and the decrement must be
* "atomic".
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
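The canonical use, sketched (struct obj and its fields are hypothetical): drop a reference, and only when it was the last one, unlink under the lock and free.

/* Sketch: release path for a locked, refcounted list entry. */
static void sketch_put_obj(struct obj *o, spinlock_t *list_lock)
{
	if (atomic_dec_and_lock(&o->refcnt, list_lock)) {
		list_del(&o->node);	/* still holding list_lock */
		spin_unlock(list_lock);
		kfree(o);
	}
}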

75
lib/decompress.c Normal file

@ -0,0 +1,75 @@
/*
* decompress.c
*
* Detect the decompression method based on magic number
*/
#include <linux/decompress/generic.h>
#include <linux/decompress/bunzip2.h>
#include <linux/decompress/unlzma.h>
#include <linux/decompress/unxz.h>
#include <linux/decompress/inflate.h>
#include <linux/decompress/unlzo.h>
#include <linux/decompress/unlz4.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/printk.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
#endif
#ifndef CONFIG_DECOMPRESS_BZIP2
# define bunzip2 NULL
#endif
#ifndef CONFIG_DECOMPRESS_LZMA
# define unlzma NULL
#endif
#ifndef CONFIG_DECOMPRESS_XZ
# define unxz NULL
#endif
#ifndef CONFIG_DECOMPRESS_LZO
# define unlzo NULL
#endif
#ifndef CONFIG_DECOMPRESS_LZ4
# define unlz4 NULL
#endif
struct compress_format {
unsigned char magic[2];
const char *name;
decompress_fn decompressor;
};
static const struct compress_format compressed_formats[] __initconst = {
{ {037, 0213}, "gzip", gunzip },
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },
{ {0, 0}, NULL, NULL }
};
decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
const char **name)
{
const struct compress_format *cf;
if (len < 2)
return NULL; /* Need at least this much... */
pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
for (cf = compressed_formats; cf->name; cf++) {
if (!memcmp(inbuf, cf->magic, 2))
break;
}
if (name)
*name = cf->name;
return cf->decompressor;
}
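A usage sketch (the error handler and buffers are invented): detect the format from the magic bytes, then invoke the matching decompressor through the common decompress_fn signature.

static void __init sketch_error(char *msg)
{
	pr_err("decompress: %s\n", msg);
}

/* Sketch: pick and run a decompressor for an in-memory image. */
static int __init sketch_unpack(unsigned char *in, long in_len,
				unsigned char *out)
{
	const char *name;
	decompress_fn fn = decompress_method(in, in_len, &name);

	if (!fn)
		return -EINVAL;		/* magic not recognised */
	pr_debug("unpacking %s data\n", name);
	return fn(in, in_len, NULL, NULL, out, NULL, sketch_error);
}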

755
lib/decompress_bunzip2.c Normal file

@ -0,0 +1,755 @@
/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
which also acknowledges contributions by Mike Burrows, David Wheeler,
Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
Robert Sedgewick, and Jon L. Bentley.
This code is licensed under the LGPLv2:
LGPL (http://www.gnu.org/copyleft/lgpl.html)
*/
/*
Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
More efficient reading of Huffman codes, a streamlined read_bunzip()
function, and various other tweaks. In (limited) tests, approximately
20% faster than bzcat on x86 and about 10% faster on arm.
Note that about 2/3 of the time is spent in read_bunzip() reversing
the Burrows-Wheeler transformation. Much of that time is delay
resulting from cache misses.
I would ask that anyone benefiting from this work, especially those
using it in commercial products, consider making a donation to my local
non-profit hospice organization in the name of the woman I loved, who
passed away Feb. 12, 2003.
In memory of Toni W. Hagan
Hospice of Acadiana, Inc.
2600 Johnston St., Suite 200
Lafayette, LA 70503-3240
Phone (337) 232-1234 or 1-800-738-2226
Fax (337) 232-1297
http://www.hospiceacadiana.com/
Manuel
*/
/*
Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
*/
#ifdef STATIC
#define PREBOOT
#else
#include <linux/decompress/bunzip2.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
#endif
/* Constants for Huffman coding */
#define MAX_GROUPS 6
#define GROUP_SIZE 50 /* 64 would have been more efficient */
#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
#define SYMBOL_RUNA 0
#define SYMBOL_RUNB 1
/* Status return values */
#define RETVAL_OK 0
#define RETVAL_LAST_BLOCK (-1)
#define RETVAL_NOT_BZIP_DATA (-2)
#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
#define RETVAL_DATA_ERROR (-5)
#define RETVAL_OUT_OF_MEMORY (-6)
#define RETVAL_OBSOLETE_INPUT (-7)
/* Other housekeeping constants */
#define BZIP2_IOBUF_SIZE 4096
/* This is what we know about each Huffman coding group */
struct group_data {
/* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
int minLen, maxLen;
};
/* Structure holding all the housekeeping data, including IO buffers and
memory that persists between calls to bunzip */
struct bunzip_data {
/* State for interrupting output loop */
int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
/* I/O tracking data (file handles, buffers, positions, etc.) */
long (*fill)(void*, unsigned long);
long inbufCount, inbufPos /*, outbufPos*/;
unsigned char *inbuf /*,*outbuf*/;
unsigned int inbufBitCount, inbufBits;
/* The CRC values stored in the block header and calculated from the
data */
unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
/* Intermediate buffer and its size (in bytes) */
unsigned int *dbuf, dbufSize;
/* These things are a bit too big to go on the stack */
unsigned char selectors[32768]; /* nSelectors = 15 bits */
struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
int io_error; /* non-zero if we have IO error */
int byteCount[256];
unsigned char symToByte[256], mtfSymbol[256];
};
/* Return the next nnn bits of input. All reads from the compressed input
are done through this function. All reads are big endian */
static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
{
unsigned int bits = 0;
/* If we need to get more data from the byte buffer, do so.
(Loop getting one byte at a time to enforce endianness and avoid
unaligned access.) */
while (bd->inbufBitCount < bits_wanted) {
/* If we need to read more data from file into byte buffer, do
so */
if (bd->inbufPos == bd->inbufCount) {
if (bd->io_error)
return 0;
bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
if (bd->inbufCount <= 0) {
bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
return 0;
}
bd->inbufPos = 0;
}
/* Avoid 32-bit overflow (dump bit buffer to top of output) */
if (bd->inbufBitCount >= 24) {
bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
bits_wanted -= bd->inbufBitCount;
bits <<= bits_wanted;
bd->inbufBitCount = 0;
}
/* Grab next 8 bits of input from buffer. */
bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
}
/* Calculate result */
bd->inbufBitCount -= bits_wanted;
bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
return bits;
}
/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
static int INIT get_next_block(struct bunzip_data *bd)
{
struct group_data *hufGroup = NULL;
int *base = NULL;
int *limit = NULL;
int dbufCount, nextSym, dbufSize, groupCount, selector,
i, j, k, t, runPos, symCount, symTotal, nSelectors, *byteCount;
unsigned char uc, *symToByte, *mtfSymbol, *selectors;
unsigned int *dbuf, origPtr;
dbuf = bd->dbuf;
dbufSize = bd->dbufSize;
selectors = bd->selectors;
byteCount = bd->byteCount;
symToByte = bd->symToByte;
mtfSymbol = bd->mtfSymbol;
/* Read in header signature and CRC, then validate signature.
(last block signature means CRC is for whole file, return now) */
i = get_bits(bd, 24);
j = get_bits(bd, 24);
bd->headerCRC = get_bits(bd, 32);
if ((i == 0x177245) && (j == 0x385090))
return RETVAL_LAST_BLOCK;
if ((i != 0x314159) || (j != 0x265359))
return RETVAL_NOT_BZIP_DATA;
/* We can add support for blockRandomised if anybody complains.
There was some code for this in busybox 1.0.0-pre3, but nobody ever
noticed that it didn't actually work. */
if (get_bits(bd, 1))
return RETVAL_OBSOLETE_INPUT;
origPtr = get_bits(bd, 24);
if (origPtr >= dbufSize)
return RETVAL_DATA_ERROR;
/* mapping table: if some byte values are never used (encoding things
like ascii text), the compression code removes the gaps to have fewer
symbols to deal with, and writes a sparse bitfield indicating which
values were present. We make a translation table to convert the
symbols back to the corresponding bytes. */
t = get_bits(bd, 16);
symTotal = 0;
for (i = 0; i < 16; i++) {
if (t&(1 << (15-i))) {
k = get_bits(bd, 16);
for (j = 0; j < 16; j++)
if (k&(1 << (15-j)))
symToByte[symTotal++] = (16*i)+j;
}
}
/* How many different Huffman coding groups does this block use? */
groupCount = get_bits(bd, 3);
if (groupCount < 2 || groupCount > MAX_GROUPS)
return RETVAL_DATA_ERROR;
/* nSelectors: every GROUP_SIZE symbols we select a new
Huffman coding group. Read in the group selector list,
which is stored as MTF encoded bit runs. (MTF = Move To
Front, as each value is used it's moved to the start of the
list.) */
nSelectors = get_bits(bd, 15);
if (!nSelectors)
return RETVAL_DATA_ERROR;
for (i = 0; i < groupCount; i++)
mtfSymbol[i] = i;
for (i = 0; i < nSelectors; i++) {
/* Get next value */
for (j = 0; get_bits(bd, 1); j++)
if (j >= groupCount)
return RETVAL_DATA_ERROR;
/* Decode MTF to get the next selector */
uc = mtfSymbol[j];
for (; j; j--)
mtfSymbol[j] = mtfSymbol[j-1];
mtfSymbol[0] = selectors[i] = uc;
}
/* Read the Huffman coding tables for each group, which code
for symTotal literal symbols, plus two run symbols (RUNA,
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
value for the first symbol, and an offset from the
previous value for every symbol after that.
(Subtracting 1 before the loop and then adding it
back at the end is an optimization that makes the
test inside the loop simpler: symbol length 0
becomes negative, so an unsigned inequality catches
it.) */
t = get_bits(bd, 5)-1;
for (i = 0; i < symCount; i++) {
for (;;) {
if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
return RETVAL_DATA_ERROR;
/* If first bit is 0, stop. Else
second bit indicates whether to
increment or decrement the value.
Optimization: grab 2 bits and unget
the second if the first was 0. */
k = get_bits(bd, 2);
if (k < 2) {
bd->inbufBitCount++;
break;
}
/* Add one if second bit 1, else
* subtract 1. Avoids if/else */
t += (((k+1)&2)-1);
}
/* Correct for the initial -1, to get the
* final symbol length */
length[i] = t+1;
}
/* Find largest and smallest lengths in this group */
minLen = maxLen = length[0];
for (i = 1; i < symCount; i++) {
if (length[i] > maxLen)
maxLen = length[i];
else if (length[i] < minLen)
minLen = length[i];
}
/* Calculate permute[], base[], and limit[] tables from
* length[].
*
* permute[] is the lookup table for converting
* Huffman coded symbols into decoded symbols. base[]
* is the amount to subtract from the value of a
* Huffman symbol of a given length when using
* permute[].
*
* limit[] indicates the largest numerical value a
* symbol with a given number of bits can have. This
* is how the Huffman codes can vary in length: each
* code with a value > limit[length] needs another
* bit.
*/
hufGroup = bd->groups+j;
hufGroup->minLen = minLen;
hufGroup->maxLen = maxLen;
/* Note that minLen can't be smaller than 1, so we
adjust the base and limit array pointers so we're
not always wasting the first entry. We do this
again when using them (during symbol decoding).*/
base = hufGroup->base-1;
limit = hufGroup->limit-1;
/* Calculate permute[]. Concurrently, initialize
* temp[] and limit[]. */
pp = 0;
for (i = minLen; i <= maxLen; i++) {
temp[i] = limit[i] = 0;
for (t = 0; t < symCount; t++)
if (length[t] == i)
hufGroup->permute[pp++] = t;
}
/* Count symbols coded for at each bit length */
for (i = 0; i < symCount; i++)
temp[length[i]]++;
/* Calculate limit[] (the largest symbol-coding value
 * at each bit length, which is (previous limit <<
 * 1) + symbols at this level), and base[] (number of
 * symbols to ignore at each bit length, which is limit
 * minus the cumulative count of symbols coded for
 * already). */
pp = t = 0;
for (i = minLen; i < maxLen; i++) {
pp += temp[i];
/* We read the largest possible symbol size
and then unget bits after determining how
many we need, and those extra bits could be
set to anything. (They're noise from
future symbols.) At each level we're
really only interested in the first few
bits, so here we set all the trailing
to-be-ignored bits to 1 so they don't
affect the value > limit[length]
comparison. */
limit[i] = (pp << (maxLen - i)) - 1;
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
}
/* We've finished reading and digesting the block header. Now
read this block's Huffman coded symbols from the file and
undo the Huffman coding and run length encoding, saving the
result into dbuf[dbufCount++] = uc */
/* Initialize symbol occurrence counters and symbol Move To
* Front table */
for (i = 0; i < 256; i++) {
byteCount[i] = 0;
mtfSymbol[i] = (unsigned char)i;
}
/* Loop through compressed symbols. */
runPos = dbufCount = symCount = selector = 0;
for (;;) {
/* Determine which Huffman coding group to use. */
if (!(symCount--)) {
symCount = GROUP_SIZE-1;
if (selector >= nSelectors)
return RETVAL_DATA_ERROR;
hufGroup = bd->groups+selectors[selector++];
base = hufGroup->base-1;
limit = hufGroup->limit-1;
}
/* Read next Huffman-coded symbol. */
/* Note: It is far cheaper to read maxLen bits and
back up than it is to read minLen bits and then an
additional bit at a time, testing as we go.
Because there is a trailing last block (with file
CRC), there is no danger of the overread causing an
unexpected EOF for a valid compressed file. As a
further optimization, we do the read inline
(falling back to a call to get_bits if the buffer
runs dry). The following (up to got_huff_bits:) is
equivalent to j = get_bits(bd, hufGroup->maxLen);
*/
while (bd->inbufBitCount < hufGroup->maxLen) {
if (bd->inbufPos == bd->inbufCount) {
j = get_bits(bd, hufGroup->maxLen);
goto got_huff_bits;
}
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
};
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
/* Figure out how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
++i;
bd->inbufBitCount += (hufGroup->maxLen - i);
/* Huffman decode value to get nextSym (with bounds checking) */
if ((i > hufGroup->maxLen)
|| (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
>= MAX_SYMBOLS))
return RETVAL_DATA_ERROR;
nextSym = hufGroup->permute[j];
/* We have now decoded the symbol, which indicates
either a new literal byte, or a repeated run of the
most recent literal byte. First, check if nextSym
indicates a repeated run, and if so loop collecting
how many times to repeat the last literal. */
if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
/* If this is the start of a new run, zero out
* counter */
if (!runPos) {
runPos = 1;
t = 0;
}
/* Neat trick that saves 1 symbol: instead of
or-ing 0 or 1 at each bit position, add 1
or 2 instead. For example, 1011 is 1 << 0
+ 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
+ 1 << 2. You can make any bit pattern
that way using 1 less symbol than the basic
or 0/1 method (except all bits 0, which
would use no symbols, but a run of length 0
doesn't mean anything in this context).
Thus space is saved. */
t += (runPos << nextSym);
/* +runPos if RUNA; +2*runPos if RUNB */
runPos <<= 1;
continue;
}
/* When we hit the first non-run symbol after a run,
we now know how many times to repeat the last
literal, so append that many copies to our buffer
of decoded symbols (dbuf) now. (The last literal
used is the one at the head of the mtfSymbol
array.) */
if (runPos) {
runPos = 0;
if (dbufCount+t >= dbufSize)
return RETVAL_DATA_ERROR;
uc = symToByte[mtfSymbol[0]];
byteCount[uc] += t;
while (t--)
dbuf[dbufCount++] = uc;
}
/* Is this the terminating symbol? */
if (nextSym > symTotal)
break;
/* At this point, nextSym indicates a new literal
character. Subtract one to get the position in the
MTF array at which this literal is currently to be
found. (Note that the result can't be -1 or 0,
because 0 and 1 are RUNA and RUNB. But another
instance of the first symbol in the mtf array,
position 0, would have been handled as part of a
run above. Therefore 1 unused mtf position minus 2
non-literal nextSym values equals -1.) */
if (dbufCount >= dbufSize)
return RETVAL_DATA_ERROR;
i = nextSym - 1;
uc = mtfSymbol[i];
/* Adjust the MTF array. Since we typically expect to
 * move only a small number of symbols, and are bound
 * by 256 in any case, using memmove here would
 * typically be bigger and slower due to function call
 * overhead and other assorted setup costs. */
do {
mtfSymbol[i] = mtfSymbol[i-1];
} while (--i);
mtfSymbol[0] = uc;
uc = symToByte[uc];
/* We have our literal byte. Save it into dbuf. */
byteCount[uc]++;
dbuf[dbufCount++] = (unsigned int)uc;
}
/* At this point, we've read all the Huffman-coded symbols
(and repeated runs) for this block from the input stream,
and decoded them into the intermediate buffer. There are
dbufCount many decoded bytes in dbuf[]. Now undo the
Burrows-Wheeler transform on dbuf. See
http://dogma.net/markn/articles/bwt/bwt.htm
*/
/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
j = 0;
for (i = 0; i < 256; i++) {
k = j+byteCount[i];
byteCount[i] = j;
j = k;
}
/* Figure out what order dbuf would be in if we sorted it. */
for (i = 0; i < dbufCount; i++) {
uc = (unsigned char)(dbuf[i] & 0xff);
dbuf[byteCount[uc]] |= (i << 8);
byteCount[uc]++;
}
/* Decode first byte by hand to initialize "previous" byte.
Note that it doesn't get output, and if the first three
characters are identical it doesn't qualify as a run (hence
writeRunCountdown = 5). */
if (dbufCount) {
if (origPtr >= dbufCount)
return RETVAL_DATA_ERROR;
bd->writePos = dbuf[origPtr];
bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
bd->writePos >>= 8;
bd->writeRunCountdown = 5;
}
bd->writeCount = dbufCount;
return RETVAL_OK;
}
/* Undo burrows-wheeler transform on intermediate buffer to produce output.
Up to len bytes of data are written to outbuf. Return value is the
number of bytes written, or an error (all errors are negative numbers).
*/
static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
{
const unsigned int *dbuf;
int pos, xcurrent, previous, gotcount;
/* If last read was short due to end of file, return last block now */
if (bd->writeCount < 0)
return bd->writeCount;
gotcount = 0;
dbuf = bd->dbuf;
pos = bd->writePos;
xcurrent = bd->writeCurrent;
/* We will always have pending decoded data to write into the output
buffer unless this is the very first call (in which case we haven't
Huffman-decoded a block into the intermediate buffer yet). */
if (bd->writeCopies) {
/* Inside the loop, writeCopies means extra copies (beyond 1) */
--bd->writeCopies;
/* Loop outputting bytes */
for (;;) {
/* If the output buffer is full, snapshot
* state and return */
if (gotcount >= len) {
bd->writePos = pos;
bd->writeCurrent = xcurrent;
bd->writeCopies++;
return len;
}
/* Write next byte into output buffer, updating CRC */
outbuf[gotcount++] = xcurrent;
bd->writeCRC = (((bd->writeCRC) << 8)
^bd->crc32Table[((bd->writeCRC) >> 24)
^xcurrent]);
/* Loop now if we're outputting multiple
* copies of this byte */
if (bd->writeCopies) {
--bd->writeCopies;
continue;
}
decode_next_byte:
if (!bd->writeCount--)
break;
/* Follow sequence vector to undo
* Burrows-Wheeler transform */
previous = xcurrent;
pos = dbuf[pos];
xcurrent = pos&0xff;
pos >>= 8;
/* After 3 consecutive copies of the same
byte, the 4th is a repeat count. We count
down from 4 instead of counting up because
testing for non-zero is faster */
if (--bd->writeRunCountdown) {
if (xcurrent != previous)
bd->writeRunCountdown = 4;
} else {
/* We have a repeated run, this byte
* indicates the count */
bd->writeCopies = xcurrent;
xcurrent = previous;
bd->writeRunCountdown = 5;
/* Sometimes there are just 3 bytes
* (run length 0) */
if (!bd->writeCopies)
goto decode_next_byte;
/* Subtract the 1 copy we'd output
* anyway to get extras */
--bd->writeCopies;
}
}
/* Decompression of this block completed successfully */
bd->writeCRC = ~bd->writeCRC;
bd->totalCRC = ((bd->totalCRC << 1) |
(bd->totalCRC >> 31)) ^ bd->writeCRC;
/* If this block had a CRC error, force file level CRC error. */
if (bd->writeCRC != bd->headerCRC) {
bd->totalCRC = bd->headerCRC+1;
return RETVAL_LAST_BLOCK;
}
}
/* Refill the intermediate buffer by Huffman-decoding next
* block of input */
/* (previous is just a convenient unused temp variable here) */
previous = get_next_block(bd);
if (previous) {
bd->writeCount = previous;
return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
}
bd->writeCRC = 0xffffffffUL;
pos = bd->writePos;
xcurrent = bd->writeCurrent;
goto decode_next_byte;
}
static long INIT nofill(void *buf, unsigned long len)
{
return -1;
}
/* Allocate the structure, read file header. If no fill function is given,
inbuf must contain a complete bzip2 stream (len bytes long); otherwise
more data is pulled through the fill() callback as needed. */
static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
long (*fill)(void*, unsigned long))
{
struct bunzip_data *bd;
unsigned int i, j, c;
const unsigned int BZh0 =
(((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
+(((unsigned int)'h') << 8)+(unsigned int)'0';
/* Figure out how much data to allocate */
i = sizeof(struct bunzip_data);
/* Allocate bunzip_data. Most fields initialize to zero. */
bd = *bdp = malloc(i);
if (!bd)
return RETVAL_OUT_OF_MEMORY;
memset(bd, 0, sizeof(struct bunzip_data));
/* Setup input buffer */
bd->inbuf = inbuf;
bd->inbufCount = len;
if (fill != NULL)
bd->fill = fill;
else
bd->fill = nofill;
/* Init the CRC32 table (big endian) */
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
bd->crc32Table[i] = c;
}
/* Ensure that file starts with "BZh['1'-'9']." */
i = get_bits(bd, 32);
if (((unsigned int)(i-BZh0-1)) >= 9)
return RETVAL_NOT_BZIP_DATA;
/* Fourth byte (ASCII '1'-'9') indicates block size in units of 100k of
uncompressed data. Allocate intermediate buffer for block. */
bd->dbufSize = 100000*(i-BZh0);
bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
if (!bd->dbuf)
return RETVAL_OUT_OF_MEMORY;
return RETVAL_OK;
}
/* Example usage: decompress a buffer or stream. (Stops at end of bzip2
data, not end of file.) */
STATIC int INIT bunzip2(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *outbuf,
long *pos,
void(*error)(char *x))
{
struct bunzip_data *bd;
int i = -1;
unsigned char *inbuf;
if (flush)
outbuf = malloc(BZIP2_IOBUF_SIZE);
if (!outbuf) {
error("Could not allocate output buffer");
return RETVAL_OUT_OF_MEMORY;
}
if (buf)
inbuf = buf;
else
inbuf = malloc(BZIP2_IOBUF_SIZE);
if (!inbuf) {
error("Could not allocate input buffer");
i = RETVAL_OUT_OF_MEMORY;
goto exit_0;
}
i = start_bunzip(&bd, inbuf, len, fill);
if (!i) {
for (;;) {
i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
if (i <= 0)
break;
if (!flush)
outbuf += i;
else
if (i != flush(outbuf, i)) {
i = RETVAL_UNEXPECTED_OUTPUT_EOF;
break;
}
}
}
/* Check CRC and release memory */
if (i == RETVAL_LAST_BLOCK) {
if (bd->headerCRC != bd->totalCRC)
error("Data integrity error when decompressing.");
else
i = RETVAL_OK;
} else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
error("Compressed file ends unexpectedly");
}
if (!bd)
goto exit_1;
if (bd->dbuf)
large_free(bd->dbuf);
if (pos)
*pos = bd->inbufPos;
free(bd);
exit_1:
if (!buf)
free(inbuf);
exit_0:
if (flush)
free(outbuf);
return i;
}
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *outbuf,
long *pos,
void(*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
}
#endif
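Both the selector stream and the literal symbols above are move-to-front coded; stripped of the surrounding bit I/O, the decode step reduces to this sketch:

/* Sketch: plain MTF decode -- fetch the symbol at idx, slide earlier
 * entries down, and move the symbol to the front. */
static unsigned char sketch_mtf_decode(unsigned char *mtf, int idx)
{
	unsigned char sym = mtf[idx];

	while (idx--)
		mtf[idx + 1] = mtf[idx];
	mtf[0] = sym;
	return sym;
}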

184
lib/decompress_inflate.c Normal file

@ -0,0 +1,184 @@
#ifdef STATIC
/* Pre-boot environment: included */
/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
 * of errors about console_printk etc. on ARM */
#define _LINUX_KERNEL_H
#include "zlib_inflate/inftrees.c"
#include "zlib_inflate/inffast.c"
#include "zlib_inflate/inflate.c"
#else /* STATIC */
/* initramfs et al: linked */
#include <linux/zutil.h>
#include "zlib_inflate/inftrees.h"
#include "zlib_inflate/inffast.h"
#include "zlib_inflate/inflate.h"
#include "zlib_inflate/infutil.h"
#include <linux/decompress/inflate.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#define GZIP_IOBUF_SIZE (16*1024)
static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
/* Included from initramfs et al code */
STATIC int INIT gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf,
long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
int rc;
size_t out_len;
rc = -1;
if (flush) {
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
goto gunzip_nomem1;
}
if (buf)
zbuf = buf;
else {
zbuf = malloc(GZIP_IOBUF_SIZE);
len = 0;
}
if (!zbuf) {
error("Out of memory while allocating input buffer");
goto gunzip_nomem2;
}
strm = malloc(sizeof(*strm));
if (strm == NULL) {
error("Out of memory while allocating z_stream");
goto gunzip_nomem3;
}
strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
sizeof(struct inflate_state));
if (strm->workspace == NULL) {
error("Out of memory while allocating workspace");
goto gunzip_nomem4;
}
if (!fill)
fill = nofill;
if (len == 0)
len = fill(zbuf, GZIP_IOBUF_SIZE);
/* verify the gzip header */
if (len < 10 ||
zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
if (pos)
*pos = 0;
error("Not a gzip file");
goto gunzip_5;
}
/* skip over gzip header (1f,8b,08... 10 bytes total +
* possible asciz filename)
*/
strm->next_in = zbuf + 10;
strm->avail_in = len - 10;
/* skip over asciz filename */
if (zbuf[3] & 0x8) {
do {
/*
* If the filename doesn't fit into the buffer,
* the file is very probably corrupt. Don't try
* to read more data.
*/
if (strm->avail_in == 0) {
error("header error");
goto gunzip_5;
}
--strm->avail_in;
} while (*strm->next_in++);
}
strm->next_out = out_buf;
strm->avail_out = out_len;
rc = zlib_inflateInit2(strm, -MAX_WBITS);
if (!flush) {
WS(strm)->inflate_state.wsize = 0;
WS(strm)->inflate_state.window = NULL;
}
while (rc == Z_OK) {
if (strm->avail_in == 0) {
/* TODO: handle case where both pos and fill are set */
len = fill(zbuf, GZIP_IOBUF_SIZE);
if (len < 0) {
rc = -1;
error("read error");
break;
}
strm->next_in = zbuf;
strm->avail_in = len;
}
rc = zlib_inflate(strm, 0);
/* Write any data generated */
if (flush && strm->next_out > out_buf) {
long l = strm->next_out - out_buf;
if (l != flush(out_buf, l)) {
rc = -1;
error("write error");
break;
}
strm->next_out = out_buf;
strm->avail_out = out_len;
}
/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
if (rc == Z_STREAM_END) {
rc = 0;
break;
} else if (rc != Z_OK) {
error("uncompression error");
rc = -1;
}
}
zlib_inflateEnd(strm);
if (pos)
/* add + 8 to skip over trailer */
*pos = strm->next_in - zbuf+8;
gunzip_5:
free(strm->workspace);
gunzip_nomem4:
free(strm);
gunzip_nomem3:
if (!buf)
free(zbuf);
gunzip_nomem2:
if (flush)
free(out_buf);
gunzip_nomem1:
return rc; /* returns Z_OK (0) if successful */
}
#define decompress gunzip
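In streaming mode gunzip() pulls input through fill() and pushes output through flush(); a wiring sketch (source_read(), sink_write(), and the error handler are hypothetical):

static long sketch_fill(void *buf, unsigned long len)
{
	return source_read(buf, len);	/* hypothetical input source */
}

static long sketch_flush(void *buf, unsigned long len)
{
	return sink_write(buf, len);	/* hypothetical output sink */
}

/* Sketch: NULL in/out buffers make gunzip() allocate its own. */
static int sketch_gunzip_stream(void (*err)(char *))
{
	return gunzip(NULL, 0, sketch_fill, sketch_flush, NULL, NULL, err);
}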

209
lib/decompress_unlz4.c Normal file

@ -0,0 +1,209 @@
/*
* Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef STATIC
#define PREBOOT
#include "lz4/lz4_decompress.c"
#else
#include <linux/decompress/unlz4.h>
#endif
#include <linux/types.h>
#include <linux/lz4.h>
#include <linux/decompress/mm.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
/*
* Note: Uncompressed chunk size is used in the compressor side
* (userspace side for compression).
 * It is hardcoded because there is no proper way to extract it
 * from the binary stream generated by the preliminary version
 * of the LZ4 tool.
*/
#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
#define ARCHIVE_MAGICNUMBER 0x184C2102
STATIC inline int INIT unlz4(u8 *input, long in_len,
long (*fill)(void *, unsigned long),
long (*flush)(void *, unsigned long),
u8 *output, long *posp,
void (*error) (char *x))
{
int ret = -1;
size_t chunksize = 0;
size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE;
u8 *inp;
u8 *inp_start;
u8 *outp;
long size = in_len;
#ifdef PREBOOT
size_t out_len = get_unaligned_le32(input + in_len);
#endif
size_t dest_len;
if (output) {
outp = output;
} else if (!flush) {
error("NULL output pointer and no flush function provided");
goto exit_0;
} else {
outp = large_malloc(uncomp_chunksize);
if (!outp) {
error("Could not allocate output buffer");
goto exit_0;
}
}
if (input && fill) {
error("Both input pointer and fill function provided,");
goto exit_1;
} else if (input) {
inp = input;
} else if (!fill) {
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
inp = large_malloc(lz4_compressbound(uncomp_chunksize));
if (!inp) {
error("Could not allocate input buffer");
goto exit_1;
}
}
inp_start = inp;
if (posp)
*posp = 0;
if (fill) {
size = fill(inp, 4);
if (size < 4) {
error("data corrupted");
goto exit_2;
}
}
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
if (!fill) {
inp += 4;
size -= 4;
}
} else {
error("invalid header");
goto exit_2;
}
if (posp)
*posp += 4;
for (;;) {
if (fill) {
size = fill(inp, 4);
if (size == 0)
break;
if (size < 4) {
error("data corrupted");
goto exit_2;
}
}
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
if (!fill) {
inp += 4;
size -= 4;
}
if (posp)
*posp += 4;
continue;
}
if (posp)
*posp += 4;
if (!fill) {
inp += 4;
size -= 4;
} else {
if (chunksize > lz4_compressbound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
size = fill(inp, chunksize);
if (size < chunksize) {
error("data corrupted");
goto exit_2;
}
}
#ifdef PREBOOT
if (out_len >= uncomp_chunksize) {
dest_len = uncomp_chunksize;
out_len -= dest_len;
} else
dest_len = out_len;
ret = lz4_decompress(inp, &chunksize, outp, dest_len);
#else
dest_len = uncomp_chunksize;
ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp,
&dest_len);
#endif
if (ret < 0) {
error("Decoding failed");
goto exit_2;
}
ret = -1;
if (flush && flush(outp, dest_len) != dest_len)
goto exit_2;
if (output)
outp += dest_len;
if (posp)
*posp += chunksize;
if (!fill) {
size -= chunksize;
if (size == 0)
break;
else if (size < 0) {
error("data corrupted");
goto exit_2;
}
inp += chunksize;
}
}
ret = 0;
exit_2:
if (!input)
large_free(inp_start);
exit_1:
if (!output)
large_free(outp);
exit_0:
return ret;
}
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
long *posp,
void(*error)(char *x)
)
{
return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
}
#endif
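The legacy container handled above is just a le32 magic followed by le32-length-prefixed compressed chunks, with the magic allowed to reappear between concatenated streams. A sketch that walks the headers without decoding:

/* Sketch: count the chunks of an in-memory legacy LZ4 stream. */
static long sketch_count_lz4_chunks(const u8 *p, long len)
{
	long n = 0;

	if (len < 4 || get_unaligned_le32(p) != ARCHIVE_MAGICNUMBER)
		return -1;		/* not a legacy LZ4 stream */
	p += 4;
	len -= 4;
	while (len >= 4) {
		u32 chunk = get_unaligned_le32(p);

		if (chunk == ARCHIVE_MAGICNUMBER) {	/* concatenated stream */
			p += 4;
			len -= 4;
			continue;
		}
		if (chunk > len - 4)
			return -1;	/* truncated chunk */
		p += 4 + chunk;
		len -= 4 + chunk;
		n++;
	}
	return n;
}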

680
lib/decompress_unlzma.c Normal file

@ -0,0 +1,680 @@
/* Lzma decompressor for Linux kernel. Shamelessly snarfed
*from busybox 1.1.1
*
*Linux kernel adaptation
*Copyright (C) 2006 Alain < alain@knaff.lu >
*
*Based on small lzma deflate implementation/Small range coder
*implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
*Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
*Copyright (C) 1999-2005 Igor Pavlov
*
*Copyrights of the parts, see headers below.
*
*
*This program is free software; you can redistribute it and/or
*modify it under the terms of the GNU Lesser General Public
*License as published by the Free Software Foundation; either
*version 2.1 of the License, or (at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
*Lesser General Public License for more details.
*
*You should have received a copy of the GNU Lesser General Public
*License along with this library; if not, write to the Free Software
*Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef STATIC
#define PREBOOT
#else
#include <linux/decompress/unlzma.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
static long long INIT read_int(unsigned char *ptr, int size)
{
int i;
long long ret = 0;
for (i = 0; i < size; i++)
ret = (ret << 8) | ptr[size-i-1];
return ret;
}
#define ENDIAN_CONVERT(x) \
x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
/* Small range coder implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
*Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
*Copyright (c) 1999-2005 Igor Pavlov
*/
#include <linux/compiler.h>
#define LZMA_IOBUF_SIZE 0x10000
struct rc {
long (*fill)(void*, unsigned long);
uint8_t *ptr;
uint8_t *buffer;
uint8_t *buffer_end;
long buffer_size;
uint32_t code;
uint32_t range;
uint32_t bound;
void (*error)(char *);
};
#define RC_TOP_BITS 24
#define RC_MOVE_BITS 5
#define RC_MODEL_TOTAL_BITS 11
static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
/* Called twice: once at startup and once in rc_normalize() */
static void INIT rc_read(struct rc *rc)
{
rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
if (rc->buffer_size <= 0)
rc->error("unexpected EOF");
rc->ptr = rc->buffer;
rc->buffer_end = rc->buffer + rc->buffer_size;
}
/* Called once */
static inline void INIT rc_init(struct rc *rc,
long (*fill)(void*, unsigned long),
char *buffer, long buffer_size)
{
if (fill)
rc->fill = fill;
else
rc->fill = nofill;
rc->buffer = (uint8_t *)buffer;
rc->buffer_size = buffer_size;
rc->buffer_end = rc->buffer + rc->buffer_size;
rc->ptr = rc->buffer;
rc->code = 0;
rc->range = 0xFFFFFFFF;
}
static inline void INIT rc_init_code(struct rc *rc)
{
int i;
for (i = 0; i < 5; i++) {
if (rc->ptr >= rc->buffer_end)
rc_read(rc);
rc->code = (rc->code << 8) | *rc->ptr++;
}
}
/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
static void INIT rc_do_normalize(struct rc *rc)
{
if (rc->ptr >= rc->buffer_end)
rc_read(rc);
rc->range <<= 8;
rc->code = (rc->code << 8) | *rc->ptr++;
}
static inline void INIT rc_normalize(struct rc *rc)
{
if (rc->range < (1 << RC_TOP_BITS))
rc_do_normalize(rc);
}
/* Called 9 times */
/* Why does rc_is_bit_0_helper() exist?
 *Because we always want to expose (rc->code < rc->bound) to the optimizer
*/
static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
{
rc_normalize(rc);
rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
return rc->bound;
}
static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
{
uint32_t t = rc_is_bit_0_helper(rc, p);
return rc->code < t;
}
/* Called ~10 times, but very small, thus inlined */
static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
{
rc->range = rc->bound;
*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
}
static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
{
rc->range -= rc->bound;
rc->code -= rc->bound;
*p -= *p >> RC_MOVE_BITS;
}
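/*
 * Illustrative sketch (hypothetical helper, not used by the decoder):
 * the two updates above form an exponential moving average -- each
 * decoded bit moves the 11-bit estimate 1/32 of the way toward 2048
 * ("the next bit is surely 0") or toward 0 ("surely 1").
 */
static inline uint16_t INIT sketch_adapt_prob(uint16_t prob, int bit)
{
	if (bit)
		prob -= prob >> RC_MOVE_BITS;
	else
		prob += ((1 << RC_MODEL_TOTAL_BITS) - prob) >> RC_MOVE_BITS;
	return prob;
}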
/* Called 4 times in unlzma loop */
static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
{
if (rc_is_bit_0(rc, p)) {
rc_update_bit_0(rc, p);
*symbol *= 2;
return 0;
} else {
rc_update_bit_1(rc, p);
*symbol = *symbol * 2 + 1;
return 1;
}
}
/* Called once */
static inline int INIT rc_direct_bit(struct rc *rc)
{
rc_normalize(rc);
rc->range >>= 1;
if (rc->code >= rc->range) {
rc->code -= rc->range;
return 1;
}
return 0;
}
/* Called twice */
static inline void INIT
rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
{
int i = num_levels;
*symbol = 1;
while (i--)
rc_get_bit(rc, p + *symbol, symbol);
*symbol -= 1 << num_levels;
}
/*
* Small lzma deflate implementation.
* Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
* Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
* Copyright (C) 1999-2005 Igor Pavlov
*/
struct lzma_header {
uint8_t pos;
uint32_t dict_size;
uint64_t dst_size;
} __attribute__ ((packed));
#define LZMA_BASE_SIZE 1846
#define LZMA_LIT_SIZE 768
#define LZMA_NUM_POS_BITS_MAX 4
#define LZMA_LEN_NUM_LOW_BITS 3
#define LZMA_LEN_NUM_MID_BITS 3
#define LZMA_LEN_NUM_HIGH_BITS 8
#define LZMA_LEN_CHOICE 0
#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
#define LZMA_LEN_MID (LZMA_LEN_LOW \
+ (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
#define LZMA_LEN_HIGH (LZMA_LEN_MID \
+(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
#define LZMA_NUM_STATES 12
#define LZMA_NUM_LIT_STATES 7
#define LZMA_START_POS_MODEL_INDEX 4
#define LZMA_END_POS_MODEL_INDEX 14
#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
#define LZMA_NUM_POS_SLOT_BITS 6
#define LZMA_NUM_LEN_TO_POS_STATES 4
#define LZMA_NUM_ALIGN_BITS 4
#define LZMA_MATCH_MIN_LEN 2
#define LZMA_IS_MATCH 0
#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
+ (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_SPEC_POS (LZMA_POS_SLOT \
+(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
#define LZMA_ALIGN (LZMA_SPEC_POS \
+ LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
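/*
 * Orientation note (not in the original source): the offsets above carve
 * a single uint16_t probability array into consecutive regions:
 *
 *   p + LZMA_IS_MATCH       is-match flags, indexed by (state, pos_state)
 *   p + LZMA_IS_REP*        rep-match flags, indexed by state
 *   p + LZMA_POS_SLOT       position-slot bit trees per length class
 *   p + LZMA_SPEC_POS       low-distance position bits
 *   p + LZMA_ALIGN          distance align bits
 *   p + LZMA_LEN_CODER      match-length coder (LZMA_NUM_LEN_PROBS entries)
 *   p + LZMA_REP_LEN_CODER  rep-match-length coder
 *   p + LZMA_LITERAL        literal coders, LZMA_LIT_SIZE << (lc + lp) entries
 */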
struct writer {
uint8_t *buffer;
uint8_t previous_byte;
size_t buffer_pos;
int bufsize;
size_t global_pos;
long (*flush)(void*, unsigned long);
struct lzma_header *header;
};
struct cstate {
int state;
uint32_t rep0, rep1, rep2, rep3;
};
static inline size_t INIT get_pos(struct writer *wr)
{
return wr->global_pos + wr->buffer_pos;
}
static inline uint8_t INIT peek_old_byte(struct writer *wr,
uint32_t offs)
{
if (!wr->flush) {
int32_t pos;
while (offs > wr->header->dict_size)
offs -= wr->header->dict_size;
pos = wr->buffer_pos - offs;
return wr->buffer[pos];
} else {
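/*
 * Note: pos may wrap below zero here; as an unsigned value it becomes
 * huge, and repeatedly adding dict_size wraps it back into
 * [0, dict_size) by modular arithmetic.
 */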
uint32_t pos = wr->buffer_pos - offs;
while (pos >= wr->header->dict_size)
pos += wr->header->dict_size;
return wr->buffer[pos];
}
}
static inline int INIT write_byte(struct writer *wr, uint8_t byte)
{
wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
wr->buffer_pos = 0;
wr->global_pos += wr->header->dict_size;
if (wr->flush((char *)wr->buffer, wr->header->dict_size)
!= wr->header->dict_size)
return -1;
}
return 0;
}
static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
{
return write_byte(wr, peek_old_byte(wr, offs));
}
static inline int INIT copy_bytes(struct writer *wr,
uint32_t rep0, int len)
{
do {
if (copy_byte(wr, rep0))
return -1;
len--;
} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
return len;
}
static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
struct cstate *cst, uint16_t *p,
int pos_state, uint16_t *prob,
int lc, uint32_t literal_pos_mask) {
int mi = 1;
rc_update_bit_0(rc, prob);
prob = (p + LZMA_LITERAL +
(LZMA_LIT_SIZE
* (((get_pos(wr) & literal_pos_mask) << lc)
+ (wr->previous_byte >> (8 - lc))))
);
if (cst->state >= LZMA_NUM_LIT_STATES) {
int match_byte = peek_old_byte(wr, cst->rep0);
do {
int bit;
uint16_t *prob_lit;
match_byte <<= 1;
bit = match_byte & 0x100;
prob_lit = prob + 0x100 + bit + mi;
if (rc_get_bit(rc, prob_lit, &mi)) {
if (!bit)
break;
} else {
if (bit)
break;
}
} while (mi < 0x100);
}
while (mi < 0x100) {
uint16_t *prob_lit = prob + mi;
rc_get_bit(rc, prob_lit, &mi);
}
if (cst->state < 4)
cst->state = 0;
else if (cst->state < 10)
cst->state -= 3;
else
cst->state -= 6;
return write_byte(wr, mi);
}
static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
struct cstate *cst, uint16_t *p,
int pos_state, uint16_t *prob) {
int offset;
uint16_t *prob_len;
int num_bits;
int len;
rc_update_bit_1(rc, prob);
prob = p + LZMA_IS_REP + cst->state;
if (rc_is_bit_0(rc, prob)) {
rc_update_bit_0(rc, prob);
cst->rep3 = cst->rep2;
cst->rep2 = cst->rep1;
cst->rep1 = cst->rep0;
cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
prob = p + LZMA_LEN_CODER;
} else {
rc_update_bit_1(rc, prob);
prob = p + LZMA_IS_REP_G0 + cst->state;
if (rc_is_bit_0(rc, prob)) {
rc_update_bit_0(rc, prob);
prob = (p + LZMA_IS_REP_0_LONG
+ (cst->state <<
LZMA_NUM_POS_BITS_MAX) +
pos_state);
if (rc_is_bit_0(rc, prob)) {
rc_update_bit_0(rc, prob);
cst->state = cst->state < LZMA_NUM_LIT_STATES ?
9 : 11;
return copy_byte(wr, cst->rep0);
} else {
rc_update_bit_1(rc, prob);
}
} else {
uint32_t distance;
rc_update_bit_1(rc, prob);
prob = p + LZMA_IS_REP_G1 + cst->state;
if (rc_is_bit_0(rc, prob)) {
rc_update_bit_0(rc, prob);
distance = cst->rep1;
} else {
rc_update_bit_1(rc, prob);
prob = p + LZMA_IS_REP_G2 + cst->state;
if (rc_is_bit_0(rc, prob)) {
rc_update_bit_0(rc, prob);
distance = cst->rep2;
} else {
rc_update_bit_1(rc, prob);
distance = cst->rep3;
cst->rep3 = cst->rep2;
}
cst->rep2 = cst->rep1;
}
cst->rep1 = cst->rep0;
cst->rep0 = distance;
}
cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
prob = p + LZMA_REP_LEN_CODER;
}
prob_len = prob + LZMA_LEN_CHOICE;
if (rc_is_bit_0(rc, prob_len)) {
rc_update_bit_0(rc, prob_len);
prob_len = (prob + LZMA_LEN_LOW
+ (pos_state <<
LZMA_LEN_NUM_LOW_BITS));
offset = 0;
num_bits = LZMA_LEN_NUM_LOW_BITS;
} else {
rc_update_bit_1(rc, prob_len);
prob_len = prob + LZMA_LEN_CHOICE_2;
if (rc_is_bit_0(rc, prob_len)) {
rc_update_bit_0(rc, prob_len);
prob_len = (prob + LZMA_LEN_MID
+ (pos_state <<
LZMA_LEN_NUM_MID_BITS));
offset = 1 << LZMA_LEN_NUM_LOW_BITS;
num_bits = LZMA_LEN_NUM_MID_BITS;
} else {
rc_update_bit_1(rc, prob_len);
prob_len = prob + LZMA_LEN_HIGH;
offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
+ (1 << LZMA_LEN_NUM_MID_BITS));
num_bits = LZMA_LEN_NUM_HIGH_BITS;
}
}
rc_bit_tree_decode(rc, prob_len, num_bits, &len);
len += offset;
if (cst->state < 4) {
int pos_slot;
cst->state += LZMA_NUM_LIT_STATES;
prob =
p + LZMA_POS_SLOT +
((len <
LZMA_NUM_LEN_TO_POS_STATES ? len :
LZMA_NUM_LEN_TO_POS_STATES - 1)
<< LZMA_NUM_POS_SLOT_BITS);
rc_bit_tree_decode(rc, prob,
LZMA_NUM_POS_SLOT_BITS,
&pos_slot);
if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
int i, mi;
num_bits = (pos_slot >> 1) - 1;
cst->rep0 = 2 | (pos_slot & 1);
if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
cst->rep0 <<= num_bits;
prob = p + LZMA_SPEC_POS +
cst->rep0 - pos_slot - 1;
} else {
num_bits -= LZMA_NUM_ALIGN_BITS;
while (num_bits--)
cst->rep0 = (cst->rep0 << 1) |
rc_direct_bit(rc);
prob = p + LZMA_ALIGN;
cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
num_bits = LZMA_NUM_ALIGN_BITS;
}
i = 1;
mi = 1;
while (num_bits--) {
if (rc_get_bit(rc, prob + mi, &mi))
cst->rep0 |= i;
i <<= 1;
}
} else
cst->rep0 = pos_slot;
if (++(cst->rep0) == 0)
return 0;
if (cst->rep0 > wr->header->dict_size
|| cst->rep0 > get_pos(wr))
return -1;
}
len += LZMA_MATCH_MIN_LEN;
return copy_bytes(wr, cst->rep0, len);
}
STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
long *posp,
void(*error)(char *x)
)
{
struct lzma_header header;
int lc, pb, lp;
uint32_t pos_state_mask;
uint32_t literal_pos_mask;
uint16_t *p;
int num_probs;
struct rc rc;
int i, mi;
struct writer wr;
struct cstate cst;
unsigned char *inbuf;
int ret = -1;
rc.error = error;
if (buf)
inbuf = buf;
else
inbuf = malloc(LZMA_IOBUF_SIZE);
if (!inbuf) {
error("Could not allocate input buffer");
goto exit_0;
}
cst.state = 0;
cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
wr.header = &header;
wr.flush = flush;
wr.global_pos = 0;
wr.previous_byte = 0;
wr.buffer_pos = 0;
rc_init(&rc, fill, inbuf, in_len);
for (i = 0; i < sizeof(header); i++) {
if (rc.ptr >= rc.buffer_end)
rc_read(&rc);
((unsigned char *)&header)[i] = *rc.ptr++;
}
if (header.pos >= (9 * 5 * 5)) {
error("bad header");
goto exit_1;
}
mi = 0;
lc = header.pos;
while (lc >= 9) {
mi++;
lc -= 9;
}
pb = 0;
lp = mi;
while (lp >= 5) {
pb++;
lp -= 5;
}
pos_state_mask = (1 << pb) - 1;
literal_pos_mask = (1 << lp) - 1;
ENDIAN_CONVERT(header.dict_size);
ENDIAN_CONVERT(header.dst_size);
if (header.dict_size == 0)
header.dict_size = 1;
if (output)
wr.buffer = output;
else {
wr.bufsize = MIN(header.dst_size, header.dict_size);
wr.buffer = large_malloc(wr.bufsize);
}
if (wr.buffer == NULL)
goto exit_1;
num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
if (p == 0)
goto exit_2;
num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
for (i = 0; i < num_probs; i++)
p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
rc_init_code(&rc);
while (get_pos(&wr) < header.dst_size) {
int pos_state = get_pos(&wr) & pos_state_mask;
uint16_t *prob = p + LZMA_IS_MATCH +
(cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
if (rc_is_bit_0(&rc, prob)) {
if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
lc, literal_pos_mask)) {
error("LZMA data is corrupt");
goto exit_3;
}
} else {
if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
error("LZMA data is corrupt");
goto exit_3;
}
if (cst.rep0 == 0)
break;
}
if (rc.buffer_size <= 0)
goto exit_3;
}
if (posp)
*posp = rc.ptr-rc.buffer;
if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos)
ret = 0;
exit_3:
large_free(p);
exit_2:
if (!output)
large_free(wr.buffer);
exit_1:
if (!buf)
free(inbuf);
exit_0:
return ret;
}
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output,
long *posp,
void(*error)(char *x)
)
{
return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
}
#endif
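/*
 * Illustrative sketch, not part of the original file: decompressing an
 * in-memory LZMA image in one shot. With fill and flush both NULL,
 * unlzma() reads directly from the input buffer and writes into output,
 * which must be large enough for the whole uncompressed image. The names
 * below are invented for the example.
 */
#if 0
static void example_error(char *msg)
{
	/* report however the environment allows; ignored in this sketch */
	(void)msg;
}

static int example_unlzma(unsigned char *in, long in_len, unsigned char *out)
{
	long in_used = 0;

	/* returns 0 on success, -1 on corrupt or truncated input */
	return unlzma(in, in_len, NULL, NULL, out, &in_used, example_error);
}
#endif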

290
lib/decompress_unlzo.c Normal file
View file

@@ -0,0 +1,290 @@
/*
* LZO decompressor for the Linux kernel. Code borrowed from the lzo
* implementation by Markus Franz Xaver Johannes Oberhumer.
*
* Linux kernel adaptation:
* Copyright (C) 2009
* Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com>
*
* Original code:
* Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer
* All Rights Reserved.
*
* lzop and the LZO library are free software; you can redistribute them
* and/or modify them under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING.
* If not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Markus F.X.J. Oberhumer
* <markus@oberhumer.com>
* http://www.oberhumer.com/opensource/lzop/
*/
#ifdef STATIC
#include "lzo/lzo1x_decompress_safe.c"
#else
#include <linux/decompress/unlzo.h>
#endif
#include <linux/types.h>
#include <linux/lzo.h>
#include <linux/decompress/mm.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
static const unsigned char lzop_magic[] = {
0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
#define LZO_BLOCK_SIZE (256*1024l)
#define HEADER_HAS_FILTER 0x00000800L
#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4)
#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
{
int l;
u8 *parse = input;
u8 *end = input + in_len;
u8 level = 0;
u16 version;
/*
* Check that there's enough input to possibly hold a valid header.
* Several fields can then be parsed before more than the minimum
* header size could have been consumed.
*/
if (in_len < HEADER_SIZE_MIN)
return 0;
/* read magic: first 9 bytes */
for (l = 0; l < 9; l++) {
if (*parse++ != lzop_magic[l])
return 0;
}
/* get version (2 bytes), skip library version (2),
* 'need to be extracted' version (2) and
* method (1) */
version = get_unaligned_be16(parse);
parse += 7;
if (version >= 0x0940)
level = *parse++;
if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
parse += 8; /* flags + filter info */
else
parse += 4; /* flags */
/*
* At least mode, mtime_low, filename length, and checksum must
* be left to be parsed. If also mtime_high is present, it's OK
* because the next input buffer check is after reading the
* filename length.
*/
if (end - parse < 8 + 1 + 4)
return 0;
/* skip mode and mtime_low */
parse += 8;
if (version >= 0x0940)
parse += 4; /* skip mtime_high */
l = *parse++;
/* don't care about the file name, and skip checksum */
if (end - parse < l + 4)
return 0;
parse += l + 4;
*skip = parse - input;
return 1;
}
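/*
 * For orientation, the lzop header that parse_header() walks looks like
 * this (a sketch reconstructed from the code above; sizes in bytes):
 *
 *   magic           9  0x89 "LZO" 0x00 0x0d 0x0a 0x1a 0x0a
 *   version         2  read
 *   lib_version     2  skipped
 *   version_needed  2  skipped
 *   method          1  skipped
 *   level           1  only if version >= 0x0940
 *   flags           4  plus 4 bytes of filter info if HEADER_HAS_FILTER
 *   mode            4  skipped
 *   mtime_low       4  skipped
 *   mtime_high      4  only if version >= 0x0940
 *   name_len        1  followed by name_len bytes of file name, skipped
 *   checksum        4  skipped
 */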
STATIC int INIT unlzo(u8 *input, long in_len,
long (*fill)(void *, unsigned long),
long (*flush)(void *, unsigned long),
u8 *output, long *posp,
void (*error) (char *x))
{
u8 r = 0;
long skip = 0;
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;
int ret = -1;
if (output) {
out_buf = output;
} else if (!flush) {
error("NULL output pointer and no flush function provided");
goto exit;
} else {
out_buf = malloc(LZO_BLOCK_SIZE);
if (!out_buf) {
error("Could not allocate output buffer");
goto exit;
}
}
if (input && fill) {
error("Both input pointer and fill function provided, don't know what to do");
goto exit_1;
} else if (input) {
in_buf = input;
} else if (!fill) {
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
if (!in_buf) {
error("Could not allocate input buffer");
goto exit_1;
}
}
in_buf_save = in_buf;
if (posp)
*posp = 0;
if (fill) {
/*
* Start from in_buf + HEADER_SIZE_MAX to make it possible
* to use memcpy() to copy the unused data to the beginning
* of the buffer. This way memmove() isn't needed which
* is missing from pre-boot environments of most archs.
*/
in_buf += HEADER_SIZE_MAX;
in_len = fill(in_buf, HEADER_SIZE_MAX);
}
if (!parse_header(in_buf, &skip, in_len)) {
error("invalid header");
goto exit_2;
}
in_buf += skip;
in_len -= skip;
if (fill) {
/* Move the unused data to the beginning of the buffer. */
memcpy(in_buf_save, in_buf, in_len);
in_buf = in_buf_save;
}
if (posp)
*posp = skip;
for (;;) {
/* read uncompressed block size */
if (fill && in_len < 4) {
skip = fill(in_buf + in_len, 4 - in_len);
if (skip > 0)
in_len += skip;
}
if (in_len < 4) {
error("file corrupted");
goto exit_2;
}
dst_len = get_unaligned_be32(in_buf);
in_buf += 4;
in_len -= 4;
/* exit if last block */
if (dst_len == 0) {
if (posp)
*posp += 4;
break;
}
if (dst_len > LZO_BLOCK_SIZE) {
error("dest len longer than block size");
goto exit_2;
}
/* read compressed block size, and skip block checksum info */
if (fill && in_len < 8) {
skip = fill(in_buf + in_len, 8 - in_len);
if (skip > 0)
in_len += skip;
}
if (in_len < 8) {
error("file corrupted");
goto exit_2;
}
src_len = get_unaligned_be32(in_buf);
in_buf += 8;
in_len -= 8;
if (src_len <= 0 || src_len > dst_len) {
error("file corrupted");
goto exit_2;
}
/* decompress */
if (fill && in_len < src_len) {
skip = fill(in_buf + in_len, src_len - in_len);
if (skip > 0)
in_len += skip;
}
if (in_len < src_len) {
error("file corrupted");
goto exit_2;
}
tmp = dst_len;
/* When the input data is not compressed at all,
* lzo1x_decompress_safe will fail, so call memcpy()
* instead */
if (unlikely(dst_len == src_len))
memcpy(out_buf, in_buf, src_len);
else {
r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
out_buf, &tmp);
if (r != LZO_E_OK || dst_len != tmp) {
error("Compressed data violation");
goto exit_2;
}
}
if (flush && flush(out_buf, dst_len) != dst_len)
goto exit_2;
if (output)
out_buf += dst_len;
if (posp)
*posp += src_len + 12;
in_buf += src_len;
in_len -= src_len;
if (fill) {
/*
* If there happens to still be unused data left in
* in_buf, move it to the beginning of the buffer.
* Use a loop to avoid memmove() dependency.
*/
if (in_len > 0)
for (skip = 0; skip < in_len; ++skip)
in_buf_save[skip] = in_buf[skip];
in_buf = in_buf_save;
}
}
ret = 0;
exit_2:
if (!input)
free(in_buf_save);
exit_1:
if (!output)
free(out_buf);
exit:
return ret;
}
#define decompress unlzo

397
lib/decompress_unxz.c Normal file
View file

@@ -0,0 +1,397 @@
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
/*
* Important notes about in-place decompression
*
* At least on x86, the kernel is decompressed in place: the compressed data
* is placed to the end of the output buffer, and the decompressor overwrites
* most of the compressed data. There must be enough safety margin to
* guarantee that the write position is always behind the read position.
*
* The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
* Note that the margin with XZ is bigger than with Deflate (gzip)!
*
* The worst case for in-place decompression is that the beginning of
* the file is compressed extremely well, and the rest of the file is
* uncompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding uncompressible data.
*
* The structure of the .xz file in the case of a compressed kernel is as
* follows. Sizes of the fields, in bytes, are in parentheses.
*
* Stream Header (12)
* Block Header:
* Block Header (8-12)
* Compressed Data (N)
* Block Padding (0-3)
* CRC32 (4)
* Index (8-20)
* Stream Footer (12)
*
* Normally there is exactly one Block, but let's assume that there are
* 2-4 Blocks just in case. Because Stream Header and also Block Header
* of the first Block don't make the decompressor produce any uncompressed
* data, we can ignore them from our calculations. Block Headers of possible
* additional Blocks have to be taken into account still. With these
* assumptions, it is safe to assume that the total header overhead is
* less than 128 bytes.
*
* Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
* doesn't change the size of the data, it is enough to calculate the
* safety margin for LZMA2.
*
* LZMA2 stores the data in chunks. Each chunk has a header whose size is
* a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
* the maximum chunk header size is 8 bytes. After the chunk header, there
* may be up to 64 KiB of actual payload in the chunk. Often the payload is
* quite a bit smaller though; to be safe, let's assume that an average
* chunk has only 32 KiB of payload.
*
* The maximum uncompressed size of the payload is 2 MiB. The minimum
* uncompressed size of the payload is in practice never less than the
* payload size itself. The LZMA2 format would allow uncompressed size
* to be less than the payload size, but no sane compressor creates such
* files. LZMA2 supports storing uncompressible data in uncompressed form,
* so there's never a need to create payloads whose uncompressed size is
* smaller than the compressed size.
*
* The assumption, that the uncompressed size of the payload is never
* smaller than the payload itself, is valid only when talking about
* the payload as a whole. It is possible that the payload has parts where
* the decompressor consumes more input than it produces output. Calculating
* the worst case for this would be tricky. Instead of trying to do that,
* let's simply make sure that the decompressor never overwrites any bytes
* of the payload which it is currently reading.
*
* Now we have enough information to calculate the safety margin. We need
* - 128 bytes for the .xz file format headers;
* - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
* per chunk, each chunk having average payload size of 32 KiB); and
* - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
* the decompressor never overwrites anything from the LZMA2 chunk
* payload it is currently reading.
*
* We get the following formula:
*
* safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
* = 128 + (uncompressed_size >> 12) + 65536
*
* For comparison, according to arch/x86/boot/compressed/misc.c, the
* equivalent formula for Deflate is this:
*
* safety_margin = 18 + (uncompressed_size >> 12) + 32768
*
* Thus, when updating Deflate-only in-place kernel decompressor to
* support XZ, the fixed overhead has to be increased from 18+32768 bytes
* to 128+65536 bytes.
*/
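/*
 * A minimal sketch of the margin computation described above. No such
 * helper exists in this file; boot code bakes the formula into its
 * linker script or misc.c instead.
 */
#if 0
static unsigned long xz_safety_margin(unsigned long uncompressed_size)
{
	/* 128 bytes of .xz headers, one 8-byte chunk header per 32 KiB of
	 * output, plus one full 64 KiB chunk payload of slack. */
	return 128 + (uncompressed_size >> 12) + 65536;
}
#endif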
/*
* STATIC is defined to "static" if we are being built for kernel
* decompression (pre-boot code). <linux/decompress/mm.h> will define
* STATIC to empty if it wasn't already defined. Since we will need to
* know later if we are being used for kernel decompression, we define
* XZ_PREBOOT here.
*/
#ifdef STATIC
# define XZ_PREBOOT
#endif
#ifdef __KERNEL__
# include <linux/decompress/mm.h>
#endif
#define XZ_EXTERN STATIC
#ifndef XZ_PREBOOT
# include <linux/slab.h>
# include <linux/xz.h>
#else
/*
* Use the internal CRC32 code instead of the kernel's CRC32 module,
* which is not available in the early phase of booting.
*/
#define XZ_INTERNAL_CRC32 1
/*
* For boot time use, we enable only the BCJ filter of the current
* architecture or none if no BCJ filter is available for the architecture.
*/
#ifdef CONFIG_X86
# define XZ_DEC_X86
#endif
#ifdef CONFIG_PPC
# define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
# define XZ_DEC_ARM
#endif
#ifdef CONFIG_IA64
# define XZ_DEC_IA64
#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
#endif
/*
* This will get the basic headers so that memeq() and others
* can be defined.
*/
#include "xz/xz_private.h"
/*
* Replace the normal allocation functions with the versions from
* <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
* when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
* Work around it here because the other decompressors don't need it.
*/
#undef kmalloc
#undef kfree
#undef vmalloc
#undef vfree
#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)
/*
* FIXME: Not all basic memory functions are provided in architecture-specific
* files (yet). We define our own versions here for now, but this should be
* only a temporary solution.
*
* memeq and memzero are not used much and any remotely sane implementation
* is fast enough. memcpy/memmove speed matters in multi-call mode, but
* the kernel image is decompressed in single-call mode, in which only
* memcpy speed can matter and only if there is a lot of uncompressible data
* (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
* functions below should just be kept small; it's probably not worth
* optimizing for speed.
*/
#ifndef memeq
static bool memeq(const void *a, const void *b, size_t size)
{
const uint8_t *x = a;
const uint8_t *y = b;
size_t i;
for (i = 0; i < size; ++i)
if (x[i] != y[i])
return false;
return true;
}
#endif
#ifndef memzero
static void memzero(void *buf, size_t size)
{
uint8_t *b = buf;
uint8_t *e = b + size;
while (b != e)
*b++ = '\0';
}
#endif
#ifndef memmove
/* Not static to avoid a conflict with the prototype in the Linux headers. */
void *memmove(void *dest, const void *src, size_t size)
{
uint8_t *d = dest;
const uint8_t *s = src;
size_t i;
if (d < s) {
for (i = 0; i < size; ++i)
d[i] = s[i];
} else if (d > s) {
i = size;
while (i-- > 0)
d[i] = s[i];
}
return dest;
}
#endif
/*
* Since we need memmove anyway, we could use it as memcpy too.
* Commented out for now to avoid breaking things.
*/
/*
#ifndef memcpy
# define memcpy memmove
#endif
*/
#include "xz/xz_crc32.c"
#include "xz/xz_dec_stream.c"
#include "xz/xz_dec_lzma2.c"
#include "xz/xz_dec_bcj.c"
#endif /* XZ_PREBOOT */
/* Size of the input and output buffers in multi-call mode */
#define XZ_IOBUF_SIZE 4096
/*
* This function implements the API defined in <linux/decompress/generic.h>.
*
* This wrapper will automatically choose single-call or multi-call mode
* of the native XZ decoder API. The single-call mode can be used only when
* both input and output buffers are available as a single chunk, i.e. when
* fill() and flush() won't be used.
*/
STATIC int INIT unxz(unsigned char *in, long in_size,
long (*fill)(void *dest, unsigned long size),
long (*flush)(void *src, unsigned long size),
unsigned char *out, long *in_used,
void (*error)(char *x))
{
struct xz_buf b;
struct xz_dec *s;
enum xz_ret ret;
bool must_free_in = false;
#if XZ_INTERNAL_CRC32
xz_crc32_init();
#endif
if (in_used != NULL)
*in_used = 0;
if (fill == NULL && flush == NULL)
s = xz_dec_init(XZ_SINGLE, 0);
else
s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);
if (s == NULL)
goto error_alloc_state;
if (flush == NULL) {
b.out = out;
b.out_size = (size_t)-1;
} else {
b.out_size = XZ_IOBUF_SIZE;
b.out = malloc(XZ_IOBUF_SIZE);
if (b.out == NULL)
goto error_alloc_out;
}
if (in == NULL) {
must_free_in = true;
in = malloc(XZ_IOBUF_SIZE);
if (in == NULL)
goto error_alloc_in;
}
b.in = in;
b.in_pos = 0;
b.in_size = in_size;
b.out_pos = 0;
if (fill == NULL && flush == NULL) {
ret = xz_dec_run(s, &b);
} else {
do {
if (b.in_pos == b.in_size && fill != NULL) {
if (in_used != NULL)
*in_used += b.in_pos;
b.in_pos = 0;
in_size = fill(in, XZ_IOBUF_SIZE);
if (in_size < 0) {
/*
* This isn't an optimal error code
* but it probably isn't worth making
* a new one either.
*/
ret = XZ_BUF_ERROR;
break;
}
b.in_size = in_size;
}
ret = xz_dec_run(s, &b);
if (flush != NULL && (b.out_pos == b.out_size
|| (ret != XZ_OK && b.out_pos > 0))) {
/*
* Setting ret here may hide an error
* returned by xz_dec_run(), but probably
* it's not too bad.
*/
if (flush(b.out, b.out_pos) != (long)b.out_pos)
ret = XZ_BUF_ERROR;
b.out_pos = 0;
}
} while (ret == XZ_OK);
if (must_free_in)
free(in);
if (flush != NULL)
free(b.out);
}
if (in_used != NULL)
*in_used += b.in_pos;
xz_dec_end(s);
switch (ret) {
case XZ_STREAM_END:
return 0;
case XZ_MEM_ERROR:
/* This can occur only in multi-call mode. */
error("XZ decompressor ran out of memory");
break;
case XZ_FORMAT_ERROR:
error("Input is not in the XZ format (wrong magic bytes)");
break;
case XZ_OPTIONS_ERROR:
error("Input was encoded with settings that are not "
"supported by this XZ decoder");
break;
case XZ_DATA_ERROR:
case XZ_BUF_ERROR:
error("XZ-compressed data is corrupt");
break;
default:
error("Bug in the XZ decompressor");
break;
}
return -1;
error_alloc_in:
if (flush != NULL)
free(b.out);
error_alloc_out:
xz_dec_end(s);
error_alloc_state:
error("XZ decompressor ran out of memory");
return -1;
}
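/*
 * Illustrative sketch, not part of the original file: single-call use,
 * where the whole compressed image and a sufficiently large output
 * buffer are both in memory, so unxz() picks XZ_SINGLE mode internally.
 * The names below are invented for the example.
 */
#if 0
static void example_error(char *msg)
{
	(void)msg;	/* report however the environment allows */
}

static int example_unxz(unsigned char *in, long in_size, unsigned char *out)
{
	long in_used;

	return unxz(in, in_size, NULL, NULL, out, &in_used, example_error);
}
#endif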
/*
* This macro is used by architecture-specific files to decompress
* the kernel image.
*/
#define decompress unxz

407
lib/devres.c Normal file
View file

@@ -0,0 +1,407 @@
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
void devm_ioremap_release(struct device *dev, void *res)
{
iounmap(*(void __iomem **)res);
}
static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
return *(void **)res == match_data;
}
/**
* devm_ioremap - Managed ioremap()
* @dev: Generic device to remap IO address for
* @offset: BUS offset to map
* @size: Size of map
*
* Managed ioremap(). Map is automatically unmapped on driver detach.
*/
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
unsigned long size)
{
void __iomem **ptr, *addr;
ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = ioremap(offset, size);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_ioremap);
/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
* @offset: BUS offset to map
* @size: Size of map
*
* Managed ioremap_nocache(). Map is automatically unmapped on driver
* detach.
*/
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
unsigned long size)
{
void __iomem **ptr, *addr;
ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = ioremap_nocache(offset, size);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);
/**
* devm_iounmap - Managed iounmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
*
* Managed iounmap(). @addr must have been mapped using devm_ioremap*().
*/
void devm_iounmap(struct device *dev, void __iomem *addr)
{
WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
(__force void *)addr));
iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
/**
* devm_ioremap_resource() - check, request region, and ioremap resource
* @dev: generic device to handle the resource for
* @res: resource to be handled
*
* Checks that a resource is a valid memory region, requests the memory region
* and ioremaps it either as cacheable or as non-cacheable memory depending on
* the resource's flags. All operations are managed and will be undone on
* driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure. Usage example:
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_ioremap_resource(&pdev->dev, res);
* if (IS_ERR(base))
* return PTR_ERR(base);
*/
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
resource_size_t size;
const char *name;
void __iomem *dest_ptr;
BUG_ON(!dev);
if (!res || resource_type(res) != IORESOURCE_MEM) {
dev_err(dev, "invalid resource\n");
return IOMEM_ERR_PTR(-EINVAL);
}
size = resource_size(res);
name = res->name ?: dev_name(dev);
if (!devm_request_mem_region(dev, res->start, size, name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
return IOMEM_ERR_PTR(-EBUSY);
}
if (res->flags & IORESOURCE_CACHEABLE)
dest_ptr = devm_ioremap(dev, res->start, size);
else
dest_ptr = devm_ioremap_nocache(dev, res->start, size);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
}
return dest_ptr;
}
EXPORT_SYMBOL(devm_ioremap_resource);
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
*/
static void devm_ioport_map_release(struct device *dev, void *res)
{
ioport_unmap(*(void __iomem **)res);
}
static int devm_ioport_map_match(struct device *dev, void *res,
void *match_data)
{
return *(void **)res == match_data;
}
/**
* devm_ioport_map - Managed ioport_map()
* @dev: Generic device to map ioport for
* @port: Port to map
* @nr: Number of ports to map
*
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
*/
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
{
void __iomem **ptr, *addr;
ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
addr = ioport_map(port, nr);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
} else
devres_free(ptr);
return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
/**
* devm_ioport_unmap - Managed ioport_unmap()
* @dev: Generic device to unmap for
* @addr: Address to unmap
*
* Managed ioport_unmap(). @addr must have been mapped using
* devm_ioport_map().
*/
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
ioport_unmap(addr);
WARN_ON(devres_destroy(dev, devm_ioport_map_release,
devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/*
* PCI iomap devres
*/
#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
struct pcim_iomap_devres {
void __iomem *table[PCIM_IOMAP_MAX];
};
static void pcim_iomap_release(struct device *gendev, void *res)
{
struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
struct pcim_iomap_devres *this = res;
int i;
for (i = 0; i < PCIM_IOMAP_MAX; i++)
if (this->table[i])
pci_iounmap(dev, this->table[i]);
}
/**
* pcim_iomap_table - access iomap allocation table
* @pdev: PCI device to access iomap table for
*
* Access iomap allocation table for @pdev. If the iomap table doesn't
* exist and @pdev is managed, it will be allocated. All iomaps
* recorded in the iomap table are automatically unmapped on driver
* detach.
*
* This function might sleep when the table is first allocated but can
* be safely called without context and is guaranteed to succeed once
* allocated.
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr, *new_dr;
dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
if (dr)
return dr->table;
new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
if (!new_dr)
return NULL;
dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
/**
* pcim_iomap - Managed pcim_iomap()
* @pdev: PCI device to iomap for
* @bar: BAR to iomap
* @maxlen: Maximum length of iomap
*
* Managed pci_iomap(). Map is automatically unmapped on driver
* detach.
*/
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
void __iomem **tbl;
BUG_ON(bar >= PCIM_IOMAP_MAX);
tbl = (void __iomem **)pcim_iomap_table(pdev);
if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
return NULL;
tbl[bar] = pci_iomap(pdev, bar, maxlen);
return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
/**
* pcim_iounmap - Managed pci_iounmap()
* @pdev: PCI device to iounmap for
* @addr: Address to unmap
*
* Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
*/
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
void __iomem **tbl;
int i;
pci_iounmap(pdev, addr);
tbl = (void __iomem **)pcim_iomap_table(pdev);
BUG_ON(!tbl);
for (i = 0; i < PCIM_IOMAP_MAX; i++)
if (tbl[i] == addr) {
tbl[i] = NULL;
return;
}
WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
* pcim_iomap_regions - Request and iomap PCI BARs
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
* @name: Name used when requesting regions
*
* Request and iomap regions specified by @mask.
*/
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
void __iomem * const *iomap;
int i, rc;
iomap = pcim_iomap_table(pdev);
if (!iomap)
return -ENOMEM;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
unsigned long len;
if (!(mask & (1 << i)))
continue;
rc = -EINVAL;
len = pci_resource_len(pdev, i);
if (!len)
goto err_inval;
rc = pci_request_region(pdev, i, name);
if (rc)
goto err_inval;
rc = -ENOMEM;
if (!pcim_iomap(pdev, i, 0))
goto err_region;
}
return 0;
err_region:
pci_release_region(pdev, i);
err_inval:
while (--i >= 0) {
if (!(mask & (1 << i)))
continue;
pcim_iounmap(pdev, iomap[i]);
pci_release_region(pdev, i);
}
return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
/**
* pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to iomap
* @name: Name used when requesting regions
*
* Request all PCI BARs and iomap regions specified by @mask.
*/
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
const char *name)
{
int request_mask = ((1 << 6) - 1) & ~mask;
int rc;
rc = pci_request_selected_regions(pdev, request_mask, name);
if (rc)
return rc;
rc = pcim_iomap_regions(pdev, mask, name);
if (rc)
pci_release_selected_regions(pdev, request_mask);
return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
/**
* pcim_iounmap_regions - Unmap and release PCI BARs
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to unmap and release
*
* Unmap and release regions specified by @mask.
*/
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
void __iomem * const *iomap;
int i;
iomap = pcim_iomap_table(pdev);
if (!iomap)
return;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (!(mask & (1 << i)))
continue;
pcim_iounmap(pdev, iomap[i]);
pci_release_region(pdev, i);
}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
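/*
 * Typical driver usage, sketched here for orientation (not part of this
 * file): request and map BAR 0 during probe; the mapping and region are
 * torn down automatically on driver detach. The driver name string is
 * invented for the example.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *mmio;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << 0, "example-driver");
	if (rc)
		return rc;

	mmio = pcim_iomap_table(pdev)[0];
	/* use mmio; no explicit iounmap or release needed */
	return 0;
}
#endif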
#endif /* CONFIG_PCI */

270
lib/digsig.c Normal file
View file

@@ -0,0 +1,270 @@
/*
* Copyright (C) 2011 Nokia Corporation
* Copyright (C) 2011 Intel Corporation
*
* Author:
* Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
* <dmitry.kasatkin@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* File: sign.c
* implements signature (RSA) verification
* pkcs decoding is based on LibTomCrypt code
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <linux/mpi.h>
#include <linux/digsig.h>
static struct crypto_shash *shash;
static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
unsigned long msglen,
unsigned long modulus_bitlen,
unsigned long *outlen)
{
unsigned long modulus_len, ps_len, i;
modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);
/* test message size */
if ((msglen > modulus_len) || (modulus_len < 11))
return NULL;
/* separate encoded message */
if (msg[0] != 0x00 || msg[1] != 0x01)
return NULL;
for (i = 2; i < modulus_len - 1; i++)
if (msg[i] != 0xFF)
break;
/* separator check */
if (msg[i] != 0)
/* There was no octet with hexadecimal value 0x00
to separate ps from m. */
return NULL;
ps_len = i - 2;
*outlen = (msglen - (2 + ps_len + 1));
return msg + 2 + ps_len + 1;
}
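/*
 * The encoded message the decoder above expects is EMSA-PKCS1-v1_5
 * (a note reconstructed from the checks in the code):
 *
 *   EM = 0x00 || 0x01 || PS || 0x00 || M
 *
 * where PS is a run of 0xFF padding bytes and M is the payload (here,
 * the hash being verified). On success the function returns a pointer
 * to M and stores its length in *outlen.
 */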
/*
* RSA Signature verification with public key
*/
static int digsig_verify_rsa(struct key *key,
const char *sig, int siglen,
const char *h, int hlen)
{
int err = -EINVAL;
unsigned long len;
unsigned long mlen, mblen;
unsigned nret, l;
int head, i;
unsigned char *out1 = NULL;
const char *m;
MPI in = NULL, res = NULL, pkey[2];
uint8_t *p, *datap, *endp;
struct user_key_payload *ukp;
struct pubkey_hdr *pkh;
down_read(&key->sem);
ukp = key->payload.data;
if (ukp->datalen < sizeof(*pkh))
goto err1;
pkh = (struct pubkey_hdr *)ukp->data;
if (pkh->version != 1)
goto err1;
if (pkh->algo != PUBKEY_ALGO_RSA)
goto err1;
if (pkh->nmpi != 2)
goto err1;
datap = pkh->mpi;
endp = ukp->data + ukp->datalen;
err = -ENOMEM;
for (i = 0; i < pkh->nmpi; i++) {
unsigned int remaining = endp - datap;
pkey[i] = mpi_read_from_buffer(datap, &remaining);
if (!pkey[i])
goto err;
datap += remaining;
}
mblen = mpi_get_nbits(pkey[0]);
mlen = DIV_ROUND_UP(mblen, 8);
if (mlen == 0)
goto err;
out1 = kzalloc(mlen, GFP_KERNEL);
if (!out1)
goto err;
nret = siglen;
in = mpi_read_from_buffer(sig, &nret);
if (!in)
goto err;
res = mpi_alloc(mpi_get_nlimbs(in) * 2);
if (!res)
goto err;
err = mpi_powm(res, in, pkey[1], pkey[0]);
if (err)
goto err;
if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) {
err = -EINVAL;
goto err;
}
p = mpi_get_buffer(res, &l, NULL);
if (!p) {
err = -EINVAL;
goto err;
}
len = mlen;
head = len - l;
memset(out1, 0, head);
memcpy(out1 + head, p, l);
kfree(p);
m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len);
if (!m || len != hlen || memcmp(m, h, hlen))
err = -EINVAL;
err:
mpi_free(in);
mpi_free(res);
kfree(out1);
while (--i >= 0)
mpi_free(pkey[i]);
err1:
up_read(&key->sem);
return err;
}
/**
* digsig_verify() - digital signature verification with public key
* @keyring: keyring to search key in
* @sig: digital signature
* @siglen: length of the signature
* @data: data
* @datalen: length of the data
*
* Returns 0 on success, -EINVAL otherwise
*
* Verifies data integrity against digital signature.
* Currently only RSA is supported.
* Normally a hash of the content is used as the data for this function.
*
*/
int digsig_verify(struct key *keyring, const char *sig, int siglen,
const char *data, int datalen)
{
int err = -ENOMEM;
struct signature_hdr *sh = (struct signature_hdr *)sig;
struct shash_desc *desc = NULL;
unsigned char hash[SHA1_DIGEST_SIZE];
struct key *key;
char name[20];
if (siglen < sizeof(*sh) + 2)
return -EINVAL;
if (sh->algo != PUBKEY_ALGO_RSA)
return -ENOTSUPP;
sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid));
if (keyring) {
/* search in specific keyring */
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1UL),
&key_type_user, name);
if (IS_ERR(kref))
key = ERR_CAST(kref);
else
key = key_ref_to_ptr(kref);
} else {
key = request_key(&key_type_user, name, NULL);
}
if (IS_ERR(key)) {
pr_err("key not found, id: %s\n", name);
return PTR_ERR(key);
}
desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
GFP_KERNEL);
if (!desc)
goto err;
desc->tfm = shash;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_shash_init(desc);
crypto_shash_update(desc, data, datalen);
crypto_shash_update(desc, sig, sizeof(*sh));
crypto_shash_final(desc, hash);
kfree(desc);
/* pass signature mpis address */
err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh),
hash, sizeof(hash));
err:
key_put(key);
return err ? -EINVAL : 0;
}
EXPORT_SYMBOL_GPL(digsig_verify);
static int __init digsig_init(void)
{
shash = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(shash)) {
pr_err("shash allocation failed\n");
return PTR_ERR(shash);
}
return 0;
}
static void __exit digsig_cleanup(void)
{
crypto_free_shash(shash);
}
module_init(digsig_init);
module_exit(digsig_cleanup);
MODULE_LICENSE("GPL");

183
lib/div64.c Normal file
View file

@@ -0,0 +1,183 @@
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
*
* Based on former do_div() implementation from asm-parisc/div64.h:
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
*
* Generic C version of 64bit/32bit division and modulo, with
* 64bit result and 32bit remainder.
*
* The fast case for (n>>32 == 0) is handled inline by do_div().
*
* Code generated for this function might be very inefficient
* for some CPUs. __div64_32() can be overridden by linking arch-specific
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
EXPORT_SYMBOL(__div64_32);
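/*
 * __div64_32() is normally reached through the do_div() macro, which
 * divides a u64 in place and hands back the 32-bit remainder. A sketch
 * of typical use, with names invented for the example:
 */
#if 0
static u32 example_ns_to_us(u64 ns)
{
	u32 rem;

	rem = do_div(ns, 1000);	/* ns now holds the quotient */
	(void)rem;		/* remainder, if the caller wants it */
	return (u32)ns;
}
#endif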
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
u64 quotient;
if (dividend < 0) {
quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
*remainder = -*remainder;
if (divisor > 0)
quotient = -quotient;
} else {
quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
if (divisor < 0)
quotient = -quotient;
}
return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: 64bit dividend
* @divisor: 64bit divisor
* @remainder: 64bit remainder
*
* This implementation is comparable to the algorithm used by div64_u64.
* But this operation, which includes math for calculating the remainder,
* is kept distinct to avoid slowing down the div64_u64 operation on 32bit
* systems.
*/
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
u32 high = divisor >> 32;
u64 quot;
if (high == 0) {
u32 rem32;
quot = div_u64_rem(dividend, divisor, &rem32);
*remainder = rem32;
} else {
int n = 1 + fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
quot--;
*remainder = dividend - quot * divisor;
if (*remainder >= divisor) {
quot++;
*remainder -= divisor;
}
}
return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
*
* This implementation is a modified version of the algorithm proposed
* by the book 'Hacker's Delight'. The original source and full proof
* can be found at the link below and are available for use without restriction.
*
* 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
*/
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
u32 high = divisor >> 32;
u64 quot;
if (high == 0) {
quot = div_u64(dividend, divisor);
} else {
int n = 1 + fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
quot--;
if ((dividend - quot * divisor) >= divisor)
quot++;
}
return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
/**
* div64_s64 - signed 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
*/
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
s64 quot, t;
quot = div64_u64(abs64(dividend), abs64(divisor));
t = (dividend ^ divisor) >> 63;
return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
#endif /* BITS_PER_LONG == 32 */
/*
* Iterative div/mod for use when dividend is not expected to be much
* bigger than divisor.
*/
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);

1635
lib/dma-debug.c Normal file

File diff suppressed because it is too large

63
lib/dump_stack.c Normal file
View file

@@ -0,0 +1,63 @@
/*
* Provide a default dump_stack() function for architectures
* which don't implement their own.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/atomic.h>
static void __dump_stack(void)
{
dump_stack_print_info(KERN_DEFAULT);
show_stack(NULL, NULL);
}
/**
* dump_stack - dump the current task information and its stack trace
*
* Architectures can override this implementation by implementing their own.
*/
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);
asmlinkage __visible void dump_stack(void)
{
int was_locked;
int old;
int cpu;
/*
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
preempt_disable();
retry:
cpu = smp_processor_id();
old = atomic_cmpxchg(&dump_lock, -1, cpu);
if (old == -1) {
was_locked = 0;
} else if (old == cpu) {
was_locked = 1;
} else {
cpu_relax();
goto retry;
}
__dump_stack();
if (!was_locked)
atomic_set(&dump_lock, -1);
preempt_enable();
}
#else
asmlinkage __visible void dump_stack(void)
{
__dump_stack();
}
#endif
EXPORT_SYMBOL(dump_stack);

1043
lib/dynamic_debug.c Normal file

File diff suppressed because it is too large

138
lib/dynamic_queue_limits.c Normal file
View file

@@ -0,0 +1,138 @@
/*
* Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
*
* Copyright (c) 2011, Tom Herbert <therbert@google.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/dynamic_queue_limits.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
unsigned int inprogress, prev_inprogress, limit;
unsigned int ovlimit, completed, num_queued;
bool all_prev_completed;
num_queued = ACCESS_ONCE(dql->num_queued);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
completed = dql->num_completed + count;
limit = dql->limit;
ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
inprogress = num_queued - completed;
prev_inprogress = dql->prev_num_queued - dql->num_completed;
all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
if ((ovlimit && !inprogress) ||
(dql->prev_ovlimit && all_prev_completed)) {
/*
* The queue is considered starved if:
* - The queue was over-limit in the last interval,
* and there is no more data in the queue.
* OR
* - The queue was over-limit in the previous interval and
* when enqueuing it was possible that all queued data
* had been consumed. This covers the case where the queue
* may have become starved between completion processing
* running and the next time an enqueue was scheduled.
*
* When the queue is starved, increase the limit by the number
* of bytes both sent and completed in the last interval,
* plus any previous over-limit.
*/
limit += POSDIFF(completed, dql->prev_num_queued) +
dql->prev_ovlimit;
dql->slack_start_time = jiffies;
dql->lowest_slack = UINT_MAX;
} else if (inprogress && prev_inprogress && !all_prev_completed) {
/*
* Queue was not starved, check if the limit can be decreased.
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
* If there is slack, i.e. an amount of excess data queued above
* the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
*/
unsigned int slack, slack_last_objs;
/*
* Slack is the maximum of
* - The queue limit plus previous over-limit minus twice
* the number of objects completed. Note that twice the
* number of completed bytes is the basis for an upper bound
* on the limit.
* - The portion of objects in the last queuing operation that
* was not part of the non-zero previous over-limit; that is,
* the last queueing operation "rounded down" by its
* non-overlimit portion.
*/
slack = POSDIFF(limit + dql->prev_ovlimit,
2 * (completed - dql->num_completed));
slack_last_objs = dql->prev_ovlimit ?
POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
slack = max(slack, slack_last_objs);
if (slack < dql->lowest_slack)
dql->lowest_slack = slack;
if (time_after(jiffies,
dql->slack_start_time + dql->slack_hold_time)) {
limit = POSDIFF(limit, dql->lowest_slack);
dql->slack_start_time = jiffies;
dql->lowest_slack = UINT_MAX;
}
}
/* Enforce bounds on limit */
limit = clamp(limit, dql->min_limit, dql->max_limit);
if (limit != dql->limit) {
dql->limit = limit;
ovlimit = 0;
}
dql->adj_limit = limit + completed;
dql->prev_ovlimit = ovlimit;
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
}
EXPORT_SYMBOL(dql_completed);
void dql_reset(struct dql *dql)
{
/* Reset all dynamic values */
dql->limit = 0;
dql->num_queued = 0;
dql->num_completed = 0;
dql->last_obj_cnt = 0;
dql->prev_num_queued = 0;
dql->prev_last_obj_cnt = 0;
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
}
EXPORT_SYMBOL(dql_reset);
int dql_init(struct dql *dql, unsigned hold_time)
{
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
dql_reset(dql);
return 0;
}
EXPORT_SYMBOL(dql_init);
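/*
 * How a network driver pairs this with the inline helpers from
 * <linux/dynamic_queue_limits.h>, sketched for orientation (not part of
 * this file; stop_queue() stands in for the driver's flow control):
 */
#if 0
/* transmit path: record what was queued, stop when no budget is left */
dql_queued(&dql, skb->len);
if (dql_avail(&dql) < 0)
	stop_queue();

/* completion path: report what the hardware actually finished */
dql_completed(&dql, completed_bytes);
#endif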

146
lib/earlycpio.c Normal file
View file

@@ -0,0 +1,146 @@
/* ----------------------------------------------------------------------- *
*
* Copyright 2012 Intel Corporation; author H. Peter Anvin
*
* This file is part of the Linux kernel, and is made available
* under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ----------------------------------------------------------------------- */
/*
* earlycpio.c
*
* Find a specific cpio member; must precede any compressed content.
* This is used to locate data items in the initramfs used by the
* kernel itself during early boot (before the main initramfs is
* decompressed.) It is the responsibility of the initramfs creator
* to ensure that these items are uncompressed at the head of the
* blob. Depending on the boot loader or package tool that may be a
* separate file or part of the same file.
*/
#include <linux/earlycpio.h>
#include <linux/kernel.h>
#include <linux/string.h>
enum cpio_fields {
C_MAGIC,
C_INO,
C_MODE,
C_UID,
C_GID,
C_NLINK,
C_MTIME,
C_FILESIZE,
C_MAJ,
C_MIN,
C_RMAJ,
C_RMIN,
C_NAMESIZE,
C_CHKSUM,
C_NFIELDS
};
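/*
 * Orientation note: in the "newc" format each header is ASCII hex, the
 * 6-character magic "070701" followed by thirteen 8-character fields in
 * the order above, hence cpio_header_len = 8 * C_NFIELDS - 2 = 110 bytes.
 */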
/**
* find_cpio_data - Search for files in an uncompressed cpio
* @path: The directory to search for, including a slash at the end
* @data: Pointer to the cpio archive or a header inside
* @len: Remaining length of the cpio based on data pointer
* @nextoff: When a matching file is found, this is the offset from the
* beginning of the cpio to the beginning of the next file, not the
* matching file itself. It can be used to iterate through the cpio
* to find all files inside of a directory path.
*
* @return: struct cpio_data containing the address, length and
* filename (with the directory path cut off) of the found file.
* If you search for a filename and not for files in a directory,
* pass the absolute path of the filename in the cpio and make sure
* the match returned an empty filename string.
*/
struct cpio_data find_cpio_data(const char *path, void *data,
size_t len, long *nextoff)
{
const size_t cpio_header_len = 8*C_NFIELDS - 2;
struct cpio_data cd = { NULL, 0, "" };
const char *p, *dptr, *nptr;
unsigned int ch[C_NFIELDS], *chp, v;
unsigned char c, x;
size_t mypathsize = strlen(path);
int i, j;
p = data;
while (len > cpio_header_len) {
if (!*p) {
/* All cpio headers need to be 4-byte aligned */
p += 4;
len -= 4;
continue;
}
j = 6; /* The magic field is only 6 characters */
chp = ch;
for (i = C_NFIELDS; i; i--) {
v = 0;
while (j--) {
v <<= 4;
c = *p++;
x = c - '0';
if (x < 10) {
v += x;
continue;
}
x = (c | 0x20) - 'a';
if (x < 6) {
v += x + 10;
continue;
}
goto quit; /* Invalid hexadecimal */
}
*chp++ = v;
j = 8; /* All other fields are 8 characters */
}
if ((ch[C_MAGIC] - 0x070701) > 1)
goto quit; /* Invalid magic */
len -= cpio_header_len;
dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4);
nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4);
if (nptr > p + len || dptr < p || nptr < dptr)
goto quit; /* Buffer overrun */
if ((ch[C_MODE] & 0170000) == 0100000 &&
ch[C_NAMESIZE] >= mypathsize &&
!memcmp(p, path, mypathsize)) {
*nextoff = (long)nptr - (long)data;
if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
pr_warn(
"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
p, MAX_CPIO_FILE_NAME);
}
strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
cd.data = (void *)dptr;
cd.size = ch[C_FILESIZE];
return cd; /* Found it! */
}
len -= (nptr - p);
p = nptr;
}
quit:
return cd;
}
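/*
 * Illustrative sketch, not part of this file: looking up an early
 * microcode blob inside the built-in initramfs. The path, pointers, and
 * apply_microcode() are stand-ins invented for the example.
 */
#if 0
long offset;
struct cpio_data cd = find_cpio_data("kernel/x86/microcode/",
				     initrd_start, initrd_size, &offset);
if (cd.data)
	/* cd.size bytes of the first matching file; cd.name is its name */
	apply_microcode(cd.data, cd.size);
#endif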

93
lib/extable.c Normal file
View file

@@ -0,0 +1,93 @@
/*
* Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
*
* Copyright (C) 2004 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#ifndef ARCH_HAS_SORT_EXTABLE
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*/
static int cmp_ex(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
if (x->insn > y->insn)
return 1;
if (x->insn < y->insn)
return -1;
return 0;
}
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex, NULL);
}
#ifdef CONFIG_MODULES
/*
* If the exception table is sorted, any entries referring to the module's
* init section will be at the beginning or the end.
*/
void trim_init_extable(struct module *m)
{
/* trim the beginning */
while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
m->extable++;
m->num_exentries--;
}
/* trim the end */
while (m->num_exentries &&
within_module_init(m->extable[m->num_exentries-1].insn, m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
#endif /* !ARCH_HAS_SORT_EXTABLE */
#ifndef ARCH_HAS_SEARCH_EXTABLE
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long value)
{
while (first <= last) {
const struct exception_table_entry *mid;
mid = ((last - first) >> 1) + first;
/*
* Careful: the distance between value and insn
* can be larger than LONG_MAX.
*/
if (mid->insn < value)
first = mid + 1;
else if (mid->insn > value)
last = mid - 1;
else
return mid;
}
return NULL;
}
#endif

233
lib/fault-inject.c Normal file
View file

@@ -0,0 +1,233 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/fault-inject.h>
/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
int setup_fault_attr(struct fault_attr *attr, char *str)
{
unsigned long probability;
unsigned long interval;
int times;
int space;
/* "<interval>,<probability>,<space>,<times>" */
if (sscanf(str, "%lu,%lu,%d,%d",
&interval, &probability, &space, &times) < 4) {
printk(KERN_WARNING
"FAULT_INJECTION: failed to parse arguments\n");
return 0;
}
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
atomic_set(&attr->space, space);
return 1;
}
EXPORT_SYMBOL_GPL(setup_fault_attr);
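/*
 * Example, sketched for orientation: booting with "failslab=100,10,0,-1"
 * feeds this parser interval 100, probability 10, space 0, and times -1,
 * i.e. consider only every 100th eligible attempt, fail those with 10%
 * probability, apply no size budget, and never stop failing.
 */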
static void fail_dump(struct fault_attr *attr)
{
if (attr->verbose > 0)
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
if (attr->verbose > 1)
dump_stack();
}
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
static bool fail_task(struct fault_attr *attr, struct task_struct *task)
{
return !in_interrupt() && task->make_it_fail;
}
#define MAX_STACK_TRACE_DEPTH 32
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static bool fail_stacktrace(struct fault_attr *attr)
{
struct stack_trace trace;
int depth = attr->stacktrace_depth;
unsigned long entries[MAX_STACK_TRACE_DEPTH];
int n;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
if (depth == 0)
return found;
trace.nr_entries = 0;
trace.entries = entries;
trace.max_entries = depth;
trace.skip = 1;
save_stack_trace(&trace);
for (n = 0; n < trace.nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)
return false;
if (attr->require_start <= entries[n] &&
entries[n] < attr->require_end)
found = true;
}
return found;
}
#else
static inline bool fail_stacktrace(struct fault_attr *attr)
{
return true;
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
/*
* This code is stolen from failmalloc-1.0
* http://www.nongnu.org/failmalloc/
*/
bool should_fail(struct fault_attr *attr, ssize_t size)
{
/* No need to check any other properties if the probability is 0 */
if (attr->probability == 0)
return false;
if (attr->task_filter && !fail_task(attr, current))
return false;
if (atomic_read(&attr->times) == 0)
return false;
if (atomic_read(&attr->space) > size) {
atomic_sub(size, &attr->space);
return false;
}
if (attr->interval > 1) {
attr->count++;
if (attr->count % attr->interval)
return false;
}
if (attr->probability <= prandom_u32() % 100)
return false;
if (!fail_stacktrace(attr))
return false;
fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
EXPORT_SYMBOL_GPL(should_fail);
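/*
 * Illustration only -- not part of the original file. The hot-path check a
 * subsystem makes before committing to an operation: on true it returns
 * the error a real failure would have produced. my_alloc() and
 * my_fail_attr (declared in the sketch above) are hypothetical.
 */
static void *my_alloc(size_t size, gfp_t gfp)
{
	if (should_fail(&my_fail_attr, size))
		return NULL;		/* injected failure */
	return kmalloc(size, gfp);
}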
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int debugfs_ul_set(void *data, u64 val)
{
*(unsigned long *)data = val;
return 0;
}
static int debugfs_ul_get(void *data, u64 *val)
{
*val = *(unsigned long *)data;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_ul);
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static int debugfs_stacktrace_depth_set(void *data, u64 val)
{
*(unsigned long *)data =
min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
static struct dentry *debugfs_create_stacktrace_depth(
const char *name, umode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value,
&fops_stacktrace_depth);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *parent, struct fault_attr *attr)
{
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
if (!dir)
return ERR_PTR(-ENOMEM);
if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
goto fail;
if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
goto fail;
if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
goto fail;
if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
goto fail;
if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
goto fail;
if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
goto fail;
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
&attr->stacktrace_depth))
goto fail;
if (!debugfs_create_ul("require-start", mode, dir,
&attr->require_start))
goto fail;
if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
goto fail;
if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
goto fail;
if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
goto fail;
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
return dir;
fail:
debugfs_remove_recursive(dir);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
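/*
 * Illustration only -- not part of the original file. Wiring the knobs up
 * at boot so the attribute can be tuned at runtime under
 * /sys/kernel/debug/my_fault/; the directory name is hypothetical.
 */
static int __init my_fault_debugfs_init(void)
{
	return PTR_ERR_OR_ZERO(fault_create_debugfs_attr("my_fault", NULL,
							 &my_fail_attr));
}
late_initcall(my_fault_debugfs_init);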

2
lib/fdt.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt.c"

2
lib/fdt_empty_tree.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_empty_tree.c"

2
lib/fdt_ro.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_ro.c"

2
lib/fdt_rw.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_rw.c"

2
lib/fdt_strerror.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_strerror.c"

2
lib/fdt_sw.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_sw.c"

2
lib/fdt_wip.c Normal file
View file

@ -0,0 +1,2 @@
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_wip.c"

49
lib/find_last_bit.c Normal file
View file

@ -0,0 +1,49 @@
/* find_last_bit.c: fallback find last bit implementation
*
* Copyright (C) 2008 IBM Corporation
* Written by Rusty Russell <rusty@rustcorp.com.au>
* (Inspired by David Howells' find_next_bit implementation)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/export.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#ifndef find_last_bit
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
unsigned long words;
unsigned long tmp;
/* Start at final word. */
words = size / BITS_PER_LONG;
/* Partial final word? */
if (size & (BITS_PER_LONG-1)) {
tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
- (size & (BITS_PER_LONG-1)))));
if (tmp)
goto found;
}
while (words) {
tmp = addr[--words];
if (tmp) {
found:
return words * BITS_PER_LONG + __fls(tmp);
}
}
/* Not found */
return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif
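/*
 * Illustration only -- not part of the original file. Note the "not found"
 * convention: find_last_bit() returns @size when no bit is set. A
 * hypothetical caller that maps that to a sentinel:
 */
static unsigned long my_highest_id(const unsigned long *ids,
				   unsigned long nbits)
{
	unsigned long last = find_last_bit(ids, nbits);

	return last == nbits ? -1UL : last;	/* -1UL: bitmap empty */
}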

285
lib/find_next_bit.c Normal file
View file

@ -0,0 +1,285 @@
/* find_next_bit.c: fallback find next bit implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/export.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_next_bit);
#endif
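/*
 * Illustration only -- not part of the original file. The canonical
 * iteration idiom built on these helpers; the for_each_set_bit() macro in
 * <linux/bitops.h> expands to essentially this loop.
 */
static void my_visit_set_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		pr_debug("bit %lu is set\n", bit);
}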
#ifndef find_next_zero_bit
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset %= BITS_PER_LONG;
if (offset) {
tmp = *(p++);
tmp |= ~0UL >> (BITS_PER_LONG - offset);
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found_middle:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found:
return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_first_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = (*p) | (~0UL << size);
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found:
return result + ffz(tmp);
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
#ifdef __BIG_ENDIAN
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(const unsigned long * x)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64p((u64 *) x);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32p((u32 *) x);
#else
#error BITS_PER_LONG not defined
#endif
}
/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp |= (~0UL >> (BITS_PER_LONG - offset));
if (size < BITS_PER_LONG)
goto found_first;
if (~tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
if (~(tmp = *(p++)))
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp |= ~0UL << size;
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. Skip ffz */
found_middle:
return result + ffz(tmp);
found_middle_swap:
return result + ffz(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
const unsigned long *p = addr;
unsigned long result = offset & ~(BITS_PER_LONG - 1);
unsigned long tmp;
if (offset >= size)
return size;
p += BITOP_WORD(offset);
size -= result;
offset &= (BITS_PER_LONG - 1UL);
if (offset) {
tmp = ext2_swabp(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG - 1)) {
tmp = *(p++);
if (tmp)
goto found_middle_swap;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = ext2_swabp(p);
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
found_middle_swap:
return result + __ffs(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */

398
lib/flex_array.c Normal file
View file

@ -0,0 +1,398 @@
/*
* Flexible array managed in PAGE_SIZE parts
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2009
*
* Author: Dave Hansen <dave@linux.vnet.ibm.com>
*/
#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/reciprocal_div.h>
struct flex_array_part {
char elements[FLEX_ARRAY_PART_SIZE];
};
/*
* If a user requests an allocation which is small
* enough, we may simply use the space in the
* flex_array->parts[] array to store the user
* data.
*/
static inline int elements_fit_in_base(struct flex_array *fa)
{
int data_size = fa->element_size * fa->total_nr_elements;
if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
return 1;
return 0;
}
/**
* flex_array_alloc - allocate a new flexible array
* @element_size: the size of individual elements in the array
* @total: total number of elements that this should hold
* @flags: page allocation flags to use for base array
*
* Note: all locking must be provided by the caller.
*
* @total is used to size internal structures. If the user ever
* accesses any array indexes >=@total, it will produce errors.
*
* The maximum number of elements is defined as: the number of
* elements that can be stored in a page times the number of
* page pointers that we can fit in the base structure or (using
* integer math):
*
* (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
*
* Here's a table showing example capacities. Note that the maximum
* index that the get/put() functions accept is just nr_objects-1. This
* basically means that you get 4MB of storage on 32-bit and 2MB on
* 64-bit.
*
*
* Element size | Objects | Objects |
* PAGE_SIZE=4k | 32-bit | 64-bit |
* ---------------------------------|
* 1 bytes | 4177920 | 2088960 |
* 2 bytes | 2088960 | 1044480 |
* 3 bytes | 1392300 | 696150 |
* 4 bytes | 1044480 | 522240 |
* 32 bytes | 130560 | 65408 |
* 33 bytes | 126480 | 63240 |
* 2048 bytes | 2040 | 1020 |
* 2049 bytes | 1020 | 510 |
* void * | 1044480 | 261120 |
*
* Since 64-bit pointers are twice the size, we lose half the
* capacity in the base structure. Also note that no effort is made
* to efficiently pack objects across page boundaries.
*/
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags)
{
struct flex_array *ret;
int elems_per_part = 0;
int max_size = 0;
struct reciprocal_value reciprocal_elems = { 0 };
if (element_size) {
elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
reciprocal_elems = reciprocal_value(elems_per_part);
max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
}
/* max_size will end up 0 if element_size > PAGE_SIZE */
if (total > max_size)
return NULL;
ret = kzalloc(sizeof(struct flex_array), flags);
if (!ret)
return NULL;
ret->element_size = element_size;
ret->total_nr_elements = total;
ret->elems_per_part = elems_per_part;
ret->reciprocal_elems = reciprocal_elems;
if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
memset(&ret->parts[0], FLEX_ARRAY_FREE,
FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
EXPORT_SYMBOL(flex_array_alloc);
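/*
 * Illustration only -- not part of the original file. A worked sizing
 * example for 4-byte elements and PAGE_SIZE == 4096: each page-sized part
 * holds 4096/4 = 1024 elements, and with the 1020 base part pointers
 * implied by the table above that gives the 1044480-element 32-bit
 * capacity listed there. my_make_u32_array() is hypothetical.
 */
static struct flex_array *my_make_u32_array(unsigned int n, gfp_t gfp)
{
	/* returns NULL if n exceeds the capacity above or on OOM */
	return flex_array_alloc(sizeof(u32), n, gfp);
}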
static int fa_element_to_part_nr(struct flex_array *fa,
unsigned int element_nr)
{
/*
* if element_size == 0 we don't get here, so we never touch
* the zeroed fa->reciprocal_elems, which would yield invalid
* results
*/
return reciprocal_divide(element_nr, fa->reciprocal_elems);
}
/**
* flex_array_free_parts - just free the second-level pages
* @fa: the flex array from which to free parts
*
* This is to be used in cases where the base 'struct flex_array'
* has been statically allocated and should not be freed.
*/
void flex_array_free_parts(struct flex_array *fa)
{
int part_nr;
if (elements_fit_in_base(fa))
return;
for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
kfree(fa->parts[part_nr]);
}
EXPORT_SYMBOL(flex_array_free_parts);
void flex_array_free(struct flex_array *fa)
{
flex_array_free_parts(fa);
kfree(fa);
}
EXPORT_SYMBOL(flex_array_free);
static unsigned int index_inside_part(struct flex_array *fa,
unsigned int element_nr,
unsigned int part_nr)
{
unsigned int part_offset;
part_offset = element_nr - part_nr * fa->elems_per_part;
return part_offset * fa->element_size;
}
static struct flex_array_part *
__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
struct flex_array_part *part = fa->parts[part_nr];
if (!part) {
part = kmalloc(sizeof(struct flex_array_part), flags);
if (!part)
return NULL;
if (!(flags & __GFP_ZERO))
memset(part, FLEX_ARRAY_FREE,
sizeof(struct flex_array_part));
fa->parts[part_nr] = part;
}
return part;
}
/**
* flex_array_put - copy data into the array at @element_nr
* @fa: the flex array to copy data into
* @element_nr: index of the position in which to insert
* the new element.
* @src: address of data to copy into the array
* @flags: page allocation flags to use for array expansion
*
*
* Note that this *copies* the contents of @src into
* the array. If you are trying to store an array of
* pointers, make sure to pass in &ptr instead of ptr.
* You may instead wish to use the flex_array_put_ptr()
* helper function.
*
* Locking must be provided by the caller.
*/
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags)
{
int part_nr = 0;
struct flex_array_part *part;
void *dst;
if (element_nr >= fa->total_nr_elements)
return -ENOSPC;
if (!fa->element_size)
return 0;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else {
part_nr = fa_element_to_part_nr(fa, element_nr);
part = __fa_get_part(fa, part_nr, flags);
if (!part)
return -ENOMEM;
}
dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
memcpy(dst, src, fa->element_size);
return 0;
}
EXPORT_SYMBOL(flex_array_put);
/**
* flex_array_clear - clear element in array at @element_nr
* @fa: the flex array of the element.
* @element_nr: index of the position to clear.
*
* Locking must be provided by the caller.
*/
int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = 0;
struct flex_array_part *part;
void *dst;
if (element_nr >= fa->total_nr_elements)
return -ENOSPC;
if (!fa->element_size)
return 0;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else {
part_nr = fa_element_to_part_nr(fa, element_nr);
part = fa->parts[part_nr];
if (!part)
return -EINVAL;
}
dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
memset(dst, FLEX_ARRAY_FREE, fa->element_size);
return 0;
}
EXPORT_SYMBOL(flex_array_clear);
/**
* flex_array_prealloc - guarantee that array space exists
* @fa: the flex array for which to preallocate parts
* @start: index of first array element for which space is allocated
* @nr_elements: number of elements for which space is allocated
* @flags: page allocation flags
*
* This will guarantee that no future calls to flex_array_put()
* will allocate memory. It can be used if you are expecting to
* be holding a lock or in some atomic context while writing
* data into the array.
*
* Locking must be provided by the caller.
*/
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags)
{
int start_part;
int end_part;
int part_nr;
unsigned int end;
struct flex_array_part *part;
if (!start && !nr_elements)
return 0;
if (start >= fa->total_nr_elements)
return -ENOSPC;
if (!nr_elements)
return 0;
end = start + nr_elements - 1;
if (end >= fa->total_nr_elements)
return -ENOSPC;
if (!fa->element_size)
return 0;
if (elements_fit_in_base(fa))
return 0;
start_part = fa_element_to_part_nr(fa, start);
end_part = fa_element_to_part_nr(fa, end);
for (part_nr = start_part; part_nr <= end_part; part_nr++) {
part = __fa_get_part(fa, part_nr, flags);
if (!part)
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(flex_array_prealloc);
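/*
 * Illustration only -- not part of the original file. The intended
 * prealloc pattern: allocate parts with GFP_KERNEL before taking the
 * lock, so the later put cannot allocate. Assumes the array was created
 * with element_size == sizeof(u32); all names are hypothetical.
 */
static int my_store(struct flex_array *fa, spinlock_t *lock,
		    unsigned int idx, u32 val)
{
	int err = flex_array_prealloc(fa, idx, 1, GFP_KERNEL);

	if (err)
		return err;
	spin_lock(lock);
	err = flex_array_put(fa, idx, &val, GFP_ATOMIC);  /* no alloc now */
	spin_unlock(lock);
	return err;
}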
/**
* flex_array_get - pull data back out of the array
* @fa: the flex array from which to extract data
* @element_nr: index of the element to fetch from the array
*
* Returns a pointer to the data at index @element_nr. Note
* that this is a copy of the data that was passed in. If you
* are using this to store pointers, you'll get back &ptr. You
* may instead wish to use the flex_array_get_ptr helper.
*
* Locking must be provided by the caller.
*/
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = 0;
struct flex_array_part *part;
if (!fa->element_size)
return NULL;
if (element_nr >= fa->total_nr_elements)
return NULL;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else {
part_nr = fa_element_to_part_nr(fa, element_nr);
part = fa->parts[part_nr];
if (!part)
return NULL;
}
return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
EXPORT_SYMBOL(flex_array_get);
/**
* flex_array_get_ptr - pull a ptr back out of the array
* @fa: the flex array from which to extract data
* @element_nr: index of the element to fetch from the array
*
* Returns the pointer placed in the flex array at element_nr using
* flex_array_put_ptr(). This function should not be called if the
* element in question was not set using the _put_ptr() helper.
*/
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
void **tmp;
tmp = flex_array_get(fa, element_nr);
if (!tmp)
return NULL;
return *tmp;
}
EXPORT_SYMBOL(flex_array_get_ptr);
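/*
 * Illustration only -- not part of the original file. Storing pointers in
 * an array allocated with element_size == sizeof(void *): note the &page
 * passed to the put, and the plain pointer coming back from the _ptr
 * getter. my_stash_page() is hypothetical.
 */
static int my_stash_page(struct flex_array *fa, unsigned int idx,
			 struct page *page)
{
	return flex_array_put(fa, idx, &page, GFP_KERNEL);
	/* later: struct page *p = flex_array_get_ptr(fa, idx); */
}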
static int part_is_free(struct flex_array_part *part)
{
int i;
for (i = 0; i < sizeof(struct flex_array_part); i++)
if (part->elements[i] != FLEX_ARRAY_FREE)
return 0;
return 1;
}
/**
* flex_array_shrink - free unused second-level pages
* @fa: the flex array to shrink
*
* Frees all second-level pages that consist solely of unused
* elements. Returns the number of pages freed.
*
* Locking must be provided by the caller.
*/
int flex_array_shrink(struct flex_array *fa)
{
struct flex_array_part *part;
int part_nr;
int ret = 0;
if (!fa->total_nr_elements || !fa->element_size)
return 0;
if (elements_fit_in_base(fa))
return ret;
for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
part = fa->parts[part_nr];
if (!part)
continue;
if (part_is_free(part)) {
fa->parts[part_nr] = NULL;
kfree(part);
ret++;
}
}
return ret;
}
EXPORT_SYMBOL(flex_array_shrink);

272
lib/flex_proportions.c Normal file
View file

@ -0,0 +1,272 @@
/*
* Floating proportions with flexible aging period
*
* Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
*
* The goal of this code is: Given different types of event, measure proportion
* of each type of event over time. The proportions are measured with
* exponentially decaying history to give smooth transitions. A formula
* expressing proportion of event of type 'j' is:
*
* p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
*
* Where x_{i,j} is j's number of events in i-th last time period and x_i is
* total number of events in i-th last time period.
*
* Note that p_{j}'s are normalised, i.e.
*
* \Sum_{j} p_{j} = 1,
*
* This formula can be straightforwardly computed by maintaining a denominator
* (let's call it 'd') and for each event type its numerator (let's call it
* 'n_j'). When an event of type 'j' happens, we simply need to do:
* n_j++; d++;
*
* When a new period is declared, we could do:
* d /= 2
* for each j
* n_j /= 2
*
* To avoid iteration over all event types, we instead shift numerator of event
* j lazily when someone asks for a proportion of event j or when event j
* occurs. This can be trivially implemented by remembering the last period in
* which something happened with the proportion of type j.
*/
#include <linux/flex_proportions.h>
int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
int err;
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
err = percpu_counter_init(&p->events, 1, gfp);
if (err)
return err;
seqcount_init(&p->sequence);
return 0;
}
void fprop_global_destroy(struct fprop_global *p)
{
percpu_counter_destroy(&p->events);
}
/*
* Declare @periods new periods. It is up to the caller to make sure period
* transitions cannot happen in parallel.
*
* The function returns true if the proportions are still defined and false
* if aging zeroed out all events. This can be used to detect whether declaring
* further periods has any effect.
*/
bool fprop_new_period(struct fprop_global *p, int periods)
{
s64 events;
unsigned long flags;
local_irq_save(flags);
events = percpu_counter_sum(&p->events);
/*
* Don't do anything if there are no events.
*/
if (events <= 1) {
local_irq_restore(flags);
return false;
}
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
/* Use addition to avoid losing events happening between sum and set */
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
local_irq_restore(flags);
return true;
}
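/*
 * Illustration only -- not part of the original file. Aging is typically
 * driven from a periodic timer, loosely following the writeback
 * completion-proportion timer; my_timer and MY_PERIOD are hypothetical,
 * and the pre-4.15 timer callback signature is assumed to match this tree.
 */
static struct timer_list my_timer;
#define MY_PERIOD	(HZ / 4)

static void my_period_timer(unsigned long data)
{
	struct fprop_global *g = (struct fprop_global *)data;

	/* re-arm only while there are still events to age out */
	if (fprop_new_period(g, 1))
		mod_timer(&my_timer, jiffies + MY_PERIOD);
}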
/*
* ---- SINGLE ----
*/
int fprop_local_init_single(struct fprop_local_single *pl)
{
pl->events = 0;
pl->period = 0;
raw_spin_lock_init(&pl->lock);
return 0;
}
void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}
static void fprop_reflect_period_single(struct fprop_global *p,
struct fprop_local_single *pl)
{
unsigned int period = p->period;
unsigned long flags;
/* Fast path - period didn't change */
if (pl->period == period)
return;
raw_spin_lock_irqsave(&pl->lock, flags);
/* Someone updated pl->period while we were spinning? */
if (pl->period >= period) {
raw_spin_unlock_irqrestore(&pl->lock, flags);
return;
}
/* Aging zeroed our fraction? */
if (period - pl->period < BITS_PER_LONG)
pl->events >>= period - pl->period;
else
pl->events = 0;
pl->period = period;
raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
fprop_reflect_period_single(p, pl);
pl->events++;
percpu_counter_add(&p->events, 1);
}
/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
struct fprop_local_single *pl,
unsigned long *numerator, unsigned long *denominator)
{
unsigned int seq;
s64 num, den;
do {
seq = read_seqcount_begin(&p->sequence);
fprop_reflect_period_single(p, pl);
num = pl->events;
den = percpu_counter_read_positive(&p->events);
} while (read_seqcount_retry(&p->sequence, seq));
/*
* Make fraction <= 1 and denominator > 0 even in presence of percpu
* counter errors
*/
if (den <= num) {
if (num)
den = num;
else
den = 1;
}
*denominator = den;
*numerator = num;
}
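/*
 * Illustration only -- not part of the original file. A single-type round
 * trip: count an event, then read back this type's share of all recent
 * events. my_sample() is hypothetical; init and error handling are elided.
 */
static void my_sample(struct fprop_global *g, struct fprop_local_single *l)
{
	unsigned long num, den;

	__fprop_inc_single(g, l);		/* one event of this type */
	fprop_fraction_single(g, l, &num, &den);
	pr_debug("fraction = %lu/%lu\n", num, den);
}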
/*
* ---- PERCPU ----
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
int err;
err = percpu_counter_init(&pl->events, 0, gfp);
if (err)
return err;
pl->period = 0;
raw_spin_lock_init(&pl->lock);
return 0;
}
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
percpu_counter_destroy(&pl->events);
}
static void fprop_reflect_period_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl)
{
unsigned int period = p->period;
unsigned long flags;
/* Fast path - period didn't change */
if (pl->period == period)
return;
raw_spin_lock_irqsave(&pl->lock, flags);
/* Someone updated pl->period while we were spinning? */
if (pl->period >= period) {
raw_spin_unlock_irqrestore(&pl->lock, flags);
return;
}
/* Aging zeroed our fraction? */
if (period - pl->period < BITS_PER_LONG) {
s64 val = percpu_counter_read(&pl->events);
if (val < (nr_cpu_ids * PROP_BATCH))
val = percpu_counter_sum(&pl->events);
__percpu_counter_add(&pl->events,
-val + (val >> (period-pl->period)), PROP_BATCH);
} else
percpu_counter_set(&pl->events, 0);
pl->period = period;
raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
fprop_reflect_period_percpu(p, pl);
__percpu_counter_add(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
}
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl,
unsigned long *numerator, unsigned long *denominator)
{
unsigned int seq;
s64 num, den;
do {
seq = read_seqcount_begin(&p->sequence);
fprop_reflect_period_percpu(p, pl);
num = percpu_counter_read_positive(&pl->events);
den = percpu_counter_read_positive(&p->events);
} while (read_seqcount_retry(&p->sequence, seq));
/*
* Make fraction <= 1 and denominator > 0 even in presence of percpu
* counter errors
*/
if (den <= num) {
if (num)
den = num;
else
den = 1;
}
*denominator = den;
*numerator = num;
}
/*
* Like __fprop_inc_percpu() except that event is counted only if the given
* type has fraction smaller than @max_frac/FPROP_FRAC_BASE
*/
void __fprop_inc_percpu_max(struct fprop_global *p,
struct fprop_local_percpu *pl, int max_frac)
{
if (unlikely(max_frac < FPROP_FRAC_BASE)) {
unsigned long numerator, denominator;
fprop_fraction_percpu(p, pl, &numerator, &denominator);
if (numerator >
(((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
return;
} else
fprop_reflect_period_percpu(p, pl);
__percpu_counter_add(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
}

126
lib/fonts/Kconfig Normal file
View file

@ -0,0 +1,126 @@
#
# Font configuration
#
config FONT_SUPPORT
tristate
if FONT_SUPPORT
config FONTS
bool "Select compiled-in fonts"
depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
help
Say Y here if you would like to use fonts other than the default
one your frame buffer console usually uses.
Note that the answer to this question won't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about foreign fonts.
If unsure, say N (the default choices are safe).
config FONT_8x8
bool "VGA 8x8 font" if FONTS
depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
default y if !SPARC && !FONTS
help
This is the "high resolution" font for the VGA frame buffer (the one
provided by the text console 80x50 (and higher) modes).
Note that this is a poor quality font. The VGA 8x16 font is quite a
lot more readable.
Given the resolution provided by the frame buffer device, answering N
here is safe.
config FONT_8x16
bool "VGA 8x16 font" if FONTS
default y if !SPARC && !FONTS
help
This is the "high resolution" font for the VGA frame buffer (the one
provided by the VGA text console 80x25 mode).
If unsure, say Y.
config FONT_6x11
bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
default y if !SPARC && !FONTS && MAC
help
Small console font with Macintosh-style high-half glyphs. Some Mac
framebuffer drivers don't support this one at all.
config FONT_7x14
bool "console 7x14 font (not supported by all drivers)" if FONTS
depends on FRAMEBUFFER_CONSOLE
help
Console font with characters just a bit smaller than the default.
If the standard 8x16 font is a little too big for you, say Y.
Otherwise, say N.
config FONT_PEARL_8x8
bool "Pearl (old m68k) console 8x8 font" if FONTS
depends on FRAMEBUFFER_CONSOLE
default y if !SPARC && !FONTS && AMIGA
help
Small console font with PC-style control-character and high-half
glyphs.
config FONT_ACORN_8x8
bool "Acorn console 8x8 font" if FONTS
depends on FRAMEBUFFER_CONSOLE
default y if !SPARC && !FONTS && ARM && ARCH_ACORN
help
Small console font with PC-style control characters and high-half
glyphs.
config FONT_MINI_4x6
bool "Mini 4x6 font"
depends on !SPARC && FONTS
config FONT_6x10
bool "Medium-size 6x10 font"
depends on !SPARC && FONTS
help
Medium-size console font. Suitable for framebuffer consoles on
embedded devices with a 320x240 screen, to get a reasonable number
of characters (53x24) that are still at a readable size.
config FONT_SUN8x16
bool "Sparc console 8x16 font"
depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
help
This is the high resolution console font for Sun machines. Say Y.
config FONT_SUN12x22
bool "Sparc console 12x22 font (not supported by all drivers)"
depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
help
This is the high resolution console font for Sun machines with very
big letters (like the letters used in the SPARC PROM). If the
standard font is unreadable for you, say Y, otherwise say N.
config FONT_10x18
bool "console 10x18 font (not supported by all drivers)" if FONTS
depends on FRAMEBUFFER_CONSOLE
help
This is a high resolution console font for machines with very
big letters. It fits between the sun 12x22 and the normal 8x16 font.
If other fonts are too big or too small for you, say Y, otherwise say N.
config FONT_AUTOSELECT
def_bool y
depends on !FONT_8x8
depends on !FONT_6x11
depends on !FONT_7x14
depends on !FONT_PEARL_8x8
depends on !FONT_ACORN_8x8
depends on !FONT_MINI_4x6
depends on !FONT_6x10
depends on !FONT_SUN8x16
depends on !FONT_SUN12x22
depends on !FONT_10x18
select FONT_8x16
endif # FONT_SUPPORT

19
lib/fonts/Makefile Normal file
View file

@ -0,0 +1,19 @@
# Font handling
font-objs := fonts.o
font-objs-$(CONFIG_FONT_SUN8x16) += font_sun8x16.o
font-objs-$(CONFIG_FONT_SUN12x22) += font_sun12x22.o
font-objs-$(CONFIG_FONT_8x8) += font_8x8.o
font-objs-$(CONFIG_FONT_8x16) += font_8x16.o
font-objs-$(CONFIG_FONT_6x11) += font_6x11.o
font-objs-$(CONFIG_FONT_7x14) += font_7x14.o
font-objs-$(CONFIG_FONT_10x18) += font_10x18.o
font-objs-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o
font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o
font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
font-objs-$(CONFIG_FONT_6x10) += font_6x10.o
font-objs += $(font-objs-y)
obj-$(CONFIG_FONT_SUPPORT) += font.o

5146
lib/fonts/font_10x18.c Normal file

File diff suppressed because it is too large

3086
lib/fonts/font_6x10.c Normal file

File diff suppressed because it is too large

3352
lib/fonts/font_6x11.c Normal file

File diff suppressed because it is too large

4118
lib/fonts/font_7x14.c Normal file

File diff suppressed because it is too large

4633
lib/fonts/font_8x16.c Normal file

File diff suppressed because it is too large

2583
lib/fonts/font_8x8.c Normal file

File diff suppressed because it is too large

275
lib/fonts/font_acorn_8x8.c Normal file
View file

@ -0,0 +1,275 @@
/* Acorn-like font definition, with PC graphics characters */
#include <linux/font.h>
static const unsigned char acorndata_8x8[] = {
/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
/* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */
/* 03 */ 0x6c, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^C */
/* 04 */ 0x10, 0x38, 0x7c, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^D */
/* 05 */ 0x00, 0x18, 0x3c, 0xe7, 0xe7, 0x3c, 0x18, 0x00, /* ^E */
/* 06 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 07 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 08 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 09 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 0A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 0B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 0C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 0D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 0E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 0F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 10 */ 0x00, 0x60, 0x78, 0x7e, 0x7e, 0x78, 0x60, 0x00, /* |> */
/* 11 */ 0x00, 0x06, 0x1e, 0x7e, 0x7e, 0x1e, 0x06, 0x00, /* <| */
/* 12 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 13 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 14 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 15 */ 0x3c, 0x60, 0x3c, 0x66, 0x3c, 0x06, 0x3c, 0x00,
/* 16 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 17 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 18 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 19 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 1A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 1B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 1C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 1D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 1E */ 0x00, 0x18, 0x18, 0x3c, 0x3c, 0x7e, 0x7e, 0x00, /* /\ */
/* 1F */ 0x00, 0x7e, 0x7e, 0x3c, 0x3c, 0x18, 0x18, 0x00, /* \/ */
/* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* */
/* 21 */ 0x18, 0x3c, 0x3c, 0x18, 0x18, 0x00, 0x18, 0x00, /* ! */
/* 22 */ 0x6C, 0x6C, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, /* " */
/* 23 */ 0x36, 0x36, 0x7F, 0x36, 0x7F, 0x36, 0x36, 0x00, /* # */
/* 24 */ 0x0C, 0x3F, 0x68, 0x3E, 0x0B, 0x7E, 0x18, 0x00, /* $ */
/* 25 */ 0x60, 0x66, 0x0C, 0x18, 0x30, 0x66, 0x06, 0x00, /* % */
/* 26 */ 0x38, 0x6C, 0x6C, 0x38, 0x6D, 0x66, 0x3B, 0x00, /* & */
/* 27 */ 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, /* ' */
/* 28 */ 0x0C, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x00, /* ( */
/* 29 */ 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x18, 0x30, 0x00, /* ) */
/* 2A */ 0x00, 0x18, 0x7E, 0x3C, 0x7E, 0x18, 0x00, 0x00, /* * */
/* 2B */ 0x00, 0x18, 0x18, 0x7E, 0x18, 0x18, 0x00, 0x00, /* + */
/* 2C */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, /* , */
/* 2D */ 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, /* - */
/* 2E */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, /* . */
/* 2F */ 0x00, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00, 0x00, /* / */
/* 30 */ 0x3C, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x3C, 0x00, /* 0 */
/* 31 */ 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* 1 */
/* 32 */ 0x3C, 0x66, 0x06, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* 2 */
/* 33 */ 0x3C, 0x66, 0x06, 0x1C, 0x06, 0x66, 0x3C, 0x00, /* 3 */
/* 34 */ 0x0C, 0x1C, 0x3C, 0x6C, 0x7E, 0x0C, 0x0C, 0x00, /* 4 */
/* 35 */ 0x7E, 0x60, 0x7C, 0x06, 0x06, 0x66, 0x3C, 0x00, /* 5 */
/* 36 */ 0x1C, 0x30, 0x60, 0x7C, 0x66, 0x66, 0x3C, 0x00, /* 6 */
/* 37 */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x00, /* 7 */
/* 38 */ 0x3C, 0x66, 0x66, 0x3C, 0x66, 0x66, 0x3C, 0x00, /* 8 */
/* 39 */ 0x3C, 0x66, 0x66, 0x3E, 0x06, 0x0C, 0x38, 0x00, /* 9 */
/* 3A */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, /* : */
/* 3B */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30, /* ; */
/* 3C */ 0x0C, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0C, 0x00, /* < */
/* 3D */ 0x00, 0x00, 0x7E, 0x00, 0x7E, 0x00, 0x00, 0x00, /* = */
/* 3E */ 0x30, 0x18, 0x0C, 0x06, 0x0C, 0x18, 0x30, 0x00, /* > */
/* 3F */ 0x3C, 0x66, 0x0C, 0x18, 0x18, 0x00, 0x18, 0x00, /* ? */
/* 40 */ 0x3C, 0x66, 0x6E, 0x6A, 0x6E, 0x60, 0x3C, 0x00, /* @ */
/* 41 */ 0x3C, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* A */
/* 42 */ 0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, /* B */
/* 43 */ 0x3C, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3C, 0x00, /* C */
/* 44 */ 0x78, 0x6C, 0x66, 0x66, 0x66, 0x6C, 0x78, 0x00, /* D */
/* 45 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x7E, 0x00, /* E */
/* 46 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x60, 0x00, /* F */
/* 47 */ 0x3C, 0x66, 0x60, 0x6E, 0x66, 0x66, 0x3C, 0x00, /* G */
/* 48 */ 0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* H */
/* 49 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* I */
/* 4A */ 0x3E, 0x0C, 0x0C, 0x0C, 0x0C, 0x6C, 0x38, 0x00, /* J */
/* 4B */ 0x66, 0x6C, 0x78, 0x70, 0x78, 0x6C, 0x66, 0x00, /* K */
/* 4C */ 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7E, 0x00, /* L */
/* 4D */ 0x63, 0x77, 0x7F, 0x6B, 0x6B, 0x63, 0x63, 0x00, /* M */
/* 4E */ 0x66, 0x66, 0x76, 0x7E, 0x6E, 0x66, 0x66, 0x00, /* N */
/* 4F */ 0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* O */
/* 50 */ 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x60, 0x00, /* P */
/* 51 */ 0x3C, 0x66, 0x66, 0x66, 0x6A, 0x6C, 0x36, 0x00, /* Q */
/* 52 */ 0x7C, 0x66, 0x66, 0x7C, 0x6C, 0x66, 0x66, 0x00, /* R */
/* 53 */ 0x3C, 0x66, 0x60, 0x3C, 0x06, 0x66, 0x3C, 0x00, /* S */
/* 54 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* T */
/* 55 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* U */
/* 56 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* V */
/* 57 */ 0x63, 0x63, 0x6B, 0x6B, 0x7F, 0x77, 0x63, 0x00, /* W */
/* 58 */ 0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, /* X */
/* 59 */ 0x66, 0x66, 0x66, 0x3C, 0x18, 0x18, 0x18, 0x00, /* Y */
/* 5A */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x7E, 0x00, /* Z */
/* 5B */ 0x7C, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7C, 0x00, /* [ */
/* 5C */ 0x00, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x00, 0x00, /* \ */
/* 5D */ 0x3E, 0x06, 0x06, 0x06, 0x06, 0x06, 0x3E, 0x00, /* ] */
/* 5E */ 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^ */
/* 5F */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, /* _ */
/* 60 */ 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ` */
/* 61 */ 0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3E, 0x00, /* a */
/* 62 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x7C, 0x00, /* b */
/* 63 */ 0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, /* c */
/* 64 */ 0x06, 0x06, 0x3E, 0x66, 0x66, 0x66, 0x3E, 0x00, /* d */
/* 65 */ 0x00, 0x00, 0x3C, 0x66, 0x7E, 0x60, 0x3C, 0x00, /* e */
/* 66 */ 0x1C, 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x00, /* f */
/* 67 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* g */
/* 68 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* h */
/* 69 */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3C, 0x00, /* i */
/* 6A */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x70, /* j */
/* 6B */ 0x60, 0x60, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, /* k */
/* 6C */ 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x00, /* l */
/* 6D */ 0x00, 0x00, 0x36, 0x7F, 0x6B, 0x6B, 0x63, 0x00, /* m */
/* 6E */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* n */
/* 6F */ 0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, /* o */
/* 70 */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, /* p */
/* 71 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x07, /* q */
/* 72 */ 0x00, 0x00, 0x6C, 0x76, 0x60, 0x60, 0x60, 0x00, /* r */
/* 73 */ 0x00, 0x00, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x00, /* s */
/* 74 */ 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x1C, 0x00, /* t */
/* 75 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3E, 0x00, /* u */
/* 76 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* v */
/* 77 */ 0x00, 0x00, 0x63, 0x6B, 0x6B, 0x7F, 0x36, 0x00, /* w */
/* 78 */ 0x00, 0x00, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x00, /* x */
/* 79 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* y */
/* 7A */ 0x00, 0x00, 0x7E, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* z */
/* 7B */ 0x0C, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0C, 0x00, /* { */
/* 7C */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* | */
/* 7D */ 0x30, 0x18, 0x18, 0x0E, 0x18, 0x18, 0x30, 0x00, /* } */
/* 7E */ 0x31, 0x6B, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, /* ~ */
/* 7F */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /*  */
/* 80 */ 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x30, 0x60,
/* 81 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x00,
/* 82 */ 0x0c, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
/* 83 */ 0x18, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
/* 84 */ 0x66, 0x00, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
/* 85 */ 0x30, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
/* 86 */ 0x3c, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
/* 87 */ 0x00, 0x00, 0x3c, 0x66, 0x60, 0x66, 0x3c, 0x60,
/* 88 */ 0x3c, 0x66, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
/* 89 */ 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
/* 8A */ 0x30, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
/* 8B */ 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
/* 8C */ 0x3c, 0x66, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
/* 8D */ 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
/* 8E */ 0x66, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00,
/* 8F */ 0x18, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00,
/* 90 */ 0x0c, 0x18, 0x7e, 0x60, 0x7c, 0x60, 0x7e, 0x00,
/* 91 */ 0x00, 0x00, 0x3f, 0x0d, 0x3f, 0x6c, 0x3f, 0x00,
/* 92 */ 0x3f, 0x66, 0x66, 0x7f, 0x66, 0x66, 0x67, 0x00,
/* 93 */ 0x3c, 0x66, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
/* 94 */ 0x66, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
/* 95 */ 0x30, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
/* 96 */ 0x3c, 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00,
/* 97 */ 0x30, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00,
/* 98 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x3c,
/* 99 */ 0x66, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x00,
/* 9A */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00,
/* 9B */ 0x08, 0x3e, 0x6b, 0x68, 0x6b, 0x3e, 0x08, 0x00,
/* 9C */ 0x1c, 0x36, 0x30, 0x7c, 0x30, 0x30, 0x7e, 0x00,
/* 9D */ 0x66, 0x3c, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00,
/* 9E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* 9F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* A0 */ 0x0c, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
/* A1 */ 0x0c, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
/* A2 */ 0x0c, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
/* A3 */ 0x0c, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00,
/* A4 */ 0x36, 0x6c, 0x00, 0x7c, 0x66, 0x66, 0x66, 0x00,
/* A5 */ 0x36, 0x6c, 0x00, 0x66, 0x76, 0x6e, 0x66, 0x00,
/* A6 */ 0x1c, 0x06, 0x1e, 0x36, 0x1e, 0x00, 0x3e, 0x00,
/* A7 */ 0x1c, 0x36, 0x36, 0x36, 0x1c, 0x00, 0x3e, 0x00,
/* A8 */ 0x18, 0x00, 0x18, 0x18, 0x30, 0x66, 0x3c, 0x00,
/* A9 */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* AA */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* AB */ 0x40, 0xc0, 0x40, 0x4f, 0x41, 0x0f, 0x08, 0x0f,
/* AC */ 0x40, 0xc0, 0x40, 0x48, 0x48, 0x0a, 0x0f, 0x02,
/* AD */ 0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00,
/* AE */ 0x00, 0x33, 0x66, 0xcc, 0xcc, 0x66, 0x33, 0x00,
/* AF */ 0x00, 0xcc, 0x66, 0x33, 0x33, 0x66, 0xcc, 0x00,
/* B0 */ 0x22, 0x88, 0x22, 0x88, 0x22, 0x88, 0x22, 0x88,
/* B1 */ 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
/* B2 */ 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
/* B3 */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
/* B4 */ 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
/* B5 */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18,
/* B6 */ 0x66, 0x66, 0x66, 0xe6, 0x66, 0x66, 0x66, 0x66,
/* B7 */ 0x00, 0x00, 0x00, 0xfe, 0x66, 0x66, 0x66, 0x66,
/* B8 */ 0x00, 0x00, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18,
/* B9 */ 0x66, 0x66, 0xe6, 0x06, 0xe6, 0x66, 0x66, 0x66,
/* BA */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
/* BB */ 0x00, 0x00, 0xfe, 0x06, 0xe6, 0x66, 0x66, 0x66,
/* BC */ 0x66, 0x66, 0xe6, 0x06, 0xfe, 0x00, 0x00, 0x00,
/* BD */ 0x66, 0x66, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00,
/* BE */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x00, 0x00, 0x00,
/* BF */ 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18,
/* C0 */ 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
/* C1 */ 0x18, 0x18, 0x18, 0xff, 0x00, 0x00, 0x00, 0x00,
/* C2 */ 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18,
/* C3 */ 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
/* C4 */ 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
/* C5 */ 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
/* C6 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18,
/* C7 */ 0x66, 0x66, 0x66, 0x67, 0x66, 0x66, 0x66, 0x66,
/* C8 */ 0x66, 0x66, 0x67, 0x60, 0x7f, 0x00, 0x00, 0x00,
/* C9 */ 0x00, 0x00, 0x7f, 0x60, 0x67, 0x66, 0x66, 0x66,
/* CA */ 0x66, 0x66, 0xe7, 0x00, 0xff, 0x00, 0x00, 0x00,
/* CB */ 0x00, 0x00, 0xff, 0x00, 0xe7, 0x66, 0x66, 0x66,
/* CC */ 0x66, 0x66, 0x67, 0x60, 0x67, 0x66, 0x66, 0x66,
/* CD */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00,
/* CE */ 0x66, 0x66, 0xe7, 0x00, 0xe7, 0x66, 0x66, 0x66,
/* CF */ 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00,
/* D0 */ 0x66, 0x66, 0x66, 0xff, 0x00, 0x00, 0x00, 0x00,
/* D1 */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18,
/* D2 */ 0x00, 0x00, 0x00, 0xff, 0x66, 0x66, 0x66, 0x66,
/* D3 */ 0x66, 0x66, 0x66, 0x7f, 0x00, 0x00, 0x00, 0x00,
/* D4 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00,
/* D5 */ 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18,
/* D6 */ 0x00, 0x00, 0x00, 0x7f, 0x66, 0x66, 0x66, 0x66,
/* D7 */ 0x66, 0x66, 0x66, 0xff, 0x66, 0x66, 0x66, 0x66,
/* D8 */ 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18,
/* D9 */ 0x18, 0x18, 0x18, 0xf8, 0x00, 0x00, 0x00, 0x00,
/* DA */ 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18,
/* DB */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
/* DC */ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
/* DD */ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
/* DE */ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
/* DF */ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
/* E0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E1 */ 0x3c, 0x66, 0x66, 0x6c, 0x66, 0x66, 0x6c, 0xc0,
/* E2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E6 */ 0x00, 0x00, 0x33, 0x33, 0x33, 0x33, 0x3e, 0x60,
/* E7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E8 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* E9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* EA */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* EB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* EC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* ED */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* EE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* EF */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F1 */ 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x7e, 0x00,
/* F2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F6 */ 0x00, 0x18, 0x00, 0xff, 0x00, 0x18, 0x00, 0x00,
/* F7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* F8 */ 0x3c, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00,
/* F9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* FA */ 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
/* FB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* FC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00,
/* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
/* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
const struct font_desc font_acorn_8x8 = {
.idx = ACORN8x8_IDX,
.name = "Acorn8x8",
.width = 8,
.height = 8,
.data = acorndata_8x8,
#ifdef CONFIG_ARCH_ACORN
.pref = 20,
#else
.pref = 0,
#endif
};
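/*
 * Illustration only -- not part of the original file. Console drivers pick
 * a compiled-in font up by name (or by resolution) through the
 * <linux/font.h> API; my_pick_font() is hypothetical.
 */
static const struct font_desc *my_pick_font(void)
{
	const struct font_desc *f = find_font("Acorn8x8");

	/* fall back to the best default for a 640x480 display */
	return f ? f : get_default_font(640, 480, -1, -1);
}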

2158
lib/fonts/font_mini_4x6.c Normal file

File diff suppressed because it is too large

2587
lib/fonts/font_pearl_8x8.c Normal file

File diff suppressed because it is too large

6165
lib/fonts/font_sun12x22.c Normal file

File diff suppressed because it is too large

275
lib/fonts/font_sun8x16.c Normal file
View file

@ -0,0 +1,275 @@
#include <linux/font.h>
#define FONTDATAMAX 4096
static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x6c,0xfe,0xfe,0xfe,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x7c,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x18,0x3c,0x3c,0xe7,0xe7,0xe7,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x18,0x3c,0x7e,0xff,0xff,0x7e,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xc3,0xc3,0xe7,0xff,0xff,0xff,0xff,0xff,0xff,
/* */ 0x00,0x00,0x00,0x00,0x00,0x3c,0x66,0x42,0x42,0x66,0x3c,0x00,0x00,0x00,0x00,0x00,
/* */ 0xff,0xff,0xff,0xff,0xff,0xc3,0x99,0xbd,0xbd,0x99,0xc3,0xff,0xff,0xff,0xff,0xff,
/* */ 0x00,0x00,0x1e,0x0e,0x1a,0x32,0x78,0xcc,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x3c,0x66,0x66,0x66,0x66,0x3c,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x3f,0x33,0x3f,0x30,0x30,0x30,0x30,0x70,0xf0,0xe0,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7f,0x63,0x7f,0x63,0x63,0x63,0x63,0x67,0xe7,0xe6,0xc0,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x18,0x18,0xdb,0x3c,0xe7,0x3c,0xdb,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x80,0xc0,0xe0,0xf0,0xf8,0xfe,0xf8,0xf0,0xe0,0xc0,0x80,0x00,0x00,0x00,0x00,
/* */ 0x00,0x02,0x06,0x0e,0x1e,0x3e,0xfe,0x3e,0x1e,0x0e,0x06,0x02,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x66,0x66,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7f,0xdb,0xdb,0xdb,0x7b,0x1b,0x1b,0x1b,0x1b,0x1b,0x00,0x00,0x00,0x00,
/* */ 0x00,0x7c,0xc6,0x60,0x38,0x6c,0xc6,0xc6,0x6c,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xfe,0xfe,0xfe,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x18,0x0c,0xfe,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x30,0x60,0xfe,0x60,0x30,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xc0,0xc0,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x24,0x66,0xff,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x38,0x7c,0x7c,0xfe,0xfe,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0xfe,0xfe,0x7c,0x7c,0x38,0x38,0x10,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*!*/ 0x00,0x00,0x18,0x3c,0x3c,0x3c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00,
/*"*/ 0x00,0x66,0x66,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*#*/ 0x00,0x00,0x00,0x6c,0x6c,0xfe,0x6c,0x6c,0x6c,0xfe,0x6c,0x6c,0x00,0x00,0x00,0x00,
/*$*/ 0x18,0x18,0x7c,0xc6,0xc2,0xc0,0x7c,0x06,0x06,0x86,0xc6,0x7c,0x18,0x18,0x00,0x00,
/*%*/ 0x00,0x00,0x00,0x00,0xc2,0xc6,0x0c,0x18,0x30,0x60,0xc6,0x86,0x00,0x00,0x00,0x00,
/*&*/ 0x00,0x00,0x38,0x6c,0x6c,0x38,0x76,0xdc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/*'*/ 0x00,0x30,0x30,0x30,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*(*/ 0x00,0x00,0x0c,0x18,0x30,0x30,0x30,0x30,0x30,0x30,0x18,0x0c,0x00,0x00,0x00,0x00,
/*)*/ 0x00,0x00,0x30,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x30,0x00,0x00,0x00,0x00,
/***/ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0xff,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00,
/*+*/ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,
/*,*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x18,0x30,0x00,0x00,0x00,
/*-*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*.*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x02,0x06,0x0c,0x18,0x30,0x60,0xc0,0x80,0x00,0x00,0x00,0x00,
/*0*/ 0x00,0x00,0x7c,0xc6,0xc6,0xce,0xde,0xf6,0xe6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*1*/ 0x00,0x00,0x18,0x38,0x78,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x00,0x00,0x00,0x00,
/*2*/ 0x00,0x00,0x7c,0xc6,0x06,0x0c,0x18,0x30,0x60,0xc0,0xc6,0xfe,0x00,0x00,0x00,0x00,
/*3*/ 0x00,0x00,0x7c,0xc6,0x06,0x06,0x3c,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*4*/ 0x00,0x00,0x0c,0x1c,0x3c,0x6c,0xcc,0xfe,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00,
/*5*/ 0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xfc,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*6*/ 0x00,0x00,0x38,0x60,0xc0,0xc0,0xfc,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*7*/ 0x00,0x00,0xfe,0xc6,0x06,0x06,0x0c,0x18,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00,
/*8*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7c,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*9*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7e,0x06,0x06,0x06,0x0c,0x78,0x00,0x00,0x00,0x00,
/*:*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,
/*;*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x30,0x00,0x00,0x00,0x00,
/*<*/ 0x00,0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00,
/*=*/ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*>*/ 0x00,0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00,
/*?*/ 0x00,0x00,0x7c,0xc6,0xc6,0x0c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00,
/*@*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xde,0xde,0xde,0xdc,0xc0,0x7c,0x00,0x00,0x00,0x00,
/*A*/ 0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/*B*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x66,0x66,0x66,0x66,0xfc,0x00,0x00,0x00,0x00,
/*C*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x00,0x00,0x00,0x00,
/*D*/ 0x00,0x00,0xf8,0x6c,0x66,0x66,0x66,0x66,0x66,0x66,0x6c,0xf8,0x00,0x00,0x00,0x00,
/*E*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00,
/*F*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
/*G*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xde,0xc6,0xc6,0x66,0x3a,0x00,0x00,0x00,0x00,
/*H*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/*I*/ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/*J*/ 0x00,0x00,0x1e,0x0c,0x0c,0x0c,0x0c,0x0c,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00,
/*K*/ 0x00,0x00,0xe6,0x66,0x66,0x6c,0x78,0x78,0x6c,0x66,0x66,0xe6,0x00,0x00,0x00,0x00,
/*L*/ 0x00,0x00,0xf0,0x60,0x60,0x60,0x60,0x60,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00,
/*M*/ 0x00,0x00,0xc3,0xe7,0xff,0xff,0xdb,0xc3,0xc3,0xc3,0xc3,0xc3,0x00,0x00,0x00,0x00,
/*N*/ 0x00,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/*O*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*P*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
/*Q*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xd6,0xde,0x7c,0x0c,0x0e,0x00,0x00,
/*R*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x6c,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00,
/*S*/ 0x00,0x00,0x7c,0xc6,0xc6,0x60,0x38,0x0c,0x06,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*T*/ 0x00,0x00,0xff,0xdb,0x99,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/*U*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*V*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00,
/*W*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x66,0x00,0x00,0x00,0x00,
/*X*/ 0x00,0x00,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x3c,0x66,0xc3,0xc3,0x00,0x00,0x00,0x00,
/*Y*/ 0x00,0x00,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/*Z*/ 0x00,0x00,0xff,0xc3,0x86,0x0c,0x18,0x30,0x60,0xc1,0xc3,0xff,0x00,0x00,0x00,0x00,
/*[*/ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00,
/*\*/ 0x00,0x00,0x00,0x80,0xc0,0xe0,0x70,0x38,0x1c,0x0e,0x06,0x02,0x00,0x00,0x00,0x00,
/*]*/ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00,
/*^*/ 0x10,0x38,0x6c,0xc6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*_*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00,
/*`*/ 0x30,0x30,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/*a*/ 0x00,0x00,0x00,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/*b*/ 0x00,0x00,0xe0,0x60,0x60,0x78,0x6c,0x66,0x66,0x66,0x66,0x7c,0x00,0x00,0x00,0x00,
/*c*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc0,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*d*/ 0x00,0x00,0x1c,0x0c,0x0c,0x3c,0x6c,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/*e*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*f*/ 0x00,0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
/*g*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0xcc,0x78,0x00,
/*h*/ 0x00,0x00,0xe0,0x60,0x60,0x6c,0x76,0x66,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00,
/*i*/ 0x00,0x00,0x18,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/*j*/ 0x00,0x00,0x06,0x06,0x00,0x0e,0x06,0x06,0x06,0x06,0x06,0x06,0x66,0x66,0x3c,0x00,
/*k*/ 0x00,0x00,0xe0,0x60,0x60,0x66,0x6c,0x78,0x78,0x6c,0x66,0xe6,0x00,0x00,0x00,0x00,
/*l*/ 0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/*m*/ 0x00,0x00,0x00,0x00,0x00,0xe6,0xff,0xdb,0xdb,0xdb,0xdb,0xdb,0x00,0x00,0x00,0x00,
/*n*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00,
/*o*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*p*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xf0,0x00,
/*q*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0x0c,0x1e,0x00,
/*r*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x76,0x66,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
/*s*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0x60,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00,0x00,
/*t*/ 0x00,0x00,0x10,0x30,0x30,0xfc,0x30,0x30,0x30,0x30,0x36,0x1c,0x00,0x00,0x00,0x00,
/*u*/ 0x00,0x00,0x00,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/*v*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00,
/*w*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x00,0x00,0x00,0x00,
/*x*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0x66,0x3c,0x18,0x3c,0x66,0xc3,0x00,0x00,0x00,0x00,
/*y*/ 0x00,0x00,0x00,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0xf8,0x00,
/*z*/ 0x00,0x00,0x00,0x00,0x00,0xfe,0xcc,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00,
/*{*/ 0x00,0x00,0x0e,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00,0x00,
/*|*/ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
/*}*/ 0x00,0x00,0x70,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00,0x00,
/*~*/ 0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xc6,0xfe,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x0c,0x06,0x7c,0x00,0x00,
/* */ 0x00,0x00,0xcc,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x0c,0x18,0x30,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x10,0x38,0x6c,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0xcc,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x60,0x30,0x18,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x38,0x6c,0x38,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x3c,0x66,0x60,0x60,0x66,0x3c,0x0c,0x06,0x3c,0x00,0x00,0x00,
/* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x66,0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x18,0x3c,0x66,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x60,0x30,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0xc6,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/* */ 0x38,0x6c,0x38,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/* */ 0x18,0x30,0x60,0x00,0xfe,0x66,0x60,0x7c,0x60,0x60,0x66,0xfe,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x1b,0x7e,0xd8,0xdc,0x77,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x3e,0x6c,0xcc,0xcc,0xfe,0xcc,0xcc,0xcc,0xcc,0xce,0x00,0x00,0x00,0x00,
/* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x30,0x78,0xcc,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x60,0x30,0x18,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0xc6,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0x78,0x00,
/* */ 0x00,0xc6,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0xc6,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x18,0x18,0x7e,0xc3,0xc0,0xc0,0xc0,0xc3,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xe6,0xfc,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0xc3,0x66,0x3c,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0xfc,0x66,0x66,0x7c,0x62,0x66,0x6f,0x66,0x66,0x66,0xf3,0x00,0x00,0x00,0x00,
/* */ 0x00,0x0e,0x1b,0x18,0x18,0x18,0x7e,0x18,0x18,0x18,0x18,0x18,0xd8,0x70,0x00,0x00,
/* */ 0x00,0x18,0x30,0x60,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x0c,0x18,0x30,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x18,0x30,0x60,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x18,0x30,0x60,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x76,0xdc,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00,
/* */ 0x76,0xdc,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/* */ 0x00,0x3c,0x6c,0x6c,0x3e,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x7c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x60,0xc0,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x60,0xce,0x9b,0x06,0x0c,0x1f,0x00,0x00,
/* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x66,0xce,0x96,0x3e,0x06,0x06,0x00,0x00,
/* */ 0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x3c,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x36,0x6c,0xd8,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0xd8,0x6c,0x36,0x6c,0xd8,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,
/* */ 0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,
/* */ 0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x3f,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3f,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
/* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
/* */ 0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,
/* */ 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,
/* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0xd8,0xd8,0xd8,0xdc,0x76,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x78,0xcc,0xcc,0xcc,0xd8,0xcc,0xc6,0xc6,0xc6,0xcc,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0xfe,0xc6,0xc6,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0xfe,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0xfe,0xc6,0x60,0x30,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xd8,0xd8,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xc0,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x76,0xdc,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x7e,0x18,0x3c,0x66,0x66,0x66,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0x6c,0x38,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x38,0x6c,0xc6,0xc6,0xc6,0x6c,0x6c,0x6c,0x6c,0xee,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x1e,0x30,0x18,0x0c,0x3e,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xdb,0xdb,0xdb,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x03,0x06,0x7e,0xdb,0xdb,0xf3,0x7e,0x60,0xc0,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x1c,0x30,0x60,0x60,0x7c,0x60,0x60,0x60,0x30,0x1c,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0xff,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x00,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x00,0x7e,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x0e,0x1b,0x1b,0x1b,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x0f,0x0c,0x0c,0x0c,0x0c,0x0c,0xec,0x6c,0x6c,0x3c,0x1c,0x00,0x00,0x00,0x00,
/* */ 0x00,0xd8,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
};
const struct font_desc font_sun_8x16 = {
.idx = SUN8x16_IDX,
.name = "SUN8x16",
.width = 8,
.height = 16,
.data = fontdata_sun8x16,
#ifdef __sparc__
.pref = 10,
#else
.pref = -1,
#endif
};
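/*
 * Layout note (illustrative, inferred from the data above): each glyph
 * occupies 16 consecutive bytes, one byte per scanline, with bit 7 as
 * the leftmost pixel of the 8-pixel-wide row. E.g. the '!' glyph's
 * third byte, 0x18 = 00011000b, lights the two centre pixels:
 *
 *	0x18 -> . . . # # . . .
 */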

157
lib/fonts/fonts.c Normal file
View file

@ -0,0 +1,157 @@
/*
* `Soft' font definitions
*
* Created 1995 by Geert Uytterhoeven
* Rewritten 1998 by Martin Mares <mj@ucw.cz>
*
* 2001 - Documented with DocBook
* - Brad Douglas <brad@neruo.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#if defined(__mc68000__)
#include <asm/setup.h>
#endif
#include <linux/font.h>
#define NO_FONTS
static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_8x8
#undef NO_FONTS
&font_vga_8x8,
#endif
#ifdef CONFIG_FONT_8x16
#undef NO_FONTS
&font_vga_8x16,
#endif
#ifdef CONFIG_FONT_6x11
#undef NO_FONTS
&font_vga_6x11,
#endif
#ifdef CONFIG_FONT_7x14
#undef NO_FONTS
&font_7x14,
#endif
#ifdef CONFIG_FONT_SUN8x16
#undef NO_FONTS
&font_sun_8x16,
#endif
#ifdef CONFIG_FONT_SUN12x22
#undef NO_FONTS
&font_sun_12x22,
#endif
#ifdef CONFIG_FONT_10x18
#undef NO_FONTS
&font_10x18,
#endif
#ifdef CONFIG_FONT_ACORN_8x8
#undef NO_FONTS
&font_acorn_8x8,
#endif
#ifdef CONFIG_FONT_PEARL_8x8
#undef NO_FONTS
&font_pearl_8x8,
#endif
#ifdef CONFIG_FONT_MINI_4x6
#undef NO_FONTS
&font_mini_4x6,
#endif
#ifdef CONFIG_FONT_6x10
#undef NO_FONTS
&font_6x10,
#endif
};
#define num_fonts ARRAY_SIZE(fonts)
#ifdef NO_FONTS
#error No fonts configured.
#endif
/**
* find_font - find a font
* @name: string name of a font
*
* Find a specified font with string name @name.
*
* Returns %NULL if no font found, or a pointer to the
* specified font.
*
*/
const struct font_desc *find_font(const char *name)
{
unsigned int i;
for (i = 0; i < num_fonts; i++)
if (!strcmp(fonts[i]->name, name))
return fonts[i];
return NULL;
}
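/*
 * Example (illustrative only, not part of the original file):
 *
 *	const struct font_desc *f = find_font("SUN8x16");
 *
 *	if (f)
 *		pr_info("found %s (%dx%d)\n", f->name, f->width, f->height);
 */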
/**
* get_default_font - get default font
* @xres: horizontal screen resolution, in pixels
* @yres: vertical screen resolution, in pixels
* @font_w: bit array of supported widths (1 - 32)
* @font_h: bit array of supported heights (1 - 32)
*
* Get the default font for a specified screen size.
* Dimensions are in pixels.
*
* Returns %NULL if no font is found, or a pointer to the
* chosen font.
*
*/
const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
u32 font_h)
{
int i, c, cc;
const struct font_desc *f, *g;
g = NULL;
cc = -10000;
for (i = 0; i < num_fonts; i++) {
f = fonts[i];
c = f->pref;
#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
c = 100;
#endif
#ifdef CONFIG_FONT_6x11
if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
c = 100;
#endif
#endif
if ((yres < 400) == (f->height <= 8))
c += 1000;
if ((font_w & (1 << (f->width - 1))) &&
(font_h & (1 << (f->height - 1))))
c += 1000;
if (c > cc) {
cc = c;
g = f;
}
}
return g;
}
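/*
 * Example (illustrative): pick the preferred font for an 800x600
 * display when every glyph size from 1x1 to 32x32 can be rendered;
 * passing ~0U in @font_w/@font_h places no size restriction:
 *
 *	const struct font_desc *f = get_default_font(800, 600, ~0U, ~0U);
 */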
EXPORT_SYMBOL(find_font);
EXPORT_SYMBOL(get_default_font);
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
MODULE_DESCRIPTION("Console Fonts");
MODULE_LICENSE("GPL");

21
lib/gcd.c Normal file
View file

@ -0,0 +1,21 @@
#include <linux/kernel.h>
#include <linux/gcd.h>
#include <linux/export.h>
/* Greatest common divisor */
unsigned long gcd(unsigned long a, unsigned long b)
{
unsigned long r;
if (a < b)
swap(a, b);
if (!b)
return a;
while ((r = a % b) != 0) {
a = b;
b = r;
}
return b;
}
EXPORT_SYMBOL_GPL(gcd);
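/*
 * Worked example (illustrative): gcd(54, 24) iterates
 * 54 % 24 = 6, then 24 % 6 = 0, so the result is 6.
 */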

140
lib/gen_crc32table.c Normal file
View file

@ -0,0 +1,140 @@
#include <stdio.h>
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
#define ENTRIES_PER_LINE 4
#if CRC_LE_BITS > 8
# define LE_TABLE_ROWS (CRC_LE_BITS/8)
# define LE_TABLE_SIZE 256
#else
# define LE_TABLE_ROWS 1
# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
#endif
#if CRC_BE_BITS > 8
# define BE_TABLE_ROWS (CRC_BE_BITS/8)
# define BE_TABLE_SIZE 256
#else
# define BE_TABLE_ROWS 1
# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
#endif
static uint32_t crc32table_le[LE_TABLE_ROWS][256];
static uint32_t crc32table_be[BE_TABLE_ROWS][256];
static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
/**
* crc32init_le() - initialize the LE table data
*
* crc holds the CRC of the single-bit byte value i; the other entries
* are filled in using the identity crctable[i^j] = crctable[i] ^ crctable[j].
*
*/
static void crc32init_le_generic(const uint32_t polynomial,
uint32_t (*tab)[256])
{
unsigned i, j;
uint32_t crc = 1;
tab[0][0] = 0;
for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
tab[0][i + j] = crc ^ tab[0][j];
}
for (i = 0; i < LE_TABLE_SIZE; i++) {
crc = tab[0][i];
for (j = 1; j < LE_TABLE_ROWS; j++) {
crc = tab[0][crc & 0xff] ^ (crc >> 8);
tab[j][i] = crc;
}
}
}
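/*
 * Illustrative note: the code above exploits the linearity of CRCs.
 * The first loop computes the CRC of each power-of-two index, and the
 * identity tab[i ^ j] = tab[i] ^ tab[j] then yields every remaining
 * entry, e.g.
 *
 *	tab[0][3] == tab[0][1] ^ tab[0][2]
 */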
static void crc32init_le(void)
{
crc32init_le_generic(CRCPOLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
{
crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le);
}
/**
* crc32init_be() - initialize the BE table data
*/
static void crc32init_be(void)
{
unsigned i, j;
uint32_t crc = 0x80000000;
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
for (i = 0; i < BE_TABLE_SIZE; i++) {
crc = crc32table_be[0][i];
for (j = 1; j < BE_TABLE_ROWS; j++) {
crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
crc32table_be[j][i] = crc;
}
}
}
static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
{
int i, j;
for (j = 0 ; j < rows; j++) {
printf("{");
for (i = 0; i < len - 1; i++) {
if (i % ENTRIES_PER_LINE == 0)
printf("\n");
printf("%s(0x%8.8xL), ", trans, table[j][i]);
}
printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
}
}
int main(int argc, char** argv)
{
printf("/* this file is generated - do not edit */\n\n");
if (CRC_LE_BITS > 1) {
crc32init_le();
printf("static u32 __cacheline_aligned "
"crc32table_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32table_le, LE_TABLE_ROWS,
LE_TABLE_SIZE, "tole");
printf("};\n");
}
if (CRC_BE_BITS > 1) {
crc32init_be();
printf("static u32 __cacheline_aligned "
"crc32table_be[%d][%d] = {",
BE_TABLE_ROWS, BE_TABLE_SIZE);
output_table(crc32table_be, BE_TABLE_ROWS,
BE_TABLE_SIZE, "tobe");
printf("};\n");
}
if (CRC_LE_BITS > 1) {
crc32cinit_le();
printf("static u32 __cacheline_aligned "
"crc32ctable_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32ctable_le, LE_TABLE_ROWS,
LE_TABLE_SIZE, "tole");
printf("};\n");
}
return 0;
}

647
lib/genalloc.c Normal file
View file

@ -0,0 +1,647 @@
/*
* Basic general purpose allocator for managing special purpose
* memory, for example, memory that is not managed by the regular
* kmalloc/kfree interface. Uses for this include on-device special
* memory, uncached memory etc.
*
* It is safe to use the allocator in NMI handlers and other special
* unblockable contexts that could otherwise deadlock on locks. This
* is implemented by using atomic operations and retries on any
* conflicts. The disadvantage is that there may be livelocks in
* extreme cases. For better scalability, one allocator can be used
* for each CPU.
*
* The lockless operation only works if there is enough memory
* available. If new memory is added to the pool, a lock still has to
* be taken. So any user relying on locklessness has to ensure
* that sufficient memory is preallocated.
*
* The basic atomic operation of this allocator is cmpxchg on long.
* On architectures that don't have an NMI-safe cmpxchg implementation,
* the allocator can NOT be used in an NMI handler. So code that uses
* the allocator in an NMI handler should depend on
* CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
return chunk->end_addr - chunk->start_addr + 1;
}
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
unsigned long val, nval;
nval = *addr;
do {
val = nval;
if (val & mask_to_set)
return -EBUSY;
cpu_relax();
} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
return 0;
}
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
unsigned long val, nval;
nval = *addr;
do {
val = nval;
if ((val & mask_to_clear) != mask_to_clear)
return -EBUSY;
cpu_relax();
} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
return 0;
}
/*
* bitmap_set_ll - set the specified number of bits at the specified position
* @map: pointer to a bitmap
* @start: a bit position in @map
* @nr: number of bits to set
*
* Set @nr bits starting from @start in @map locklessly. Several users
* can set/clear the same bitmap simultaneously without a lock. If two
* users race on the same bits, one of them returns the number of bits
* still left to set; otherwise 0 is returned.
*/
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
if (set_bits_ll(p, mask_to_set))
return nr;
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
if (set_bits_ll(p, mask_to_set))
return nr;
}
return 0;
}
/*
* bitmap_clear_ll - clear the specified number of bits at the specified position
* @map: pointer to a bitmap
* @start: a bit position in @map
* @nr: number of bits to clear
*
* Clear @nr bits starting from @start in @map locklessly. Several users
* can set/clear the same bitmap simultaneously without a lock. If two
* users race on the same bits, one of them returns the number of bits
* still left to clear; otherwise 0 is returned.
*/
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
if (clear_bits_ll(p, mask_to_clear))
return nr;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
if (clear_bits_ll(p, mask_to_clear))
return nr;
}
return 0;
}
/**
* gen_pool_create - create a new special memory pool
* @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
* @nid: node id of the node the pool structure should be allocated on, or -1
*
* Create a new special memory pool that can be used to manage special purpose
* memory not managed by the regular kmalloc/kfree interface.
*/
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
struct gen_pool *pool;
pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
if (pool != NULL) {
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
pool->algo = gen_pool_first_fit;
pool->data = NULL;
}
return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/**
* gen_pool_add_virt - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
* @virt: virtual starting address of memory chunk to add to pool
* @phys: physical starting address of memory chunk to add to pool
* @size: size in bytes of the memory chunk to add to pool
* @nid: node id of the node the chunk structure and bitmap should be
* allocated on, or -1
*
* Add a new chunk of special memory to the specified pool.
*
* Returns 0 on success or a -ve errno on failure.
*/
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
size_t size, int nid)
{
struct gen_pool_chunk *chunk;
int nbits = size >> pool->min_alloc_order;
int nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
if (unlikely(chunk == NULL))
return -ENOMEM;
chunk->phys_addr = phys;
chunk->start_addr = virt;
chunk->end_addr = virt + size - 1;
atomic_set(&chunk->avail, size);
spin_lock(&pool->lock);
list_add_rcu(&chunk->next_chunk, &pool->chunks);
spin_unlock(&pool->lock);
return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
/**
* gen_pool_virt_to_phys - return the physical address of memory
* @pool: pool to allocate from
* @addr: starting address of memory
*
* Returns the physical address on success, or -1 on error.
*/
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
struct gen_pool_chunk *chunk;
phys_addr_t paddr = -1;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
paddr = chunk->phys_addr + (addr - chunk->start_addr);
break;
}
}
rcu_read_unlock();
return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
/**
* gen_pool_destroy - destroy a special memory pool
* @pool: pool to destroy
*
* Destroy the specified special memory pool. Verifies that there are no
* outstanding allocations.
*/
void gen_pool_destroy(struct gen_pool *pool)
{
struct list_head *_chunk, *_next_chunk;
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
int bit, end_bit;
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
end_bit = chunk_size(chunk) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
kfree(chunk);
}
kfree(pool);
return;
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
* gen_pool_alloc - allocate special memory from the pool
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
int nbits, start_bit = 0, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
#endif
if (size == 0)
return 0;
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
if (size > atomic_read(&chunk->avail))
continue;
end_bit = chunk_size(chunk) >> order;
retry:
start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
pool->data);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
if (remain) {
remain = bitmap_clear_ll(chunk->bits, start_bit,
nbits - remain);
BUG_ON(remain);
goto retry;
}
addr = chunk->start_addr + ((unsigned long)start_bit << order);
size = nbits << order;
atomic_sub(size, &chunk->avail);
break;
}
rcu_read_unlock();
return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
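/*
 * Typical usage sketch (illustrative only; addresses and sizes are
 * hypothetical, and error handling is elided):
 *
 *	struct gen_pool *pool = gen_pool_create(5, -1);
 *	unsigned long addr;
 *
 *	gen_pool_add_virt(pool, vaddr, paddr, 4096, -1);
 *	addr = gen_pool_alloc(pool, 256);
 *	...
 *	gen_pool_free(pool, addr, 256);
 *	gen_pool_destroy(pool);
 *
 * A min_alloc_order of 5 makes each bitmap bit represent a 32-byte granule.
 */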
/**
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
* @dma: dma-view physical address return value. Use NULL if unneeded.
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
unsigned long vaddr;
if (!pool)
return NULL;
vaddr = gen_pool_alloc(pool, size);
if (!vaddr)
return NULL;
if (dma)
*dma = gen_pool_virt_to_phys(pool, vaddr);
return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
/**
* gen_pool_free - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
*
* Free previously allocated special memory back to the specified
* pool. Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
*/
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
int start_bit, nbits, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
#endif
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
BUG_ON(addr + size - 1 > chunk->end_addr);
start_bit = (addr - chunk->start_addr) >> order;
remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
BUG_ON(remain);
size = nbits << order;
atomic_add(size, &chunk->avail);
rcu_read_unlock();
return;
}
}
rcu_read_unlock();
BUG();
}
EXPORT_SYMBOL(gen_pool_free);
/**
* gen_pool_for_each_chunk - call func for every chunk of generic memory pool
* @pool: the generic memory pool
* @func: func to call
* @data: additional data used by @func
*
* Call @func for every chunk of generic memory pool. The @func is
* called with rcu_read_lock held.
*/
void gen_pool_for_each_chunk(struct gen_pool *pool,
void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
void *data)
{
struct gen_pool_chunk *chunk;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
func(pool, chunk, data);
rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
* addr_in_gen_pool - checks if an address falls within the range of a pool
* @pool: the generic memory pool
* @start: start address
* @size: size of the region
*
* Check if the range of addresses falls within the specified pool. Returns
* true if the entire range is contained in the pool and false otherwise.
*/
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size)
{
bool found = false;
unsigned long end = start + size;
struct gen_pool_chunk *chunk;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
if (start >= chunk->start_addr && start <= chunk->end_addr) {
if (end <= chunk->end_addr) {
found = true;
break;
}
}
}
rcu_read_unlock();
return found;
}
/**
* gen_pool_avail - get available free space of the pool
* @pool: pool to get available free space
*
* Return available free space of the specified pool.
*/
size_t gen_pool_avail(struct gen_pool *pool)
{
struct gen_pool_chunk *chunk;
size_t avail = 0;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
avail += atomic_read(&chunk->avail);
rcu_read_unlock();
return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
/**
* gen_pool_size - get size in bytes of memory managed by the pool
* @pool: pool to get size
*
* Return size in bytes of memory managed by the pool.
*/
size_t gen_pool_size(struct gen_pool *pool)
{
struct gen_pool_chunk *chunk;
size_t size = 0;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
size += chunk_size(chunk);
rcu_read_unlock();
return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
/**
* gen_pool_set_algo - set the allocation algorithm
* @pool: pool to change allocation algorithm
* @algo: custom algorithm function
* @data: additional data used by @algo
*
* Call @algo for each memory allocation in the pool.
* If @algo is NULL use gen_pool_first_fit as default
* memory allocation function.
*/
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
rcu_read_lock();
pool->algo = algo;
if (!pool->algo)
pool->algo = gen_pool_first_fit;
pool->data = data;
rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
/**
* gen_pool_first_fit - find the first available region
* of memory matching the size requirement (no alignment constraint)
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
*/
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data)
{
return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
* gen_pool_first_fit_order_align - find the first available region
* of memory matching the size requirement. The region will be aligned
* to the order of the size specified.
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
*/
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start,
unsigned int nr, void *data)
{
unsigned long align_mask = roundup_pow_of_two(nr) - 1;
return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
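/*
 * Illustrative example: for a request of nr = 5 bits,
 * roundup_pow_of_two(5) = 8, so align_mask = 7 and the returned region
 * starts on an 8-bit boundary, i.e. at a multiple of 8 << min_alloc_order
 * bytes relative to the start of the chunk.
 */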
/**
* gen_pool_best_fit - find the best fitting region of memory
* matching the size requirement (no alignment constraint)
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
*
* Iterate over the bitmap to find the smallest free region
* from which the memory can be allocated.
*/
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data)
{
unsigned long start_bit = size;
unsigned long len = size + 1;
unsigned long index;
index = bitmap_find_next_zero_area(map, size, start, nr, 0);
while (index < size) {
int next_bit = find_next_bit(map, size, index + nr);
if ((next_bit - index) < len) {
len = next_bit - index;
start_bit = index;
if (len == nr)
return start_bit;
}
index = bitmap_find_next_zero_area(map, size,
next_bit + 1, nr, 0);
}
return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
static void devm_gen_pool_release(struct device *dev, void *res)
{
gen_pool_destroy(*(struct gen_pool **)res);
}
/**
* devm_gen_pool_create - managed gen_pool_create
* @dev: device that provides the gen_pool
* @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
* @nid: node id of the node the pool structure should be allocated on, or -1
*
* Create a new special memory pool that can be used to manage special purpose
* memory not managed by the regular kmalloc/kfree interface. The pool will be
* automatically destroyed by the device management code.
*/
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
int nid)
{
struct gen_pool **ptr, *pool;
ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
pool = gen_pool_create(min_alloc_order, nid);
if (pool) {
*ptr = pool;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
/**
* dev_get_gen_pool - Obtain the gen_pool (if any) for a device
* @dev: device to retrieve the gen_pool from
*
* Returns the gen_pool for the device if one is present, or NULL.
*/
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
NULL);
if (!p)
return NULL;
return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);
#ifdef CONFIG_OF
/**
* of_get_named_gen_pool - find a pool by phandle property
* @np: device node
* @propname: property name containing phandle(s)
* @index: index into the phandle array
*
* Returns the pool that contains the chunk starting at the physical
* address of the device tree node pointed at by the phandle property,
* or NULL if not found.
*/
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
const char *propname, int index)
{
struct platform_device *pdev;
struct device_node *np_pool;
np_pool = of_parse_phandle(np, propname, index);
if (!np_pool)
return NULL;
pdev = of_find_device_by_node(np_pool);
of_node_put(np_pool);
if (!pdev)
return NULL;
return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
#endif /* CONFIG_OF */

287
lib/glob.c Normal file
View file

@ -0,0 +1,287 @@
#include <linux/module.h>
#include <linux/glob.h>
/*
* The only reason this code can be compiled as a module is because the
* ATA code that depends on it can be as well. In practice, they're
* both usually compiled in and the module overhead goes away.
*/
MODULE_DESCRIPTION("glob(7) matching");
MODULE_LICENSE("Dual MIT/GPL");
/**
* glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
* @pat: Shell-style pattern to match, e.g. "*.[ch]".
* @str: String to match. The pattern must match the entire string.
*
* Perform shell-style glob matching, returning true (1) if the match
* succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
*
* Pattern metacharacters are ?, *, [ and \.
* (And, inside character classes, !, - and ].)
*
* This is a small and simple implementation intended for device blacklists
* where a string is matched against a number of patterns. Thus, it
* does not preprocess the patterns. It is non-recursive, and run-time
* is at most quadratic: strlen(@str)*strlen(@pat).
*
* An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
* it takes 6 passes over the pattern before matching the string.
*
* Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
* treat / or leading . specially; it isn't actually used for pathnames.
*
* Note that according to glob(7) (and unlike bash), character classes
* are complemented by a leading !; this does not support the regex-style
* [^a-z] syntax.
*
* An opening bracket without a matching close is matched literally.
*/
bool __pure glob_match(char const *pat, char const *str)
{
/*
* Backtrack to previous * on mismatch and retry starting one
* character later in the string. Because * matches all characters
* (no exception for /), it can be easily proved that there's
* never a need to backtrack multiple levels.
*/
char const *back_pat = NULL, *back_str = NULL;
/*
* Loop over each token (character or class) in pat, matching
* it against the remaining unmatched tail of str. Return false
* on mismatch, or true after matching the trailing nul bytes.
*/
for (;;) {
unsigned char c = *str++;
unsigned char d = *pat++;
switch (d) {
case '?': /* Wildcard: anything but nul */
if (c == '\0')
return false;
break;
case '*': /* Any-length wildcard */
if (*pat == '\0') /* Optimize trailing * case */
return true;
back_pat = pat;
back_str = --str; /* Allow zero-length match */
break;
case '[': { /* Character class */
bool match = false, inverted = (*pat == '!');
char const *class = pat + inverted;
unsigned char a = *class++;
/*
* Iterate over each span in the character class.
* A span is either a single character a, or a
* range a-b. The first span may begin with ']'.
*/
do {
unsigned char b = a;
if (a == '\0') /* Malformed */
goto literal;
if (class[0] == '-' && class[1] != ']') {
b = class[1];
if (b == '\0')
goto literal;
class += 2;
/* Any special action if a > b? */
}
match |= (a <= c && c <= b);
} while ((a = *class++) != ']');
if (match == inverted)
goto backtrack;
pat = class;
}
break;
case '\\':
d = *pat++;
/*FALLTHROUGH*/
default: /* Literal character */
literal:
if (c == d) {
if (d == '\0')
return true;
break;
}
backtrack:
if (c == '\0' || !back_pat)
return false; /* No point continuing */
/* Try again from last *, one character later in str. */
pat = back_pat;
str = ++back_str;
break;
}
}
}
EXPORT_SYMBOL(glob_match);
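/*
 * Illustrative examples of the semantics documented above:
 *
 *	glob_match("*.[ch]", "main.c")	-> true
 *	glob_match("a*b*c", "aXbYbZc")	-> true  (backtracking)
 *	glob_match("[!a-z]", "A")	-> true  (complemented class)
 *	glob_match("a?c", "ac")		-> false ('?' must consume a char)
 */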
#ifdef CONFIG_GLOB_SELFTEST
#include <linux/printk.h>
#include <linux/moduleparam.h>
/* Boot with "glob.verbose=1" to show successful tests, too */
static bool verbose = false;
module_param(verbose, bool, 0);
struct glob_test {
char const *pat, *str;
bool expected;
};
static bool __pure __init test(char const *pat, char const *str, bool expected)
{
bool match = glob_match(pat, str);
bool success = match == expected;
/* Can't get string literals into a particular section, so... */
static char const msg_error[] __initconst =
KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
static char const msg_ok[] __initconst =
KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
static char const mismatch[] __initconst = "mismatch";
char const *message;
if (!success)
message = msg_error;
else if (verbose)
message = msg_ok;
else
return success;
printk(message, pat, str, mismatch + 3*match);
return success;
}
/*
* The tests are all jammed together in one array to make it simpler
* to place that array in the .init.rodata section. The obvious
* "array of structures containing char *" has no way to force the
* pointed-to strings to be in a particular section.
*
* Anyway, a test consists of:
* 1. Expected glob_match result: '1' or '0'.
* 2. Pattern to match: null-terminated string
* 3. String to match against: null-terminated string
*
* The list of tests is terminated with a final '\0' instead of
* a glob_match result character.
*/
static char const glob_tests[] __initconst =
/* Some basic tests */
"1" "a\0" "a\0"
"0" "a\0" "b\0"
"0" "a\0" "aa\0"
"0" "a\0" "\0"
"1" "\0" "\0"
"0" "\0" "a\0"
/* Simple character class tests */
"1" "[a]\0" "a\0"
"0" "[a]\0" "b\0"
"0" "[!a]\0" "a\0"
"1" "[!a]\0" "b\0"
"1" "[ab]\0" "a\0"
"1" "[ab]\0" "b\0"
"0" "[ab]\0" "c\0"
"1" "[!ab]\0" "c\0"
"1" "[a-c]\0" "b\0"
"0" "[a-c]\0" "d\0"
/* Corner cases in character class parsing */
"1" "[a-c-e-g]\0" "-\0"
"0" "[a-c-e-g]\0" "d\0"
"1" "[a-c-e-g]\0" "f\0"
"1" "[]a-ceg-ik[]\0" "a\0"
"1" "[]a-ceg-ik[]\0" "]\0"
"1" "[]a-ceg-ik[]\0" "[\0"
"1" "[]a-ceg-ik[]\0" "h\0"
"0" "[]a-ceg-ik[]\0" "f\0"
"0" "[!]a-ceg-ik[]\0" "h\0"
"0" "[!]a-ceg-ik[]\0" "]\0"
"1" "[!]a-ceg-ik[]\0" "f\0"
/* Simple wild cards */
"1" "?\0" "a\0"
"0" "?\0" "aa\0"
"0" "??\0" "a\0"
"1" "?x?\0" "axb\0"
"0" "?x?\0" "abx\0"
"0" "?x?\0" "xab\0"
/* Asterisk wild cards (backtracking) */
"0" "*??\0" "a\0"
"1" "*??\0" "ab\0"
"1" "*??\0" "abc\0"
"1" "*??\0" "abcd\0"
"0" "??*\0" "a\0"
"1" "??*\0" "ab\0"
"1" "??*\0" "abc\0"
"1" "??*\0" "abcd\0"
"0" "?*?\0" "a\0"
"1" "?*?\0" "ab\0"
"1" "?*?\0" "abc\0"
"1" "?*?\0" "abcd\0"
"1" "*b\0" "b\0"
"1" "*b\0" "ab\0"
"0" "*b\0" "ba\0"
"1" "*b\0" "bb\0"
"1" "*b\0" "abb\0"
"1" "*b\0" "bab\0"
"1" "*bc\0" "abbc\0"
"1" "*bc\0" "bc\0"
"1" "*bc\0" "bbc\0"
"1" "*bc\0" "bcbc\0"
/* Multiple asterisks (complex backtracking) */
"1" "*ac*\0" "abacadaeafag\0"
"1" "*ac*ae*ag*\0" "abacadaeafag\0"
"1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
"0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
"1" "*abcd*\0" "abcabcabcabcdefg\0"
"1" "*ab*cd*\0" "abcabcabcabcdefg\0"
"1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
"0" "*abcd*\0" "abcabcabcabcefg\0"
"0" "*ab*cd*\0" "abcabcabcabcefg\0";
static int __init glob_init(void)
{
unsigned successes = 0;
unsigned n = 0;
char const *p = glob_tests;
static char const message[] __initconst =
KERN_INFO "glob: %u self-tests passed, %u failed\n";
/*
* Tests are jammed together in a string. The first byte is '1'
* or '0' to indicate the expected outcome, or '\0' to indicate the
* end of the tests. Then come two null-terminated strings: the
* pattern and the string to match it against.
*/
while (*p) {
bool expected = *p++ & 1;
char const *pat = p;
p += strlen(p) + 1;
successes += test(pat, p, expected);
p += strlen(p) + 1;
n++;
}
n -= successes;
printk(message, successes, n);
/* What's the errno for "kernel bug detected"? Guess... */
return n ? -ECANCELED : 0;
}
/* We need a dummy exit function to allow unload */
static void __exit glob_fini(void) { }
module_init(glob_init);
module_exit(glob_fini);
#endif /* CONFIG_GLOB_SELFTEST */

66
lib/halfmd4.c Normal file
View file

@ -0,0 +1,66 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/cryptohash.h>
/* F, G and H are basic MD4 functions: selection, majority, parity */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
/*
* The generic round function. The application is so specific that
* we don't bother protecting all the arguments with parens, as is generally
* good macro practice, in favor of extra legibility.
* Rotation is separate from addition to prevent recomputation
*/
#define ROUND(f, a, b, c, d, x, s) \
(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
#define K1 0
#define K2 013240474631UL
#define K3 015666365641UL
/*
* Basic cut-down MD4 transform. Returns only 32 bits of result.
*/
__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
{
__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
/* Round 1 */
ROUND(F, a, b, c, d, in[0] + K1, 3);
ROUND(F, d, a, b, c, in[1] + K1, 7);
ROUND(F, c, d, a, b, in[2] + K1, 11);
ROUND(F, b, c, d, a, in[3] + K1, 19);
ROUND(F, a, b, c, d, in[4] + K1, 3);
ROUND(F, d, a, b, c, in[5] + K1, 7);
ROUND(F, c, d, a, b, in[6] + K1, 11);
ROUND(F, b, c, d, a, in[7] + K1, 19);
/* Round 2 */
ROUND(G, a, b, c, d, in[1] + K2, 3);
ROUND(G, d, a, b, c, in[3] + K2, 5);
ROUND(G, c, d, a, b, in[5] + K2, 9);
ROUND(G, b, c, d, a, in[7] + K2, 13);
ROUND(G, a, b, c, d, in[0] + K2, 3);
ROUND(G, d, a, b, c, in[2] + K2, 5);
ROUND(G, c, d, a, b, in[4] + K2, 9);
ROUND(G, b, c, d, a, in[6] + K2, 13);
/* Round 3 */
ROUND(H, a, b, c, d, in[3] + K3, 3);
ROUND(H, d, a, b, c, in[7] + K3, 9);
ROUND(H, c, d, a, b, in[2] + K3, 11);
ROUND(H, b, c, d, a, in[6] + K3, 15);
ROUND(H, a, b, c, d, in[1] + K3, 3);
ROUND(H, d, a, b, c, in[5] + K3, 9);
ROUND(H, c, d, a, b, in[0] + K3, 11);
ROUND(H, b, c, d, a, in[4] + K3, 15);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
return buf[1]; /* "most hashed" word */
}
EXPORT_SYMBOL(half_md4_transform);
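/*
 * Usage sketch (illustrative; the seed is a hypothetical choice, shown
 * here as the classic MD4 initial values, which this file does not
 * mandate):
 *
 *	__u32 buf[4] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 };
 *	__u32 in[8];
 *
 *	memcpy(in, data, sizeof(in));
 *	hash = half_md4_transform(buf, in);
 *
 * Each call consumes 32 bytes of @in, updates @buf in place and
 * returns buf[1].
 */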

39
lib/hash.c Normal file
View file

@ -0,0 +1,39 @@
/* General purpose hashing library
*
* This is the start of a kernel hashing library, which can be extended
* with further algorithms in the future. arch_fast_hash{2,}() will
* eventually resolve to an architecture optimized implementation.
*
* Copyright 2013 Francesco Fusco <ffusco@redhat.com>
* Copyright 2013 Daniel Borkmann <dborkman@redhat.com>
* Copyright 2013 Thomas Graf <tgraf@redhat.com>
* Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
#include <linux/jhash.h>
#include <linux/hash.h>
#include <linux/cache.h>
static struct fast_hash_ops arch_hash_ops __read_mostly = {
.hash = jhash,
.hash2 = jhash2,
};
u32 arch_fast_hash(const void *data, u32 len, u32 seed)
{
return arch_hash_ops.hash(data, len, seed);
}
EXPORT_SYMBOL_GPL(arch_fast_hash);
u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
{
return arch_hash_ops.hash2(data, len, seed);
}
EXPORT_SYMBOL_GPL(arch_fast_hash2);
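/*
 * Illustrative call (the seed 0 is arbitrary):
 *
 *	u32 h = arch_fast_hash(buf, len, 0);
 *
 * This resolves to jhash(buf, len, 0) through the ops table above,
 * unless setup_arch_fast_hash() installed an architecture-optimized
 * implementation at early init.
 */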
static int __init hashlib_init(void)
{
setup_arch_fast_hash(&arch_hash_ops);
return 0;
}
early_initcall(hashlib_init);

269
lib/hexdump.c Normal file
View file

@ -0,0 +1,269 @@
/*
* lib/hexdump.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/export.h>
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
const char hex_asc_upper[] = "0123456789ABCDEF";
EXPORT_SYMBOL(hex_asc_upper);
/**
* hex_to_bin - convert a hex digit to its real value
* @ch: ASCII character representing a hex digit
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
*/
int hex_to_bin(char ch)
{
if ((ch >= '0') && (ch <= '9'))
return ch - '0';
ch = tolower(ch);
if ((ch >= 'a') && (ch <= 'f'))
return ch - 'a' + 10;
return -1;
}
EXPORT_SYMBOL(hex_to_bin);
/**
* hex2bin - convert an ascii hexadecimal string to its binary representation
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
*
* Return 0 on success, -1 in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
int hi = hex_to_bin(*src++);
int lo = hex_to_bin(*src++);
if ((hi < 0) || (lo < 0))
return -1;
*dst++ = (hi << 4) | lo;
}
return 0;
}
EXPORT_SYMBOL(hex2bin);
/**
* bin2hex - convert binary data to an ascii hexadecimal string
* @dst: ascii hexadecimal result
* @src: binary data
* @count: binary data length
*/
char *bin2hex(char *dst, const void *src, size_t count)
{
const unsigned char *_src = src;
while (count--)
dst = hex_byte_pack(dst, *_src++);
return dst;
}
EXPORT_SYMBOL(bin2hex);
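/*
 * Round-trip example (illustrative):
 *
 *	u8 bin[2];
 *	char hex[5];
 *
 *	hex2bin(bin, "1a2b", 2);	makes bin = { 0x1a, 0x2b }
 *	*bin2hex(hex, bin, 2) = '\0';	makes hex = "1a2b"
 *
 * bin2hex() returns a pointer just past the last character written,
 * which is why the caller adds the NUL here.
 */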
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @linebuf: where to put the converted data
* @linebuflen: total size of @linebuf, including space for terminating NUL
* @ascii: include ASCII after the hex output
*
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
*
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
* to a hex + ASCII dump at the supplied memory location.
* The converted output is always NUL-terminated.
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
* linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*/
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii)
{
const u8 *ptr = buf;
u8 ch;
int j, lx = 0;
int ascii_column;
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
if (!len)
goto nil;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
switch (groupsize) {
case 8: {
const u64 *ptr8 = buf;
int ngroups = len / groupsize;
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
(unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
}
case 4: {
const u32 *ptr4 = buf;
int ngroups = len / groupsize;
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
}
case 2: {
const u16 *ptr2 = buf;
int ngroups = len / groupsize;
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
}
default:
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
if (j)
lx--;
ascii_column = 3 * rowsize + 2;
break;
}
if (!ascii)
goto nil;
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
linebuf[lx++] = ' ';
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx++] = '\0';
}
EXPORT_SYMBOL(hex_dump_to_buffer);
#ifdef CONFIG_PRINTK
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @ascii: include ASCII after the hex output
*
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
* to the kernel log at the specified kernel log level, with an optional
* leading prefix.
*
* print_hex_dump() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
* print_hex_dump() iterates over the entire input @buf, breaking it into
* "line size" chunks to format and print.
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
* 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%p: %s\n",
level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
EXPORT_SYMBOL(print_hex_dump);
#if !defined(CONFIG_DYNAMIC_DEBUG)
/**
* print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
 * @prefix_type: controls whether each line is prefixed with an offset, an
 * address, or nothing (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @buf: data blob to dump
 * @len: number of bytes in @buf
*
* Calls print_hex_dump(), with log level of KERN_DEBUG,
* rowsize of 16, groupsize of 1, and ASCII output included.
*/
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
buf, len, true);
}
EXPORT_SYMBOL(print_hex_dump_bytes);
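/*
 * Usage sketch (illustrative): the shorthand covers the common case, e.g.
 * in a driver's receive path ("rxbuf" and "rxlen" are assumptions):
 *
 *	print_hex_dump_bytes("rx: ", DUMP_PREFIX_OFFSET, rxbuf, rxlen);
 *
 * is equivalent to
 *
 *	print_hex_dump(KERN_DEBUG, "rx: ", DUMP_PREFIX_OFFSET,
 *		       16, 1, rxbuf, rxlen, true);
 */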
#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
#endif /* defined(CONFIG_PRINTK) */

67
lib/hweight.c Normal file
View file

@@ -0,0 +1,67 @@
#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/types.h>
/**
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
*/
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
return (w * 0x01010101) >> 24;
#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(__sw_hweight32);
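/*
 * Worked example (illustrative): for w = 0xF0F0F0F0 the folding steps
 * above produce
 *
 *	after the 1-bit step:  0xA0A0A0A0  (per-2-bit counts)
 *	after the 2-bit step:  0x40404040  (per-nibble counts)
 *	after the 4-bit step:  0x04040404  (per-byte counts)
 *
 * and the final multiply-and-shift (or shift-and-add chain) sums the
 * byte counts: __sw_hweight32(0xF0F0F0F0) == 16.
 */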
unsigned int __sw_hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
EXPORT_SYMBOL(__sw_hweight16);
unsigned int __sw_hweight8(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55);
res = (res & 0x33) + ((res >> 2) & 0x33);
return (res + (res >> 4)) & 0x0F;
}
EXPORT_SYMBOL(__sw_hweight8);
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
return (w * 0x0101010101010101ul) >> 56;
#else
__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
res = res + (res >> 8);
res = res + (res >> 16);
return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
#endif
}
EXPORT_SYMBOL(__sw_hweight64);

1148
lib/idr.c Normal file

File diff suppressed because it is too large

1309
lib/inflate.c Normal file

File diff suppressed because it is too large

38
lib/int_sqrt.c Normal file
View file

@@ -0,0 +1,38 @@
/*
* Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com>
*
* Based on the shift-and-subtract algorithm for computing integer
* square root from Guy L. Steele.
*/
#include <linux/kernel.h>
#include <linux/export.h>
/**
 * int_sqrt - computes the integer square root
 * @x: integer of which to calculate the sqrt
 *
 * Computes floor(sqrt(x)); the shift-and-subtract loop below is exact,
 * not an approximation.
*/
unsigned long int_sqrt(unsigned long x)
{
unsigned long b, m, y = 0;
if (x <= 1)
return x;
m = 1UL << (BITS_PER_LONG - 2);
while (m != 0) {
b = y + m;
y >>= 1;
if (x >= b) {
x -= b;
y += m;
}
m >>= 2;
}
return y;
}
EXPORT_SYMBOL(int_sqrt);
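/*
 * Worked example (illustrative): the probe bit m walks down two bits per
 * iteration, and a bit of the root is accepted whenever the remainder x
 * still covers y + m. The result is exactly floor(sqrt(x)):
 *
 *	int_sqrt(1000) == 31	(31 * 31 = 961 <= 1000 < 1024 = 32 * 32)
 *	int_sqrt(1024) == 32
 */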

16
lib/interval_tree.c Normal file
View file

@@ -0,0 +1,16 @@
#include <linux/init.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
#include <linux/module.h>
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
unsigned long, __subtree_last,
START, LAST,, interval_tree)
EXPORT_SYMBOL_GPL(interval_tree_insert);
EXPORT_SYMBOL_GPL(interval_tree_remove);
EXPORT_SYMBOL_GPL(interval_tree_iter_first);
EXPORT_SYMBOL_GPL(interval_tree_iter_next);
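/*
 * Usage sketch (illustrative, not part of the original file): build a
 * two-node tree and run a stab query. Intervals are closed, [start, last];
 * example_root and example_query are made-up names.
 */
static struct rb_root example_root = RB_ROOT;

static void __maybe_unused example_query(void)
{
	static struct interval_tree_node a = { .start = 10, .last = 20 };
	static struct interval_tree_node b = { .start = 15, .last = 30 };
	struct interval_tree_node *it;

	interval_tree_insert(&a, &example_root);
	interval_tree_insert(&b, &example_root);

	/* visits both nodes: [15, 18] overlaps [10, 20] and [15, 30] */
	for (it = interval_tree_iter_first(&example_root, 15, 18); it;
	     it = interval_tree_iter_next(it, 15, 18))
		pr_info("overlap [%lu, %lu]\n", it->start, it->last);
}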

106
lib/interval_tree_test.c Normal file
View file

@@ -0,0 +1,106 @@
#include <linux/module.h>
#include <linux/interval_tree.h>
#include <linux/random.h>
#include <asm/timex.h>
#define NODES 100
#define PERF_LOOPS 100000
#define SEARCHES 100
#define SEARCH_LOOPS 10000
static struct rb_root root = RB_ROOT;
static struct interval_tree_node nodes[NODES];
static u32 queries[SEARCHES];
static struct rnd_state rnd;
static inline unsigned long
search(unsigned long query, struct rb_root *root)
{
struct interval_tree_node *node;
unsigned long results = 0;
for (node = interval_tree_iter_first(root, query, query); node;
node = interval_tree_iter_next(node, query, query))
results++;
return results;
}
static void init(void)
{
int i;
for (i = 0; i < NODES; i++) {
u32 a = prandom_u32_state(&rnd);
u32 b = prandom_u32_state(&rnd);
if (a <= b) {
nodes[i].start = a;
nodes[i].last = b;
} else {
nodes[i].start = b;
nodes[i].last = a;
}
}
for (i = 0; i < SEARCHES; i++)
queries[i] = prandom_u32_state(&rnd);
}
static int interval_tree_test_init(void)
{
int i, j;
unsigned long results;
cycles_t time1, time2, time;
printk(KERN_ALERT "interval tree insert/remove");
prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
for (i = 0; i < PERF_LOOPS; i++) {
for (j = 0; j < NODES; j++)
interval_tree_insert(nodes + j, &root);
for (j = 0; j < NODES; j++)
interval_tree_remove(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, PERF_LOOPS);
printk(" -> %llu cycles\n", (unsigned long long)time);
printk(KERN_ALERT "interval tree search");
for (j = 0; j < NODES; j++)
interval_tree_insert(nodes + j, &root);
time1 = get_cycles();
results = 0;
for (i = 0; i < SEARCH_LOOPS; i++)
for (j = 0; j < SEARCHES; j++)
results += search(queries[j], &root);
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, SEARCH_LOOPS);
results = div_u64(results, SEARCH_LOOPS);
printk(" -> %llu cycles (%lu results)\n",
(unsigned long long)time, results);
	return -EAGAIN; /* returning an error unloads the test module immediately */
}
static void interval_tree_test_exit(void)
{
printk(KERN_ALERT "test exit\n");
}
module_init(interval_tree_test_init)
module_exit(interval_tree_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Interval Tree test");

252
lib/iomap.c Normal file
View file

@@ -0,0 +1,252 @@
/*
* Implement the default iomap interfaces
*
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/export.h>
/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or an MMIO access; these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines don't assume any hardware mappings, and just
* encode the PIO/MMIO as part of the cookie. They coldly assume that
* the MMIO IO mappings are not in the low address range.
*
* Architectures for which this is not true can't use this generic
* implementation and should do their own copy.
*/
#ifndef HAVE_ARCH_PIO_SIZE
/*
* We encode the physical PIO addresses (0-0xffff) into the
* pointer by offsetting them with a constant (0x10000) and
* assuming that all the low addresses are always PIO. That means
* we can do some sanity checks on the low bits, and don't
* need to just take things for granted.
*/
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL
#define PIO_RESERVED 0x40000UL
#endif
static void bad_io_access(unsigned long port, const char *access)
{
static int count = 10;
if (count) {
count--;
WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
}
}
/*
* Ugly macros are a way of life.
*/
#define IO_COND(addr, is_pio, is_mmio) do { \
unsigned long port = (unsigned long __force)addr; \
if (port >= PIO_RESERVED) { \
is_mmio; \
} else if (port > PIO_OFFSET) { \
port &= PIO_MASK; \
is_pio; \
} else \
bad_io_access(port, #is_pio ); \
} while (0)
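/*
 * Worked example (illustrative): with the default PIO_OFFSET of 0x10000,
 * ioport_map(0x3f8, 8) below hands out the cookie 0x103f8. IO_COND()
 * sees that it is below PIO_RESERVED (0x40000) but above PIO_OFFSET,
 * masks it with PIO_MASK back to port 0x3f8 and takes the PIO branch.
 * An ioremap()ed MMIO address lies above PIO_RESERVED and takes the
 * MMIO branch instead.
 */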
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif
#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif
unsigned int ioread8(void __iomem *addr)
{
IO_COND(addr, return inb(port), return readb(addr));
return 0xff;
}
unsigned int ioread16(void __iomem *addr)
{
IO_COND(addr, return inw(port), return readw(addr));
return 0xffff;
}
unsigned int ioread16be(void __iomem *addr)
{
IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
return 0xffff;
}
unsigned int ioread32(void __iomem *addr)
{
IO_COND(addr, return inl(port), return readl(addr));
return 0xffffffff;
}
unsigned int ioread32be(void __iomem *addr)
{
IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
return 0xffffffff;
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif
#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
#endif
void iowrite8(u8 val, void __iomem *addr)
{
IO_COND(addr, outb(val,port), writeb(val, addr));
}
void iowrite16(u16 val, void __iomem *addr)
{
IO_COND(addr, outw(val,port), writew(val, addr));
}
void iowrite16be(u16 val, void __iomem *addr)
{
IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void iowrite32(u32 val, void __iomem *addr)
{
IO_COND(addr, outl(val,port), writel(val, addr));
}
void iowrite32be(u32 val, void __iomem *addr)
{
IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
* convert to CPU byte order. We write in "IO byte
* order" (we also don't have IO barriers).
*/
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __raw_readb(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __raw_readw(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __raw_readl(addr);
*dst = data;
dst++;
}
}
#endif
#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
while (--count >= 0) {
__raw_writeb(*src, addr);
src++;
}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
while (--count >= 0) {
__raw_writew(*src, addr);
src++;
}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
while (--count >= 0) {
__raw_writel(*src, addr);
src++;
}
}
#endif
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
}
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
}
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
if (port > PIO_MASK)
return NULL;
return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}
void ioport_unmap(void __iomem *addr)
{
/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
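/*
 * Usage sketch (illustrative, not part of the original file): map a legacy
 * UART's port range and read one register through the cookie. The port
 * number and register offset are assumptions for the example.
 */
static void __maybe_unused example_uart_peek(void)
{
	void __iomem *base = ioport_map(0x3f8, 8);

	if (base) {
		unsigned int lsr = ioread8(base + 5); /* line status reg */

		(void)lsr;	/* only demonstrating the accessors */
		ioport_unmap(base);
	}
}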
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/* Hide the details of whether this is an MMIO or PIO address space and just
 * do what you expect in the correct way. */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iounmap);
#endif /* CONFIG_PCI */

70
lib/iomap_copy.c Normal file
View file

@@ -0,0 +1,70 @@
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/export.h>
#include <linux/io.h>
/**
* __iowrite32_copy - copy data to MMIO space, in 32-bit units
* @to: destination, in MMIO space (must be 32-bit aligned)
* @from: source (must be 32-bit aligned)
* @count: number of 32-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in units of 32 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
const void *from,
size_t count)
{
u32 __iomem *dst = to;
const u32 *src = from;
const u32 *end = src + count;
while (src < end)
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
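/*
 * Usage sketch (illustrative, not part of the original file): post a
 * 16-byte command descriptor to a device doorbell region in 32-bit units.
 * "db_base" and "desc" are assumptions, not a real device API.
 */
static void __maybe_unused example_post_cmd(void __iomem *db_base,
					    const u32 *desc)
{
	__iowrite32_copy(db_base, desc, 4);	/* 4 x 32-bit words */
}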
/**
* __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
* @to: destination, in MMIO space (must be 64-bit aligned)
* @from: source (must be 64-bit aligned)
* @count: number of 64-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
const void *from,
size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
const u64 *src = from;
const u64 *end = src + count;
while (src < end)
__raw_writeq(*src++, dst++);
#else
__iowrite32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__iowrite64_copy);
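/*
 * Note (illustrative): on a CONFIG_64BIT kernel a call such as
 * __iowrite64_copy(dst, src, 2) issues two 64-bit writes, while on a
 * 32-bit kernel the same call falls back to four 32-bit writes via
 * __iowrite32_copy(dst, src, 4).
 */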

Some files were not shown because too many files have changed in this diff