diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a6e0a129d044e..349d10b5e9acc 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2191,6 +2191,32 @@ config ATAGS_PROC
 	  Should the atags used to boot the kernel be exported in an "atags"
 	  file in procfs. Useful with kexec.
 
+config KEXEC_HARDBOOT
+	bool "Support hard booting to a kexec kernel"
+	depends on KEXEC
+	help
+	  Allows hard booting (i.e., with a full hardware reboot) to a kernel
+	  previously loaded in memory by kexec. This works around the problem of
+	  soft-booted kernel hangs due to improper device shutdown and/or
+	  reinitialization. Support consists of two components:
+
+	  First, a "hardboot" flag is added to the kexec syscall to force a hard
+	  reboot in relocate_new_kernel() (which requires machine-specific assembly
+	  code). This also requires the kexec userspace tool to load the kexec'd
+	  kernel in a memory region left untouched by the bootloader (i.e., not
+	  explicitly cleared and not overwritten by the boot kernel). Just prior
+	  to reboot, the kexec kernel arguments are stashed in a machine-specific
+	  memory page that must also be preserved. Note that this hardboot page
+	  need not be reserved during regular kernel execution.
+
+	  Second, the zImage decompressor of the boot (bootloader-loaded) kernel is
+	  modified to check the hardboot page for fresh kexec arguments, and if
+	  present, attempts to jump to the kexec'd kernel preserved in memory.
+
+	  Note that hardboot support is only required in the boot kernel and any
+	  kernel capable of performing a hardboot kexec. It is _not_ required by a
+	  kexec'd kernel.
+
 config CRASH_DUMP
 	bool "Build kdump crash kernel (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index ea0ae2d991160..0253d13ba3caa 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -314,6 +314,11 @@ restart:	adr	r0, LC0
 dtb_check_done:
 #endif
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+	#include
+	#include
+#endif
+
 /*
  * Check to see if we will overwrite ourselves.
  *   r4 = final kernel address
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c2b9b4bdec00d..564c55b394e51 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -17,6 +17,10 @@
 #define KEXEC_ARM_ATAGS_OFFSET 0x1000
 #define KEXEC_ARM_ZIMAGE_OFFSET 0x8000
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+  #define KEXEC_HB_PAGE_MAGIC 0x4a5db007
+#endif
+
 #ifndef __ASSEMBLY__
 
 /**
@@ -53,6 +57,10 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 /* Function pointer to optional machine-specific reinitialization */
 extern void (*kexec_reinit)(void);
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+extern void (*kexec_hardboot_hook)(void);
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* CONFIG_KEXEC */
diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags.c
index 42a1a1415fa6c..0cfd7e4803877 100644
--- a/arch/arm/kernel/atags.c
+++ b/arch/arm/kernel/atags.c
@@ -4,29 +4,45 @@
 #include
 #include
 
+/*
+ * [PATCH] Backport arch/arm/kernel/atags.c from 3.10
+ *
+ * There is a bug in older kernels, causing the kexec-tools binary to
+ * only read the first 1024 bytes from /proc/atags. I guess the bug is
+ * somewhere in /fs/proc/, since I don't think the callback in atags.c
+ * does anything wrong. It might affect all procfs files using that
+ * old read callback instead of fops. Doesn't matter though, since it
+ * was accidentally fixed when 3.10 removed it.
+ * + * This might have no particular effect on real devices, because the + * atags _might_ be organized "just right", but it might be very hard + * to track down on a device where it causes problems. + * + */ + struct buffer { size_t size; char data[]; }; -static int -read_buffer(char* page, char** start, off_t off, int count, - int* eof, void* data) -{ - struct buffer *buffer = (struct buffer *)data; - - if (off >= buffer->size) { - *eof = 1; - return 0; - } - - count = min((int) (buffer->size - off), count); +static struct buffer* atags_buffer = NULL; - memcpy(page, &buffer->data[off], count); - - return count; +static ssize_t atags_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + // These are introduced in kernel 3.10. I don't want to backport + // the whole chunk, and other things (ram_console) use static + // variable to keep data too, so I guess it's okay. + //struct buffer *b = PDE_DATA(file_inode(file)); + struct buffer *b = atags_buffer; + return simple_read_from_buffer(buf, count, ppos, b->data, b->size); } +static const struct file_operations atags_fops = { + .read = atags_read, + .llseek = default_llseek, +}; + #define BOOT_PARAMS_SIZE 1536 static char __initdata atags_copy[BOOT_PARAMS_SIZE]; @@ -66,12 +82,13 @@ static int __init init_atags_procfs(void) b->size = size; memcpy(b->data, atags_copy, size); - tags_entry = create_proc_read_entry("atags", 0400, - NULL, read_buffer, b); + tags_entry = proc_create_data("atags", 0400, NULL, &atags_fops, b); if (!tags_entry) goto nomem; + atags_buffer = b; + return 0; nomem: diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index c355aebdf2d0d..449394d81fbc5 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -14,6 +14,7 @@ #include #include #include +#include extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; @@ -22,6 +23,10 @@ extern unsigned long kexec_start_address; extern unsigned long kexec_indirection_page; extern unsigned long kexec_mach_type; extern unsigned long kexec_boot_atags; +#ifdef CONFIG_KEXEC_HARDBOOT +extern unsigned long kexec_hardboot; +void (*kexec_hardboot_hook)(void); +#endif static atomic_t waiting_for_crash_ipi; @@ -120,10 +125,13 @@ void machine_kexec(struct kimage *image) reboot_code_buffer = page_address(image->control_code_page); /* Prepare parameters for reboot_code_buffer*/ - kexec_start_address = image->start; - kexec_indirection_page = page_list; - kexec_mach_type = machine_arch_type; - kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; + mem_text_write_kernel_word(&kexec_start_address, image->start); + mem_text_write_kernel_word(&kexec_indirection_page, page_list); + mem_text_write_kernel_word(&kexec_mach_type, machine_arch_type); + mem_text_write_kernel_word(&kexec_boot_atags, image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET); +#ifdef CONFIG_KEXEC_HARDBOOT + mem_text_write_kernel_word(&kexec_hardboot, image->hardboot); +#endif /* copy our kernel relocation code to the control code page */ memcpy(reboot_code_buffer, @@ -137,5 +145,11 @@ void machine_kexec(struct kimage *image) if (kexec_reinit) kexec_reinit(); +#ifdef CONFIG_KEXEC_HARDBOOT + /* Run any final machine-specific shutdown code. 
 */
+	if (image->hardboot && kexec_hardboot_hook)
+		kexec_hardboot_hook();
+#endif
+
 	soft_restart(reboot_code_buffer_phys);
 }
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864dc..f5342939d95ae 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -4,6 +4,15 @@
 
 #include
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+#include
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+  #include
+#elif defined(CONFIG_ARCH_APQ8064)
+  #include
+#endif
+#endif
+
 	.globl relocate_new_kernel
 relocate_new_kernel:
@@ -52,6 +61,12 @@ relocate_new_kernel:
 	b 0b
 2:
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+	ldr	r0, kexec_hardboot
+	teq	r0, #0
+	bne	hardboot
+#endif
+
 	/* Jump to relocated kernel */
 	mov lr,r1
 	mov r0,#0
@@ -60,6 +75,40 @@
 	ARM(	mov pc, lr	)
 	THUMB(	bx lr		)
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+hardboot:
+	/* Stash boot arguments in hardboot page:
+	 *  0: KEXEC_HB_PAGE_MAGIC
+	 *  4: kexec_start_address
+	 *  8: kexec_mach_type
+	 * 12: kexec_boot_atags */
+	ldr	r0, =KEXEC_HB_PAGE_ADDR
+	str	r1, [r0, #4]
+	ldr	r1, kexec_mach_type
+	str	r1, [r0, #8]
+	ldr	r1, kexec_boot_atags
+	str	r1, [r0, #12]
+	ldr	r1, =KEXEC_HB_PAGE_MAGIC
+	str	r1, [r0]
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+	ldr	r0, =TEGRA_PMC_BASE
+	ldr	r1, [r0]
+	orr	r1, r1, #0x10
+	str	r1, [r0]
+loop:	b	loop
+#elif defined(CONFIG_ARCH_APQ8064)
+	/* Restart using the PMIC chip, see mach-msm/restart.c */
+	ldr	r0, =APQ8064_TLMM_PHYS
+	mov	r1, #0
+	str	r1, [r0, #0x820]	@ PSHOLD_CTL_SU
+loop:	b	loop
+#else
+#error "No reboot method defined for hardboot."
+#endif
+
+	.ltorg
+#endif
 
 	.align
 
 	.globl kexec_start_address
@@ -79,6 +128,12 @@ kexec_mach_type:
 kexec_boot_atags:
 	.long	0x0
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+	.globl kexec_hardboot
+kexec_hardboot:
+	.long	0x0
+#endif
+
 relocate_new_kernel_end:
 
 	.globl relocate_new_kernel_size
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index e149c17983898..034ca08e5ac2f 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -2595,6 +2595,7 @@ config MSM_HSIC_SYSMON_TEST
 	  can be read and written to send character data to the sysmon
 	  port of the modem over USB.
 
+
 config MSM_CPU_PWRCTL
 	bool "Ensures that krait droop detectors are always off"
 	help
@@ -2602,4 +2603,11 @@ config MSM_CPU_PWRCTL
 	  stand alone power collapse operation. Selecting this option
 	  ensures that they are always off.
 
+config FORCE_FAST_CHARGE
+	bool "Force AC charge mode at will"
+	default y
+	help
+	  A simple sysfs interface to force adapters that
+	  are detected as USB to charge as AC.
+
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 747f1aedf3433..05129909867cc 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -386,8 +386,9 @@ obj-$(CONFIG_MSM_HSIC_SYSMON_TEST) += hsic_sysmon_test.o
 obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
 obj-$(CONFIG_MSM_CPR) += msm_cpr.o
 obj-$(CONFIG_MSM_VP_REGULATOR) += msm_vp.o
+obj-$(CONFIG_FORCE_FAST_CHARGE) += fastchg.o
 ifdef CONFIG_MSM_CPR
 obj-$(CONFIG_DEBUG_FS) += msm_cpr-debug.o
 endif
 obj-$(CONFIG_MSM_CPU_PWRCTL) += msm_cpu_pwrctl.o
diff --git a/arch/arm/mach-msm/fastchg.c b/arch/arm/mach-msm/fastchg.c
new file mode 100644
index 0000000000000..d92ef2f4f0e2d
--- /dev/null
+++ b/arch/arm/mach-msm/fastchg.c
@@ -0,0 +1,175 @@
+/*
+ * based on sysfs interface from:
+ * Chad Froebel &
+ * Jean-Pierre Rasquin
+ * for backwards compatibility
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Possible values for "force_fast_charge" are :
+ *
+ *   0 - disabled (default)
+ *   1 - substitute AC to USB unconditional
+ *   2 - custom
+*/
+
+#include
+#include
+#include
+#include
+
+#define FAST_CHARGE_VERSION "version 1.0 by Paul Reioux"
+
+int force_fast_charge;
+int fast_charge_level;
+
+/* sysfs interface for "force_fast_charge" */
+static ssize_t force_fast_charge_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", force_fast_charge);
+}
+
+static ssize_t force_fast_charge_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf,
+		size_t count)
+{
+
+	int new_force_fast_charge;
+
+	sscanf(buf, "%du", &new_force_fast_charge);
+
+	switch(new_force_fast_charge) {
+	case FAST_CHARGE_DISABLED:
+	case FAST_CHARGE_FORCE_AC:
+	case FAST_CHARGE_FORCE_CUSTOM_MA:
+		force_fast_charge = new_force_fast_charge;
+		return count;
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t charge_level_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", fast_charge_level);
+}
+
+static ssize_t charge_level_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf,
+		size_t count)
+{
+
+	int new_charge_level;
+
+	sscanf(buf, "%du", &new_charge_level);
+
+	switch (new_charge_level) {
+	case FAST_CHARGE_500:
+	case FAST_CHARGE_700:
+	case FAST_CHARGE_900:
+	case FAST_CHARGE_1100:
+	case FAST_CHARGE_1300:
+	case FAST_CHARGE_1500:
+		fast_charge_level = new_charge_level;
+		return count;
+	default:
+		return -EINVAL;
+	}
+	return -EINVAL;
+}
+
+/* sysfs interface for "fast_charge_levels" */
+static ssize_t available_charge_levels_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", FAST_CHARGE_LEVELS);
+}
+
+/* sysfs interface for "version" */
+static ssize_t version_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", FAST_CHARGE_VERSION);
+}
+
+static struct kobj_attribute version_attribute =
+	__ATTR(version, 0444, version_show, NULL);
+
+static struct kobj_attribute available_charge_levels_attribute =
+	__ATTR(available_charge_levels, 0444,
+		available_charge_levels_show, NULL);
+
+static struct kobj_attribute fast_charge_level_attribute =
+	__ATTR(fast_charge_level, 0666,
+		charge_level_show,
+		charge_level_store);
+
+static struct kobj_attribute force_fast_charge_attribute =
+	__ATTR(force_fast_charge, 0666,
+		force_fast_charge_show,
+		force_fast_charge_store);
+
+static struct attribute *force_fast_charge_attrs[] = {
+	&force_fast_charge_attribute.attr,
+	&fast_charge_level_attribute.attr,
+	&available_charge_levels_attribute.attr,
+	&version_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group force_fast_charge_attr_group = {
+	.attrs = force_fast_charge_attrs,
+};
+
+/* Initialize fast charge sysfs folder */
+static struct kobject *force_fast_charge_kobj;
+
+int force_fast_charge_init(void)
+{
+	int force_fast_charge_retval;
+
+	/* Forced fast charge disabled by default */
+	force_fast_charge = FAST_CHARGE_DISABLED;
+
+	force_fast_charge_kobj
+		= kobject_create_and_add("fast_charge", kernel_kobj);
+
+	if (!force_fast_charge_kobj) {
+		return -ENOMEM;
+	}
+
+	force_fast_charge_retval
+		= sysfs_create_group(force_fast_charge_kobj,
+			&force_fast_charge_attr_group);
+
+	if (force_fast_charge_retval)
+		kobject_put(force_fast_charge_kobj);
+
+	return (force_fast_charge_retval);
+}
+
+void force_fast_charge_exit(void)
+{
+	kobject_put(force_fast_charge_kobj);
+}
+
+module_init(force_fast_charge_init);
+module_exit(force_fast_charge_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jean-Pierre Rasquin ");
+MODULE_AUTHOR("Paul Reioux ");
+MODULE_DESCRIPTION("Fast Charge Hack for Android");
+
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index 2d7e8df5d5631..359841fd2034f 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -41,6 +41,10 @@
 	(((_mi) & 0xFF) << 8) | \
 	((_pa) & 0xFF))
 
+#ifdef CONFIG_CPU_FREQ_GOV_ELEMENTALX
+extern int graphics_boost;
+#endif
+
 enum kgsl_iommu_context_id {
 	KGSL_IOMMU_CONTEXT_USER = 0,
 	KGSL_IOMMU_CONTEXT_PRIV = 1,
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 83296117c70ec..d9f6b78b01d93 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -20,6 +20,16 @@
 /* physical offset of RAM */
 #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
 
+#if defined(CONFIG_KEXEC_HARDBOOT)
+#if defined(CONFIG_MACH_APQ8064_FLO)
+#define KEXEC_HB_PAGE_ADDR UL(0x88C00000)
+#elif defined(CONFIG_MACH_APQ8064_MAKO)
+#define KEXEC_HB_PAGE_ADDR UL(0x88600000)
+#else
+#error "Address for kexec hardboot page not defined"
+#endif
+#endif
+
 #define MAX_PHYSMEM_BITS 32
 #define SECTION_SIZE_BITS 28
diff --git a/arch/arm/mach-msm/lge/devices_lge.c b/arch/arm/mach-msm/lge/devices_lge.c
index 504cc1ebe4aa8..f74e4e793c246 100644
--- a/arch/arm/mach-msm/lge/devices_lge.c
+++ b/arch/arm/mach-msm/lge/devices_lge.c
@@ -26,6 +26,10 @@
 #include
 
+#ifdef CONFIG_KEXEC_HARDBOOT
+#include
+#endif
+
 /* setting whether uart console is enalbed or disabled */
 static int uart_console_mode = 0;
 
@@ -187,6 +191,17 @@ void __init lge_add_persistent_ram(void)
 
 void __init lge_reserve(void)
 {
+#ifdef CONFIG_KEXEC_HARDBOOT
+	// Reserve space for hardboot page, just before the ram_console
+	struct membank* bank = &meminfo.bank[0];
+	phys_addr_t start = bank->start + bank->size - SZ_1M - LGE_PERSISTENT_RAM_SIZE;
+	int ret = memblock_remove(start, SZ_1M);
+
if(!ret) + pr_info("Hardboot page reserved at 0x%X\n", start); + else + pr_err("Failed to reserve space for hardboot page at 0x%X!\n", start); +#endif + lge_add_persistent_ram(); } diff --git a/arch/arm/mach-msm/lge/mako/board-mako-sound.c b/arch/arm/mach-msm/lge/mako/board-mako-sound.c index 1fb59ffd10d22..0810e079db049 100644 --- a/arch/arm/mach-msm/lge/mako/board-mako-sound.c +++ b/arch/arm/mach-msm/lge/mako/board-mako-sound.c @@ -34,7 +34,12 @@ #define AGC_COMPRESIION_RATE 0 #define AGC_OUTPUT_LIMITER_DISABLE 1 #define AGC_FIXED_GAIN 12 - +#define AGC_ATK_TIME 5 +#define AGC_REL_TIME 11 +#define AGC_HOLD_TIME 0 +#define AGC_OUTPUT_LIMIT_LEVEL 26 +#define AGC_MAX_GAIN 12 +#define AGC_NOISE_GATE_THRESHOLD 1 #define GPIO_EAR_MIC_BIAS_EN PM8921_GPIO_PM_TO_SYS(20) #define GPIO_EAR_SENSE_N 82 @@ -99,6 +104,12 @@ static struct audio_amp_platform_data amp_platform_data = { .agc_compression_rate = AGC_COMPRESIION_RATE, .agc_output_limiter_disable = AGC_OUTPUT_LIMITER_DISABLE, .agc_fixed_gain = AGC_FIXED_GAIN, + .ATK_time = AGC_ATK_TIME, + .REL_time = AGC_REL_TIME, + .Hold_time = AGC_HOLD_TIME, + .Output_limit_level = AGC_OUTPUT_LIMIT_LEVEL, + .Noise_Gate_Threshold = AGC_NOISE_GATE_THRESHOLD, + .AGC_Max_Gain = AGC_MAX_GAIN, }; #endif diff --git a/arch/arm/mach-msm/restart.c b/arch/arm/mach-msm/restart.c index 8fac40c758632..74ef77a210302 100644 --- a/arch/arm/mach-msm/restart.c +++ b/arch/arm/mach-msm/restart.c @@ -35,6 +35,10 @@ #include "msm_watchdog.h" #include "timer.h" +#ifdef CONFIG_KEXEC_HARDBOOT +#include +#endif + #define WDT0_RST 0x38 #define WDT0_EN 0x40 #define WDT0_BARK_TIME 0x4C @@ -322,6 +326,14 @@ static int __init msm_pmic_restart_init(void) late_initcall(msm_pmic_restart_init); +#ifdef CONFIG_KEXEC_HARDBOOT +static void msm_kexec_hardboot_hook(void) +{ + // Set PMIC to restart-on-poweroff + pm8xxx_reset_pwr_off(1); +} +#endif + static int __init msm_restart_init(void) { #ifdef CONFIG_MSM_DLOAD_MODE @@ -337,6 +349,10 @@ static int __init msm_restart_init(void) restart_reason = MSM_IMEM_BASE + RESTART_REASON_ADDR; pm_power_off = msm_power_off; +#ifdef CONFIG_KEXEC_HARDBOOT + kexec_hardboot_hook = msm_kexec_hardboot_hook; +#endif + return 0; } early_initcall(msm_restart_init); diff --git a/bina.sh b/bina.sh new file mode 100755 index 0000000000000..c9ec113e95ec2 --- /dev/null +++ b/bina.sh @@ -0,0 +1,21 @@ +export ARCH=arm +unset CROSS_COMPILE +#export CROSS_COMPILE=/opt/gcc-4.8-linaro/bin/arm-cortex_a15-linux-gnueabi- +#export CROSS_COMPILE=/opt/gcc-4.7-linaro/bin/arm-cortex_a15-linux-gnueabi- +export CROSS_COMPILE=/opt/sabermod-gcc/bin/arm-eabi- + +#echo "Cleaning old craps..." +#make distclean + +export KBUILD_BUILD_USER=najmi +export KBUILD_BUILD_HOST="kampung-pandan" +export LOCALVERSION="-Semaphore-Pandan-CM11-N4_2.0.5" + +#echo "Copy backup config..." +#cp najmi-mako-config .config +make semaphore_mako_defconfig +make menuconfig +#echo "Begin compile..." +#make -j8 + + diff --git a/config-kexec b/config-kexec new file mode 100644 index 0000000000000..039fa4dcec850 --- /dev/null +++ b/config-kexec @@ -0,0 +1,3425 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm 3.4.0 Kernel Configuration +# +CONFIG_ARM=y +CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_KTIME_SCALAR=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_ARM_TICKET_LOCKS=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +# CONFIG_ARM_PATCH_PHYS_VIRT is not set +CONFIG_NEED_MACH_IO_H=y +CONFIG_NEED_MACH_MEMORY_H=y +CONFIG_PHYS_OFFSET=0x80200000 +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS=y +CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE=0xfffef000 +# CONFIG_ARCH_RANDOM is not set +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_HAVE_IRQ_WORK=y +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-cyanogenmod" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_FHANDLE is not set +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_IRQ_DOMAIN=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set + +# +# RCU Subsystem +# +CONFIG_TREE_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +CONFIG_RCU_FAST_NO_HZ=y +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_RCU_BOOST is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +# CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y 
+CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_PERF_COUNTERS is not set +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +CONFIG_COMPAT_BRK=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_OPROFILE=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_JUMP_LABEL is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_USE_GENERIC_SMP_HELPERS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +# CONFIG_MODULES is not set +CONFIG_STOP_MACHINE=y +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +# CONFIG_BLK_DEV_BSGLIB is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_TEST is not set +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_ROW=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_ROW=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="row" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_UNINLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# 
CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_HIGHBANK is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_PRIMA2 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PICOXCELL is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C24XX is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_ARCH_VT8500 is not set +# CONFIG_ARCH_ZYNQ is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set + +# +# MSM SoC Type +# +# CONFIG_ARCH_MSM7X01A is not set +# CONFIG_ARCH_MSM7X25 is not set +# CONFIG_ARCH_MSM7X27 is not set +# CONFIG_ARCH_MSM7X30 is not set +# CONFIG_ARCH_QSD8X50 is not set +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM8960=y +# CONFIG_ARCH_MSM8930 is not set +CONFIG_ARCH_APQ8064=y +# CONFIG_ARCH_MSM8974 is not set +# CONFIG_ARCH_MPQ8092 is not set +# CONFIG_ARCH_MSM8226 is not set +# CONFIG_ARCH_FSM9XXX is not set +# CONFIG_ARCH_MSM9615 is not set +# CONFIG_ARCH_MSM8625 is not set +# CONFIG_ARCH_MSM9625 is not set +CONFIG_MSM_SOC_REV_NONE=y +# CONFIG_MSM_SOC_REV_A is not set +CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y +CONFIG_ARCH_MSM_KRAIT=y +CONFIG_MSM_SMP=y +CONFIG_ARCH_MSM_KRAITMP=y +CONFIG_MSM_KRAIT_WFE_FIXUP=y +CONFIG_MSM_RPM=y +# CONFIG_MSM_RPM_SMD is not set +CONFIG_MSM_MPM=y +CONFIG_MSM_XO=y +CONFIG_MSM_REMOTE_SPINLOCK_SFPB=y + +# +# MSM Board Selection +# +# CONFIG_MACH_MSM8960_CDP is not set +# CONFIG_MACH_MSM8960_MTP is not set +# CONFIG_MACH_MSM8960_FLUID is not set +# CONFIG_MACH_MSM8960_LIQUID is not set +# CONFIG_MACH_APQ8064_CDP is not set +# CONFIG_MACH_APQ8064_MTP is not set +# CONFIG_MACH_APQ8064_LIQUID is not set +# CONFIG_MACH_MPQ8064_CDP is not set +# CONFIG_MACH_MPQ8064_HRD is not set +# CONFIG_MACH_MPQ8064_DTV is not set +CONFIG_MACH_MSM_DUMMY=y +CONFIG_MACH_LGE=y + +# +# LGE Board Selection +# +CONFIG_BOARD_HEADER_FILE="mach/lge/board_mako.h" +CONFIG_MACH_APQ8064_MAKO=y +# CONFIG_MACH_LGE_DUMMY is not set + +# +# LGE Specific Patches +# +CONFIG_LGE_QFPROM_INTERFACE=y +CONFIG_UPDATE_LCDC_LUT=y +CONFIG_LCD_KCAL=y +CONFIG_EARJACK_DEBUGGER=y +# CONFIG_LGE_CRASH_HANDLER is not set + +# +# ASUSTek 
Board Selection +# +# CONFIG_MACH_APQ8064_FLO is not set +# CONFIG_MACH_APQ8064_DEB is not set +# CONFIG_MACH_ASUSTEK_DUMMY is not set + +# +# ASUSTek Specific Feature +# +# CONFIG_MSM_STACKED_MEMORY is not set +CONFIG_KERNEL_MSM_CONTIG_MEM_REGION=y +CONFIG_MSM_AMSS_VERSION=6225 +# CONFIG_MSM_AMSS_VERSION_6210 is not set +# CONFIG_MSM_AMSS_VERSION_6220 is not set +CONFIG_MSM_AMSS_VERSION_6225=y +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=20000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET=1000000000 +CONFIG_CPU_HAS_L2_PMU=y +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_MSM_FIQ_SUPPORT is not set +# CONFIG_MSM_SERIAL_DEBUGGER is not set +# CONFIG_MSM_PROC_COMM is not set +CONFIG_MSM_SMD=y +# CONFIG_MSM_SMD_PKG3 is not set +CONFIG_MSM_SMD_PKG4=y +CONFIG_MSM_SMD_DEBUG=y +CONFIG_MSM_BAM_DMUX=y +CONFIG_MSM_N_WAY_SMD=y +CONFIG_MSM_N_WAY_SMSM=y +# CONFIG_MSM_RESET_MODEM is not set +CONFIG_MSM_SMD_LOGGING=y +# CONFIG_MSM_IPC_LOGGING is not set +CONFIG_MSM_SMD_NMEA=y +# CONFIG_MSM_HSIC_TTY is not set +CONFIG_MSM_SMD_TTY=y +CONFIG_MSM_SMD_QMI=y +CONFIG_MSM_SMD_PKT=y +CONFIG_MSM_DSPS=y +# CONFIG_MSM_ONCRPCROUTER is not set +CONFIG_MSM_IPC_ROUTER=y +CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y +# CONFIG_MSM_IPC_ROUTER_SECURITY is not set +# CONFIG_MSM_DALRPC is not set +# CONFIG_MSM_CPU_FREQ_SET_MIN_MAX is not set +CONFIG_MSM_AVS_HW=y +# CONFIG_MSM_HW3D is not set +CONFIG_AMSS_7X25_VERSION_2009=y +# CONFIG_AMSS_7X25_VERSION_2008 is not set +CONFIG_RTAC=y +# CONFIG_MSM_VREG_SWITCH_INVERTED is not set +# CONFIG_MSM_DMA_TEST is not set +# CONFIG_WIFI_CONTROL_FUNC is not set +CONFIG_MSM_SLEEP_TIME_OVERRIDE=y +# CONFIG_MSM_MEMORY_LOW_POWER_MODE is not set +CONFIG_MSM_PM_TIMEOUT_HALT=y +# CONFIG_MSM_PM_TIMEOUT_RESET_MODEM is not set +# CONFIG_MSM_PM_TIMEOUT_RESET_CHIP is not set +CONFIG_MSM_IDLE_WAIT_ON_MODEM=0 +CONFIG_MSM_RPM_REGULATOR=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +# CONFIG_MSM_SYSMON_COMM is not set +CONFIG_MSM_PIL=y +# CONFIG_MSM_PIL_MODEM is not set +# CONFIG_MSM_PIL_QDSP6V3 is not set +CONFIG_MSM_PIL_QDSP6V4=y +# CONFIG_MSM_PIL_LPASS_QDSP6V5 is not set +# CONFIG_MSM_PIL_MSS_QDSP6V5 is not set +CONFIG_MSM_PIL_RIVA=y +CONFIG_MSM_PIL_TZAPPS=y +CONFIG_MSM_PIL_DSPS=y +CONFIG_MSM_PIL_VIDC=y +# CONFIG_MSM_PIL_VENUS is not set +CONFIG_MSM_PIL_GSS=y +# CONFIG_MSM_PIL_PRONTO is not set +CONFIG_MSM_SCM=y +CONFIG_MSM_MODEM_8960=y +CONFIG_MSM_LPASS_8960=y +CONFIG_MSM_WCNSS_SSR_8960=y +CONFIG_MSM_GSS_SSR_8064=y +# CONFIG_MSM_BUSPM_DEV is not set +CONFIG_MSM_TZ_LOG=y +CONFIG_MSM_RPM_LOG=y +CONFIG_MSM_RPM_STATS_LOG=y +# CONFIG_MSM_RPM_RBCPR_STATS_LOG is not set +CONFIG_MSM_DIRECT_SCLK_ACCESS=y 
+CONFIG_IOMMU_API=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_NATIVE_RESTART=y +CONFIG_MSM_PM8X60=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_BUS_SCALING=y +CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y +CONFIG_MSM_WATCHDOG=y +# CONFIG_MSM_WATCHDOG_V2 is not set +# CONFIG_MSM_MEMORY_DUMP is not set +CONFIG_MSM_DLOAD_MODE=y +# CONFIG_MSM_JTAG is not set +# CONFIG_MSM_JTAG_MM is not set +# CONFIG_MSM_SLEEP_STATS_DEVICE is not set +CONFIG_MSM_RUN_QUEUE_STATS=y +# CONFIG_MSM_STANDALONE_POWER_COLLAPSE is not set +# CONFIG_MSM_GSBI9_UART is not set +CONFIG_MSM_SHOW_RESUME_IRQ=y +# CONFIG_MSM_FAKE_BATTERY is not set +CONFIG_MSM_QDSP6_APR=y +# CONFIG_MSM_QDSP6_APRV2 is not set +CONFIG_MSM_QDSP6_CODECS=y +# CONFIG_MSM_QDSP6V2_CODECS is not set +CONFIG_MSM_AUDIO_QDSP6=y +# CONFIG_MSM_AUDIO_QDSP6V2 is not set +CONFIG_MSM_ADSP_LOADER=y +CONFIG_MSM_ULTRASOUND=y +# CONFIG_MSM_SPM_V1 is not set +CONFIG_MSM_SPM_V2=y +CONFIG_MSM_L2_SPM=y +CONFIG_MSM_MULTIMEDIA_USE_ION=y +# CONFIG_MSM_OCMEM is not set +# CONFIG_MSM_RTB is not set +# CONFIG_MSM_EBI_ERP is not set +CONFIG_MSM_CACHE_ERP=y +CONFIG_MSM_L1_ERR_PANIC=y +# CONFIG_MSM_L1_RECOV_ERR_PANIC is not set +# CONFIG_MSM_L1_ERR_LOG is not set +# CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS is not set +# CONFIG_MSM_L2_ERP_1BIT_PANIC is not set +CONFIG_MSM_L2_ERP_2BIT_PANIC=y +CONFIG_MSM_DCVS=y +# CONFIG_MSM_CPR is not set +CONFIG_HAVE_ARCH_HAS_CURRENT_TIMER=y +CONFIG_MSM_CACHE_DUMP=y +CONFIG_MSM_CACHE_DUMP_ON_PANIC=y +CONFIG_MSM_HSIC_SYSMON=y +# CONFIG_MSM_HSIC_SYSMON_TEST is not set +CONFIG_MSM_CPU_PWRCTL=y +CONFIG_MSM_USE_USER_ACCESSIBLE_TIMERS=y +CONFIG_FORCE_FAST_CHARGE=y + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_V7=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +# CONFIG_ARM_LPAE is not set +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_SWP_EMULATE=y +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +# CONFIG_CACHE_L2X0 is not set +CONFIG_ARM_L1_CACHE_SHIFT_6=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_STRICT_MEMORY_RWX=y +CONFIG_ARM_NR_BANKS=8 +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +CONFIG_MULTI_IRQ_HANDLER=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_742230 is not set +# CONFIG_ARM_ERRATA_742231 is not set +# CONFIG_ARM_ERRATA_720789 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_ARM_ERRATA_751472 is not set +# CONFIG_ARM_ERRATA_754322 is not set +# CONFIG_ARM_ERRATA_754327 is not set +# CONFIG_ARM_ERRATA_764369 is not set +# CONFIG_KSAPI is not set +CONFIG_ARM_GIC=y +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI is not set +# CONFIG_PCI_SYSCALL is not set +CONFIG_ARCH_SUPPORTS_MSI=y +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_HAVE_SMP=y +CONFIG_SMP=y +# CONFIG_SMP_ON_UP is not set +CONFIG_ARM_CPU_TOPOLOGY=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_HAVE_ARM_SCU=y +# CONFIG_ARM_ARCH_TIMER is not set +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set 
+CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_NR_CPUS=4 +CONFIG_HOTPLUG_CPU=y +CONFIG_LOCAL_TIMERS=y +CONFIG_ARCH_NR_GPIO=0 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HIGHMEM=y +# CONFIG_HIGHPTE is not set +CONFIG_HW_PERF_EVENTS=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_CLEANCACHE=y +CONFIG_USE_USER_ACCESSIBLE_TIMERS=y +# CONFIG_ARCH_MEMORY_PROBE is not set +# CONFIG_ARCH_MEMORY_REMOVE is not set +# CONFIG_ENABLE_DMM is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +CONFIG_CC_STACKPROTECTOR=y +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART is not set +CONFIG_CP_ACCESS=y + +# +# Boot options +# +# CONFIG_USE_OF is not set +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_CMDLINE="" +# CONFIG_XIP_KERNEL is not set +CONFIG_KEXEC=y +CONFIG_ATAGS_PROC=y +CONFIG_KEXEC_HARDBOOT=y +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_STAT_DETAILS is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ELEMENTALX is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_GOV_ELEMENTALX is not set +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set + +# +# ARM CPU frequency scaling drivers +# +# CONFIG_ARM_EXYNOS4210_CPUFREQ is not set +# CONFIG_ARM_EXYNOS4X12_CPUFREQ is not set +# CONFIG_ARM_EXYNOS5250_CPUFREQ is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +# CONFIG_CONSOLE_EARLYSUSPEND is not set +CONFIG_FB_EARLYSUSPEND=y +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_PM_RUNTIME=y +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# 
CONFIG_APM_EMULATION is not set +CONFIG_PM_CLK=y +CONFIG_CPU_PM=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARM_CPU_SUSPEND=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +CONFIG_NETWORK_SECMARK=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +# CONFIG_NF_CONNTRACK_SNMP is not set +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +# CONFIG_NF_CT_NETLINK_TIMEOUT is not set +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +# 
CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +# CONFIG_NETFILTER_XT_TARGET_HL is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# CONFIG_NETFILTER_XT_TARGET_LED is not set +# CONFIG_NETFILTER_XT_TARGET_LOG is not set +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_ECN=y +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +# CONFIG_IP_NF_MATCH_RPFILTER is not set +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_REJECT_SKERR=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y 
+CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +# CONFIG_IP6_NF_MATCH_RPFILTER is not set +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_TARGET_REJECT_SKERR=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=y +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +CONFIG_NET_SCH_PRIO=y +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_INGRESS is not set +# CONFIG_NET_SCH_PLUG is not set + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +CONFIG_CLS_U32_MARK=y +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +CONFIG_NET_CLS_FLOW=y +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +# CONFIG_NET_ACT_POLICE is not set +# CONFIG_NET_ACT_GACT is not set +# CONFIG_NET_ACT_MIRRED is not set +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_NETPRIO_CGROUP is not set +CONFIG_BQL=y +CONFIG_HAVE_BPF_JIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# 
+# CONFIG_BT_HCISMD is not set +# CONFIG_BT_HCIBTUSB is not set +# CONFIG_BT_HCIBTSDIO is not set +# CONFIG_BT_HCIUART is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_MSM_SLEEP is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_MSM_BT_POWER is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=y +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_WEXT is not set +# CONFIG_WIRELESS_EXT_SYSFS is not set +# CONFIG_LIB80211 is not set +# CONFIG_CFG80211_ALLOW_RECONNECT is not set +# CONFIG_MAC80211 is not set +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +CONFIG_RFKILL_PM=y +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_RFKILL_REGULATOR is not set +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +CONFIG_BCM2079X=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_DMA_SHARED_BUFFER=y +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CMA is not set +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_MTD is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ATMEL_PWM is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_TSIF is not set +# CONFIG_TSPP is not set +# CONFIG_HAPTIC_ISA1200 is not set +# CONFIG_PMIC8XXX_VIBRATOR is not set +CONFIG_ANDROID_VIBRATOR=y +# CONFIG_TOUCHSENSE_VIBRATOR is not set +# CONFIG_PMIC8XXX_NFC is not set +# CONFIG_PMIC8XXX_UPL is not set +CONFIG_QSEECOM=y +# CONFIG_QFP_FUSE is not set +CONFIG_USB_HSIC_SMSC_HUB=y +# 
CONFIG_BU52031NVX is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set +CONFIG_SLIMPORT_ANX7808=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_TGT=y +# CONFIG_SCSI_NETLINK is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_LIBFC is not set +# CONFIG_LIBFCOE is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_TARGET_CORE is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +CONFIG_DUMMY=y +# CONFIG_EQUALIZER is not set +CONFIG_MII=y +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set + +# +# CAIF transport drivers +# +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_CALXEDA_XGMAC is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_DM9000 is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_MSM_RMNET is not set +CONFIG_MSM_RMNET_BAM=y +CONFIG_MSM_RMNET_SMUX=y +# CONFIG_QFEC is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_PHYLIB is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_SLIP=y +CONFIG_SLHC=y +CONFIG_SLIP_COMPRESSED=y 
+# CONFIG_SLIP_SMART is not set +CONFIG_SLIP_MODE_SLIP6=y + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +CONFIG_USB_USBNET=y +CONFIG_USB_NET_AX8817X=y +CONFIG_USB_NET_CDCETHER=y +# CONFIG_USB_NET_CDC_EEM is not set +CONFIG_USB_NET_CDC_NCM=y +# CONFIG_USB_NET_DM9601 is not set +# CONFIG_USB_NET_SMSC75XX is not set +# CONFIG_USB_NET_SMSC95XX is not set +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=y +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +CONFIG_USB_NET_CDC_SUBSET=y +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +CONFIG_USB_NET_ZAURUS=y +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_SIERRA_NET is not set +# CONFIG_USB_VL600 is not set +CONFIG_MSM_RMNET_USB=y +CONFIG_WLAN=y +# CONFIG_USB_ZD1201 is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set +# CONFIG_LIBRA_SDIOIF is not set +# CONFIG_ATH6K_LEGACY_EXT is not set +CONFIG_WCNSS_CORE=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCMDHD is not set +# CONFIG_BRCMFMAC is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set +# CONFIG_MWIFIEX is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_LID is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +CONFIG_KEYBOARD_PMIC8XXX=y +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_QCIKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set 
+# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_RMI4_I2C is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_MSM_LEGACY is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_CY8C_TS is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C_QC is not set +# CONFIG_TOUCHSCREEN_FT5X06 is not set +CONFIG_TOUCHSCREEN_LGE_COMMON=y +CONFIG_TOUCHSCREEN_LGE_SYNAPTICS=y +CONFIG_TOUCH_REG_MAP_TM2000=y +# CONFIG_TOUCHSCREEN_ELAN_TF_3K is not set +CONFIG_TOUCHSCREEN_CHARGER_NOTIFY=y +CONFIG_TOUCHSCREEN_SWEEP2WAKE=y +CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE=y +CONFIG_TOUCHSCREEN_PREVENT_SLEEP=y +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_PM8XXX_VIBRATOR is not set +CONFIG_INPUT_PMIC8XXX_PWRKEY=y +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_MPU3050 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_TILT_POLLED is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_GPIO is not set +# CONFIG_INPUT_ISA1200_FF_MEMLESS is not set +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_BOSCH_BMA150 is not set +# CONFIG_STM_LIS3DH is not set +# CONFIG_BMP18X is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set +CONFIG_N_SMUX=y +CONFIG_N_SMUX_LOOPBACK=y +CONFIG_SMUX_CTL=y +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_MSM is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_MSM_HSL=y +CONFIG_SERIAL_MSM_HSL_CONSOLE=y +# CONFIG_SERIAL_BCM_BT_LPM is not set +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not 
set +# CONFIG_SERIAL_MSM_SMD is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set + +# +# Diag Support +# +CONFIG_DIAG_CHAR=y + +# +# DIAG traffic over USB +# +CONFIG_DIAG_OVER_USB=y + +# +# SDIO support for DIAG +# + +# +# HSIC/SMUX support for DIAG +# +CONFIG_DIAGFWD_BRIDGE_CODE=y +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_MSM=y +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_MSM_ROTATOR=y +CONFIG_MSM_ADSPRPC=y +# CONFIG_MMC_GENERIC_CSDIO is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_MSM is not set +CONFIG_I2C_QUP=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPMI is not set +CONFIG_SLIMBUS=y +CONFIG_SLIMBUS_MSM_CTRL=y +# CONFIG_HSI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable Device Drivers -> PPS to see the PTP clock options. 
+# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +CONFIG_DEBUG_GPIO=y +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers: +# +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_MSM_V1 is not set +CONFIG_GPIO_MSM_V2=y +# CONFIG_GPIO_MSM_V3 is not set +# CONFIG_GPIO_FSM9XXX is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_GPIO_PM8XXX=y +CONFIG_GPIO_PM8XXX_MPP=y +# CONFIG_GPIO_PM8XXX_RPC is not set +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_SMB345 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_BATTERY_MSM is not set +# CONFIG_BATTERY_MSM8X60 is not set +CONFIG_ISL9519_CHARGER=y +# CONFIG_SMB137B_CHARGER is not set +# CONFIG_SMB349_CHARGER is not set +# CONFIG_BATTERY_BQ27520 is not set +# CONFIG_BATTERY_BQ27541 is not set +CONFIG_PM8921_CHARGER=y +CONFIG_PM8XXX_CCADC=y +# CONFIG_LTC4088_CHARGER is not set +CONFIG_PM8921_BMS=y +# CONFIG_CHARGER_SMB347 is not set +CONFIG_WIRELESS_CHARGER=y +CONFIG_BQ51051B_CHARGER=y +CONFIG_BATTERY_TEMP_CONTROL=y +# CONFIG_BATTERY_ASUS_BQ27541 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set 
+# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +CONFIG_SENSORS_PM8XXX_ADC=y +# CONFIG_SENSORS_EPM_ADC is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_CAP1106 is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +# CONFIG_THERMAL_MSM_POPMEM is not set +# CONFIG_THERMAL_TSENS is not set +CONFIG_THERMAL_TSENS8960=y +# CONFIG_THERMAL_TSENS8974 is not set +CONFIG_THERMAL_PM8XXX=y +CONFIG_THERMAL_MONITOR=y +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_PMIC8058 is not set +# CONFIG_PMIC8901 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_S5M_CORE is not set +# CONFIG_MFD_WM8400 is not set +# 
CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_MFD_PM8XXX=y +CONFIG_MFD_PM8921_CORE=y +CONFIG_MFD_PM8821_CORE=y +# CONFIG_MFD_PM8018_CORE is not set +CONFIG_MFD_PM8038_CORE=y +CONFIG_MFD_PM8XXX_IRQ=y +CONFIG_MFD_PM8821_IRQ=y +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_AAT2870_CORE is not set +CONFIG_MFD_PM8XXX_DEBUG=y +CONFIG_MFD_PM8XXX_PWM=y +CONFIG_MFD_PM8XXX_MISC=y +CONFIG_MFD_PM8XXX_SPK=y +CONFIG_MFD_PM8XXX_BATT_ALARM=y +CONFIG_WCD9304_CODEC=y +CONFIG_WCD9310_CODEC=y +# CONFIG_WCD9320_CODEC is not set +# CONFIG_MFD_RC5T583 is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_REGULATOR_PM8XXX=y +CONFIG_REGULATOR_MSM_GPIO=y +# CONFIG_REGULATOR_STUB is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=y +CONFIG_VIDEO_V4L2_COMMON=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_DVB_CORE is not set +CONFIG_VIDEO_MEDIA=y + +# +# Multimedia drivers +# +# CONFIG_RC_CORE is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_V4L2=y + +# +# Camera Sensor Selection +# +# CONFIG_IMX074 is not set +# CONFIG_OV5647 is not set +# CONFIG_MT9M114 is not set +CONFIG_IMX111=y +# CONFIG_IMX111_ACT is not set +CONFIG_SEKONIX_LENS_ACT=y +# CONFIG_IMX091 is not set +# CONFIG_IMX091_ACT is not set +CONFIG_IMX119=y +CONFIG_MSM_CAMERA_FLASH_LM3559=y +# CONFIG_IMX074_ACT is not set +# CONFIG_S5K4E1 is not set +# CONFIG_MSM_CAMERA_FLASH_SC628A is not set +# CONFIG_MSM_CAMERA_FLASH_TPS61310 is not set +# CONFIG_IMX072 is not set +# CONFIG_OV2720 is not set +# CONFIG_OV8825 is not set +CONFIG_MSM_CAMERA_FLASH=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_ACTUATOR=y +CONFIG_MSM_EEPROM=y +# CONFIG_IMX074_EEPROM is not set +# CONFIG_IMX091_EEPROM is not set +CONFIG_MSM_GEMINI=y +# CONFIG_MSM_MERCURY is not set +# CONFIG_MSM_CAM_IRQ_ROUTER is not set +# CONFIG_MSM_CPP is not set +# CONFIG_MSM_CCI is not set +# CONFIG_QUP_EXCLUSIVE_TO_CAMERA is not set +CONFIG_MSM_CSI20_HEADER=y +# CONFIG_MSM_CSI30_HEADER is not set +# CONFIG_MSM_CSIPHY is not set +# CONFIG_MSM_CSID is not set +# CONFIG_MSM_CSI2_REGISTER is not set +# CONFIG_MSM_ISPIF is not set +# CONFIG_S5K3L1YX is not set +# CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE is not set +# CONFIG_OV7692 is not set +# CONFIG_MSMB_CAMERA is not set +CONFIG_MEDIA_TUNER=y +# CONFIG_MEDIA_TUNER_CUSTOMISE is not set +CONFIG_MEDIA_TUNER_SIMPLE=y +CONFIG_MEDIA_TUNER_TDA8290=y +CONFIG_MEDIA_TUNER_TDA827X=y +CONFIG_MEDIA_TUNER_TDA18271=y +CONFIG_MEDIA_TUNER_TDA9887=y +CONFIG_MEDIA_TUNER_TEA5761=y 
+CONFIG_MEDIA_TUNER_TEA5767=y +CONFIG_MEDIA_TUNER_MT20XX=y +CONFIG_MEDIA_TUNER_XC2028=y +CONFIG_MEDIA_TUNER_XC5000=y +CONFIG_MEDIA_TUNER_XC4000=y +CONFIG_MEDIA_TUNER_MC44S803=y +CONFIG_VIDEO_V4L2=y +CONFIG_VIDEOBUF2_CORE=y +CONFIG_VIDEOBUF2_MEMOPS=y +CONFIG_VIDEOBUF2_DMA_CONTIG=y +CONFIG_VIDEOBUF2_VMALLOC=y +CONFIG_VIDEOBUF2_DMA_SG=y +CONFIG_VIDEOBUF2_MSM_MEM=y +CONFIG_VIDEO_CAPTURE_DRIVERS=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_HELPER_CHIPS_AUTO=y + +# +# Audio decoders, processors and mixers +# + +# +# RDS decoders +# + +# +# Video decoders +# + +# +# Video and audio decoders +# + +# +# MPEG video encoders +# + +# +# Video encoders +# + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# + +# +# Miscelaneous helper chips +# +# CONFIG_MSM_VCAP is not set +CONFIG_V4L_USB_DRIVERS=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +# CONFIG_USB_GSPCA is not set +# CONFIG_VIDEO_PVRUSB2 is not set +# CONFIG_VIDEO_HDPVR is not set +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_VIDEO_USBVISION is not set +# CONFIG_USB_ET61X251 is not set +# CONFIG_USB_SN9C102 is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_ZR364XX is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_SOC_CAMERA is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +CONFIG_MSM_WFD=y +CONFIG_RADIO_ADAPTERS=y +# CONFIG_RADIO_SI470X is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_I2C_SI4713 is not set +# CONFIG_RADIO_SI4713 is not set +# CONFIG_USB_KEENE is not set +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set + +# +# Texas Instruments WL128x FM driver (ST based) +# +# CONFIG_RADIO_WL128X is not set +# CONFIG_RADIO_IRIS is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_2D=y +CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_PAGE_TABLE_COUNT=24 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_UVESA is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_TMIO is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +CONFIG_FB_VIRTUAL=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_MSM_VIDC=y +CONFIG_MSM_VIDC_1080P=y +CONFIG_MSM_VIDC_VENC=y +CONFIG_MSM_VIDC_VDEC=y +# 
CONFIG_MSM_VIDC_CONTENT_PROTECTION is not set +CONFIG_FB_MSM=y +# CONFIG_FB_MSM_BACKLIGHT is not set +CONFIG_FB_MSM_LOGO=y +CONFIG_FB_MSM_LCDC_HW=y +CONFIG_FB_MSM_TRIPLE_BUFFER=y +CONFIG_FB_MSM_MDP_HW=y +# CONFIG_FB_MSM_MDP22 is not set +# CONFIG_FB_MSM_MDP30 is not set +# CONFIG_FB_MSM_MDP31 is not set +CONFIG_FB_MSM_MDP40=y +# CONFIG_FB_MSM_MDSS is not set +# CONFIG_FB_MSM_MDP_NONE is not set +# CONFIG_FB_MSM_EBI2 is not set +# CONFIG_FB_MSM_MDDI is not set +CONFIG_FB_MSM_MIPI_DSI=y +# CONFIG_FB_MSM_LCDC is not set +# CONFIG_FB_MSM_LVDS is not set +CONFIG_FB_MSM_OVERLAY=y +CONFIG_FB_MSM_DTV=y +# CONFIG_FB_MSM_EXTMDDI is not set +# CONFIG_FB_MSM_TVOUT is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON_VGA is not set +# CONFIG_FB_MSM_MDDI_ORISE is not set +# CONFIG_FB_MSM_MDDI_QUICKVX is not set +# CONFIG_FB_MSM_MDDI_AUTO_DETECT is not set +# CONFIG_FB_MSM_LCDC_AUTO_DETECT is not set +# CONFIG_FB_MSM_LCDC_PANEL is not set +# CONFIG_FB_MSM_MIPI_DSI_TOSHIBA is not set +CONFIG_FB_MSM_MIPI_DSI_LGIT=y +# CONFIG_FB_MSM_MIPI_DSI_RENESAS is not set +# CONFIG_FB_MSM_MIPI_DSI_SIMULATOR is not set +# CONFIG_FB_MSM_MIPI_DSI_NOVATEK is not set +# CONFIG_FB_MSM_MIPI_DSI_LG is not set +# CONFIG_FB_MSM_MIPI_DSI_JDI is not set +# CONFIG_FB_MSM_MIPI_DSI_ORISE is not set +# CONFIG_FB_MSM_LCDC_ST15_WXGA is not set +# CONFIG_FB_MSM_LCDC_ST15_PANEL is not set +# CONFIG_FB_MSM_LCDC_PRISM_WVGA is not set +# CONFIG_FB_MSM_LCDC_SAMSUNG_WSVGA is not set +# CONFIG_FB_MSM_LCDC_CHIMEI_WXGA is not set +# CONFIG_FB_MSM_LCDC_GORDON_VGA is not set +# CONFIG_FB_MSM_LCDC_TOSHIBA_WVGA_PT is not set +# CONFIG_FB_MSM_LCDC_TOSHIBA_FWVGA_PT is not set +# CONFIG_FB_MSM_LCDC_SHARP_WVGA_PT is not set +# CONFIG_FB_MSM_LCDC_AUO_WVGA is not set +# CONFIG_FB_MSM_LCDC_TRULY_HVGA_IPS3P2335 is not set +# CONFIG_FB_MSM_LCDC_TRULY_HVGA_IPS3P2335_PT_PANEL is not set +# CONFIG_FB_MSM_LCDC_SAMSUNG_OLED_PT is not set +# CONFIG_FB_MSM_LCDC_NT35582_WVGA is not set +# CONFIG_FB_MSM_LCDC_WXGA is not set +CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT=y +# CONFIG_FB_MSM_LVDS_CHIMEI_WXGA is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WUXGA is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_CMD_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_1080_HD_PT is not set +# CONFIG_FB_MSM_MIPI_LG_1080_HD_PT is not set +# CONFIG_FB_MSM_MIPI_JDI_1080_HD_PT is not set +# CONFIG_FB_MSM_MIPI_ORISE_VIDEO_720P_PT is not set +# CONFIG_FB_MSM_MIPI_ORISE_CMD_720P_PT is not set +# CONFIG_FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT is not set +# CONFIG_FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT is not set +# CONFIG_FB_MSM_MIPI_NT35510_VIDEO_WVGA_PT is not set +# CONFIG_FB_MSM_MIPI_NT35510_CMD_WVGA_PT is not set +# CONFIG_FB_MSM_MIPI_NT35516_VIDEO_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_NT35516_CMD_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_CHIMEI_WXGA is not set +# CONFIG_FB_MSM_MIPI_CHIMEI_WUXGA is not set +# CONFIG_FB_MSM_MIPI_SIMULATOR_VIDEO is not set +CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y +CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y +CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y +CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y +# CONFIG_FB_MSM_MIPI_NOVATEK_VIDEO_MODE is not set +# CONFIG_FB_MSM_MIPI_LG_VIDEO_MODE is not set +# CONFIG_FB_MSM_MIPI_JDI_CMD_MODE is not set +CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT_PANEL=y +CONFIG_LGIT_VIDEO_WXGA_CABC=y +CONFIG_FB_MSM_EXT_INTERFACE_COMMON=y +CONFIG_FB_MSM_HDMI_COMMON=y +CONFIG_FB_MSM_HDMI_3D=y +# 
CONFIG_FB_MSM_HDMI_ADV7520_PANEL is not set +CONFIG_FB_MSM_HDMI_MSM_PANEL=y +# CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT is not set +# CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT is not set +# CONFIG_FB_MSM_HDMI_MHL_9244 is not set +# CONFIG_FB_MSM_HDMI_MHL_8334 is not set +# CONFIG_FB_MSM_TVOUT_NTSC_M is not set +# CONFIG_FB_MSM_TVOUT_NTSC_J is not set +# CONFIG_FB_MSM_TVOUT_PAL_BDGHIN is not set +# CONFIG_FB_MSM_TVOUT_PAL_M is not set +# CONFIG_FB_MSM_TVOUT_PAL_N is not set +CONFIG_FB_MSM_TVOUT_NONE=y +# CONFIG_FB_MSM_DEFAULT_DEPTH_RGB565 is not set +# CONFIG_FB_MSM_DEFAULT_DEPTH_ARGB8888 is not set +CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888=y +# CONFIG_FB_MSM_EBI2_EPSON_S1D_QVGA_PANEL is not set +# CONFIG_FB_MSM_EBI2_PANEL_DETECT is not set +# CONFIG_EXYNOS_VIDEO is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LP855X is not set +CONFIG_BACKLIGHT_LM3530=y +# CONFIG_BACKLIGHT_LM3533 is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=y +# CONFIG_SOUND_OSS_CORE is not set +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +CONFIG_SND_HWDEP=y +CONFIG_SND_RAWMIDI=y +CONFIG_SND_COMPRESS_OFFLOAD=y +CONFIG_SND_JACK=y +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +# CONFIG_SND_HRTIMER is not set +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +# CONFIG_SND_ARM is not set +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=y +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_6FIRE is not set +CONFIG_SND_SOC=y + +# +# MSM SoC Audio support +# +CONFIG_SND_SOC_MSM_HOSTLESS_PCM=y +CONFIG_SND_SOC_MSM_QDSP6_HDMI_AUDIO=y +CONFIG_SND_SOC_MSM_QDSP6_INTF=y +# CONFIG_SND_SOC_MSM_QDSP6V2_INTF is not set +CONFIG_SND_SOC_VOICE=y +CONFIG_SND_SOC_QDSP6=y +# CONFIG_SND_SOC_QDSP6V2 is not set +CONFIG_SND_SOC_MSM8960=y +CONFIG_SND_SOC_DUAL_AMIC=y +# CONFIG_ASUSTEK_HEADSET is not set +CONFIG_SND_SOC_I2C_AND_SPI=y +# CONFIG_SND_SOC_ALL_CODECS is not set +CONFIG_SND_SOC_WCD9304=y +CONFIG_SND_SOC_WCD9310=y +CONFIG_SND_SOC_MSM_STUB=y +CONFIG_SND_SOC_TPA2028D=y +# CONFIG_SOUND_PRIME is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +CONFIG_UHID=y + +# +# USB Input Devices +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +CONFIG_USB_HIDDEV=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_HOLTEK is not set +# 
CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LOGITECH is not set +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB_ARCH_HAS_XHCI is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +# CONFIG_USB_DEVICEFS is not set +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_SUSPEND=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_EHSET=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_EHCI_MSM_HSIC=y +# CONFIG_USB_EHCI_MSM_HOST4 is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_PEHCI_HCD is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_RENESAS_USBHS is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=y +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DEBUG=y +# CONFIG_USB_STORAGE_REALTEK is not set +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +# CONFIG_USB_EZUSB is not set +# CONFIG_USB_SERIAL_GENERIC is not set +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is 
not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# CONFIG_USB_SERIAL_FTDI_SIO is not set +# CONFIG_USB_SERIAL_FUNSOFT is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MOTOROLA is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +# CONFIG_USB_SERIAL_PL2303 is not set +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_HP4X is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIEMENS_MPI is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set +# CONFIG_USB_SERIAL_ZIO is not set +# CONFIG_USB_SERIAL_SSU100 is not set +CONFIG_USB_SERIAL_CSVT=y +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +CONFIG_USB_EHSET_TEST_FIXTURE=y +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +CONFIG_USB_QCOM_DIAG_BRIDGE=y +# CONFIG_USB_QCOM_DIAG_BRIDGE_TEST is not set +CONFIG_USB_QCOM_MDM_BRIDGE=y +CONFIG_USB_QCOM_KS_BRIDGE=y +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +CONFIG_USB_GADGET_DEBUG_FILES=y +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FUSB300 is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_NET2272 is not set +CONFIG_USB_CI13XXX_MSM=y +# CONFIG_USB_CI13XXX_MSM_HSIC is not set +# CONFIG_USB_DWC3_MSM is not set +# CONFIG_USB_MSM_72K is not set +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_GADGET_SUPERSPEED is not set +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# 
CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_G_ANDROID=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_MSC_PROFILING is not set +CONFIG_MODEM_SUPPORT=y +CONFIG_RMNET_SMD_CTL_CHANNEL="" +CONFIG_RMNET_SMD_DATA_CHANNEL="" +# CONFIG_USB_ANDROID_CDC_ECM is not set + +# +# OTG and related infrastructure +# +CONFIG_USB_OTG_UTILS=y +# CONFIG_USB_OTG_WAKELOCK is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_MSM_OTG_72K is not set +# CONFIG_NOP_USB_XCEIV is not set +CONFIG_USB_MSM_OTG=y +# CONFIG_USB_MSM_ACA is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_CLKGATE=y +# CONFIG_MMC_EMBEDDED_SDIO is not set +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +# CONFIG_MMC_SECDISCARD is not set +CONFIG_MMC_BLOCK_MINORS=32 +# CONFIG_MMC_BLOCK_BOUNCE is not set +# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_SDHCI_PXAV3 is not set +# CONFIG_MMC_SDHCI_PXAV2 is not set +CONFIG_MMC_MSM=y +CONFIG_MMC_MSM_SDC1_SUPPORT=y +CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y +# CONFIG_MMC_MSM_SDC2_SUPPORT is not set +# CONFIG_MMC_MSM_SDC3_SUPPORT is not set +# CONFIG_MMC_MSM_SDC3_POLLING is not set +# CONFIG_MMC_MSM_SDC4_SUPPORT is not set +# CONFIG_MMC_MSM_SDC5_SUPPORT is not set +CONFIG_MMC_MSM_SPS_SUPPORT=y +# CONFIG_MMC_DW is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_MSM_PDM is not set +# CONFIG_LEDS_PMIC_MPP is not set +# CONFIG_LEDS_MSM_TRICOLOR is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_CPLD is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_PCA955X is not set +CONFIG_LEDS_PM8XXX=y +# CONFIG_LEDS_PCA9633 is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_MSM_PMIC is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_RENESAS_TPU is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_OT200 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +# CONFIG_LEDS_TRIGGER_TIMER is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +# CONFIG_LEDS_TRIGGER_SLEEP is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_SWITCH=y +# CONFIG_SWITCH_GPIO is not set +CONFIG_SWITCH_FSA8008=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# 
CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_MSM is not set +# CONFIG_RTC_DRV_MSM7X00A is not set +CONFIG_RTC_DRV_PM8XXX=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# Virtio drivers +# +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_STAGING=y +# CONFIG_USBIP_CORE is not set +# CONFIG_PRISM2_USB is not set +# CONFIG_ECHO is not set +# CONFIG_ASUS_OLED is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5139 is not set +# CONFIG_TRANZPORT is not set +# CONFIG_LINE6_USB is not set +# CONFIG_USB_SERIAL_QUATECH2 is not set +# CONFIG_USB_SERIAL_QUATECH_USB2 is not set +# CONFIG_IIO is not set +CONFIG_QCACHE=y +# CONFIG_ZSMALLOC is not set +# CONFIG_FB_SM7XX is not set +# CONFIG_BCM_WIMAX is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_PERSISTENT_RAM=y +CONFIG_ANDROID_RAM_CONSOLE=y +# CONFIG_PERSISTENT_TRACER is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y +# CONFIG_ANDROID_SWITCH is not set +# CONFIG_ANDROID_INTF_ALARM_DEV is not set +# CONFIG_PHONE is not set +# CONFIG_USB_WPAN_HCD is not set + +# +# Qualcomm Atheros Prima WLAN module +# +CONFIG_PRIMA_WLAN=y +# CONFIG_PRIMA_WLAN_BTAMP is not set +CONFIG_PRIMA_WLAN_LFR=y +CONFIG_PRIMA_WLAN_OKC=y +# CONFIG_PRIMA_WLAN_11AC_HIGH_TP is not set +# CONFIG_QCOM_VOWIFI_11R is not set + +# +# Qualcomm MSM specific device drivers +# +CONFIG_MSM_SSBI=y +CONFIG_SPS=y +# CONFIG_USB_BAM is not 
set +CONFIG_SPS_SUPPORT_BAMDMA=y +# CONFIG_SPS_SUPPORT_NDP_BAM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y + +# +# Hardware Spinlock drivers +# +CONFIG_IOMMU_SUPPORT=y +CONFIG_MSM_IOMMU=y +CONFIG_MSM_IOMMU_GPU_SYNC=y +CONFIG_IOMMU_PGTABLES_L2=y + +# +# Remoteproc drivers (EXPERIMENTAL) +# + +# +# Rpmsg drivers (EXPERIMENTAL) +# +# CONFIG_VIRT_DRIVERS is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_MOBICORE_SUPPORT is not set +# CONFIG_CORESIGHT is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +# CONFIG_EXT2_FS_POSIX_ACL is not set +# CONFIG_EXT2_FS_SECURITY is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_TMPFS_XATTR is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# 
CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_HIGHMEM is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_CPU_STALL_VERBOSE=y +# CONFIG_RCU_CPU_STALL_INFO is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_LKDTM is not set +# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y 
+CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_CPU_FREQ_SWITCH_PROFILER is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_PANIC_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_ARM_UNWIND=y +CONFIG_DEBUG_USER=y +# CONFIG_DEBUG_LL is not set +CONFIG_PID_IN_CONTEXTIDR=y + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=4096 +CONFIG_SECURITY_SELINUX=y +# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set +# CONFIG_SECURITY_SELINUX_DISABLE is not set +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_IMA is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# 
CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_HW is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_AUDIT_GENERIC=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_NLATTR=y +# CONFIG_AVERAGE is not set +# CONFIG_CORDIC is not set diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c index 684d283c70702..73d81e75136c0 100644 --- a/drivers/char/msm_rotator.c +++ b/drivers/char/msm_rotator.c @@ -615,6 +615,7 @@ static int get_bpp(int format) case MDP_YCRCB_H1V1: return 3; + case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: return 2;/* YCrYCb interleave */ @@ -658,6 +659,7 @@ static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h, case MDP_RGB_888: case MDP_RGB_565: case MDP_BGR_565: + case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: case MDP_YCBCR_H1V1: case MDP_YCRCB_H1V1: @@ -1028,7 +1030,7 @@ static int msm_rotator_ycxcx_h2v2(struct msm_rotator_img_info *info, return 0; } -static int msm_rotator_ycrycb(struct msm_rotator_img_info *info, +static int msm_rotator_ycxycx(struct msm_rotator_img_info *info, unsigned int in_paddr, unsigned int out_paddr, unsigned int use_imem, @@ -1038,13 +1040,22 @@ static int msm_rotator_ycrycb(struct msm_rotator_img_info *info, int bpp; uint32_t dst_format; - if (info->src.format == MDP_YCRYCB_H2V1) { + switch (info->src.format) { + case MDP_YCBYCR_H2V1: + if (info->rotations & MDP_ROT_90) + dst_format = MDP_Y_CBCR_H1V2; + else + dst_format = MDP_Y_CBCR_H2V1; + break; + case MDP_YCRYCB_H2V1: if (info->rotations & MDP_ROT_90) dst_format = MDP_Y_CRCB_H1V2; else dst_format = MDP_Y_CRCB_H2V1; - } else + break; + default: return -EINVAL; + } if (info->dst.format != dst_format) return -EINVAL; @@ -1073,10 +1084,18 @@ static int msm_rotator_ycrycb(struct msm_rotator_img_info *info, (info->dst.width) << 16, MSM_ROTATOR_OUT_YSTRIDE1); - iowrite32(GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8), - MSM_ROTATOR_SRC_UNPACK_PATTERN1); - iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), - MSM_ROTATOR_OUT_PACK_PATTERN1); + if (dst_format == MDP_Y_CBCR_H1V2 || + dst_format == MDP_Y_CBCR_H2V1) { + iowrite32(GET_PACK_PATTERN(0, CLR_CB, 0, CLR_CR, 8), + MSM_ROTATOR_SRC_UNPACK_PATTERN1); + iowrite32(GET_PACK_PATTERN(0, 0, 
CLR_CB, CLR_CR, 8), + MSM_ROTATOR_OUT_PACK_PATTERN1); + } else { + iowrite32(GET_PACK_PATTERN(0, CLR_CR, 0, CLR_CB, 8), + MSM_ROTATOR_SRC_UNPACK_PATTERN1); + iowrite32(GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8), + MSM_ROTATOR_OUT_PACK_PATTERN1); + } iowrite32((1 << 18) | /* chroma sampling 1=H2V1 */ (ROTATIONS_TO_BITMASK(info->rotations) << 9) | 1 << 8 | /* ROT_EN */ @@ -1632,8 +1651,9 @@ static int msm_rotator_do_rotate_sub( in_chroma_paddr, out_chroma_paddr); break; + case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: - rc = msm_rotator_ycrycb(img_info, + rc = msm_rotator_ycxycx(img_info, in_paddr, out_paddr, use_imem, msm_rotator_dev->last_session_idx != s, out_chroma_paddr); @@ -1986,6 +2006,12 @@ static int msm_rotator_start(unsigned long arg, case MDP_YCRCB_H1V1: info.dst.format = info.src.format; break; + case MDP_YCBYCR_H2V1: + if (info.rotations & MDP_ROT_90) + info.dst.format = MDP_Y_CBCR_H1V2; + else + info.dst.format = MDP_Y_CBCR_H2V1; + break; case MDP_YCRYCB_H2V1: if (info.rotations & MDP_ROT_90) info.dst.format = MDP_Y_CRCB_H1V2; diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 57f96ebbce4b6..b337301d2ec52 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -67,6 +67,13 @@ config CPU_FREQ_DEFAULT_GOV_POWERSAVE the frequency statically to the lowest frequency supported by the CPU. +config CPU_FREQ_DEFAULT_GOV_SELADANG + bool "seladang" + select CPU_FREQ_GOV_SELADANG + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'seladang' as default. + config CPU_FREQ_DEFAULT_GOV_USERSPACE bool "userspace" select CPU_FREQ_GOV_USERSPACE @@ -88,6 +95,13 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. +config CPU_FREQ_DEFAULT_GOV_ELEMENTALX + bool "elementalx" + select CPU_FREQ_GOV_ELEMENTALX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'elementalx' as default. + config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE bool "conservative" select CPU_FREQ_GOV_CONSERVATIVE @@ -109,6 +123,20 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE loading your cpufreq low-level hardware driver, using the 'interactive' governor for latency-sensitive workloads. +config CPU_FREQ_DEFAULT_GOV_WHEATLEY + bool "wheatley" + select CPU_FREQ_GOV_WHEATLEY + select CPU_FREQ_GOV_PERFORMANCE + ---help--- + Use the CPUFreq governor 'wheatley' as default. + +config CPU_FREQ_DEFAULT_GOV_LAZY + bool "lazy" + select CPU_FREQ_GOV_LAZY + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lazy' as default. + endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -133,6 +161,10 @@ config CPU_FREQ_GOV_POWERSAVE If in doubt, say Y. +config CPU_FREQ_GOV_SELADANG + tristate "'seladang' cpufreq governor" + depends on CPU_FREQ + config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help @@ -166,6 +198,19 @@ config CPU_FREQ_GOV_ONDEMAND If in doubt, say N. +config CPU_FREQ_GOV_ELEMENTALX + tristate "'elementalx' cpufreq policy governor" + select CPU_FREQ_TABLE + help + 'elementalx' - This driver adds a dynamic cpufreq policy governor. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_elementalx. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + config CPU_FREQ_GOV_INTERACTIVE tristate "'interactive' cpufreq policy governor" help @@ -206,6 +251,10 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. 
+config CPU_FREQ_GOV_LAZY + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 46756c5184765..cd3a6666fa209 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -6,10 +6,12 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o # CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o +obj-$(CONFIG_CPU_FREQ_GOV_SELADANG) += cpufreq_seladang.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o +obj-$(CONFIG_CPU_FREQ_GOV_ELEMENTALX) += cpufreq_elementalx.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_elementalx.c b/drivers/cpufreq/cpufreq_elementalx.c new file mode 100644 index 0000000000000..0958bb2048ce4 --- /dev/null +++ b/drivers/cpufreq/cpufreq_elementalx.c @@ -0,0 +1,1780 @@ +/* + * drivers/cpufreq/cpufreq_elementalx.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2013 flar2 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +//gboost +#include +static int orig_up_threshold = 90; + +#define DEF_SAMPLING_RATE (40000) +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) +#define DBS_INPUT_EVENT_MIN_FREQ (960000) +#define DEF_UI_DYNAMIC_SAMPLING_RATE (15000) +#define DBS_UI_SAMPLING_MIN_TIMEOUT (30) +#define DBS_UI_SAMPLING_MAX_TIMEOUT (1000) +#define DBS_UI_SAMPLING_TIMEOUT (80) +#define DBS_SWITCH_MODE_TIMEOUT (1000) + +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; +static unsigned int skip_elementalx = 0; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +#define POWERSAVE_BIAS_MAXLEVEL (1000) +#define POWERSAVE_BIAS_MINLEVEL (-1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ELEMENTALX +static +#endif +struct cpufreq_governor cpufreq_gov_elementalx = { + .name = "elementalx", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; 
+ struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + unsigned int prev_load; + unsigned int max_load; + int input_event_freq; + int cpu; + unsigned int sample_type:1; + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info); +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info); + +static unsigned int dbs_enable; + +static DEFINE_PER_CPU(struct task_struct *, up_task); +static spinlock_t input_boost_lock; +static bool input_event_boost = false; +static unsigned long input_event_boost_expired = 0; + +#ifdef CONFIG_EARLYSUSPEND_BOOST_CPU_SPEED +extern int has_boost_cpu_func; +#endif + +#define TABLE_SIZE 5 +#define MAX(x,y) (x > y ? x : y) +#define MIN(x,y) (x < y ? x : y) +#define FREQ_NEED_BURST(x) (x < 600000 ? 1 : 0) + +static struct cpufreq_frequency_table *tbl = NULL; +static unsigned int *tblmap[TABLE_SIZE] __read_mostly; +static unsigned int tbl_select[4]; +static unsigned int up_threshold_level[2] = {95, 85}; +static int input_event_counter = 0; +struct timer_list freq_mode_timer; + +static inline void switch_turbo_mode(unsigned); +static inline void switch_normal_mode(void); + +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int up_threshold_multi_core; + unsigned int down_differential; + unsigned int down_differential_multi_core; + unsigned int optimal_freq; + unsigned int up_threshold_any_cpu_load; + unsigned int sync_freq; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + int powersave_bias; + unsigned int io_is_busy; + unsigned int two_phase_freq; + unsigned int origin_sampling_rate; + unsigned int ui_sampling_rate; + unsigned int ui_timeout; + unsigned int enable_boost_cpu; + int gboost; +} dbs_tuners_ins = { + .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD, + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL, + .up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD, + .ignore_nice = 0, + .powersave_bias = 0, + .sync_freq = 0, + .optimal_freq = 0, + .two_phase_freq = 0, + .ui_sampling_rate = DEF_UI_DYNAMIC_SAMPLING_RATE, + .ui_timeout = DBS_UI_SAMPLING_TIMEOUT, + .enable_boost_cpu = 1, + .gboost = 1, +}; + +bool is_elementalx_locked(void) +{ + if((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || + (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) + return true; + else + return false; +} +EXPORT_SYMBOL(is_elementalx_locked); + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, 
cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + int freq_reduc; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static int elementalx_powersave_bias_setspeed(struct cpufreq_policy *policy, + struct cpufreq_policy *altpolicy, + int level) +{ + if (level == POWERSAVE_BIAS_MAXLEVEL) { + + __cpufreq_driver_target(policy, + (altpolicy) ? altpolicy->min : policy->min, + CPUFREQ_RELATION_L); + return 1; + } else if (level == POWERSAVE_BIAS_MINLEVEL) { + + __cpufreq_driver_target(policy, + (altpolicy) ? 
altpolicy->max : policy->max, + CPUFREQ_RELATION_H); + return 1; + } + return 0; +} + +static void elementalx_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void elementalx_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + elementalx_powersave_bias_init_cpu(i); + } +} + +void elementalx_boost_cpu(int boost) +{ + int cpu; + + if (!dbs_tuners_ins.enable_boost_cpu) + return; + + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy; + struct cpu_dbs_info_s *dbs_info; + + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); + cpufreq_cpu_put(policy); + + mutex_lock(&dbs_info->timer_mutex); + if (boost) { + skip_elementalx = 1; + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + } else { + skip_elementalx = 0; + } + mutex_unlock(&dbs_info->timer_mutex); + } +} +EXPORT_SYMBOL(elementalx_boost_cpu); + + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(up_threshold_multi_core, up_threshold_multi_core); +show_one(down_differential, down_differential); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(optimal_freq, optimal_freq); +show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load); +show_one(sync_freq, sync_freq); +show_one(enable_boost_cpu, enable_boost_cpu); +show_one(gboost, gboost); + +static ssize_t show_powersave_bias +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias); +} + +static void update_sampling_rate(unsigned int new_rate) +{ + int cpu; + + dbs_tuners_ins.sampling_rate = new_rate + = max(new_rate, min_sampling_rate); + + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy; + struct cpu_dbs_info_s *dbs_info; + unsigned long next_sampling, appointed_at; + + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); + cpufreq_cpu_put(policy); + + mutex_lock(&dbs_info->timer_mutex); + + if (!delayed_work_pending(&dbs_info->work)) { + mutex_unlock(&dbs_info->timer_mutex); + continue; + } + + next_sampling = jiffies + usecs_to_jiffies(new_rate); + appointed_at = dbs_info->work.timer.expires; + + if (time_before(next_sampling, appointed_at)) { + + mutex_unlock(&dbs_info->timer_mutex); + cancel_delayed_work_sync(&dbs_info->work); + mutex_lock(&dbs_info->timer_mutex); + + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, + usecs_to_jiffies(new_rate)); + + } + mutex_unlock(&dbs_info->timer_mutex); + } +} + +show_one(ui_timeout, ui_timeout); + +static ssize_t store_ui_timeout(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + input = max(input, (unsigned int)DBS_UI_SAMPLING_MIN_TIMEOUT); + dbs_tuners_ins.ui_timeout = min(input, (unsigned 
int)DBS_UI_SAMPLING_MAX_TIMEOUT); + + return count; +} + +static int two_phase_freq_array[NR_CPUS] = {[0 ... NR_CPUS-1] = 0} ; + +static ssize_t show_two_phase_freq +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i = 0 ; + int shift = 0 ; + char *buf_pos = buf; + for ( i = 0 ; i < NR_CPUS; i++) { + shift = sprintf(buf_pos,"%d,",two_phase_freq_array[i]); + buf_pos += shift; + } + *(buf_pos-1) = '\0'; + return strlen(buf); +} + +static ssize_t store_two_phase_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + + int ret = 0; + if (NR_CPUS == 1) + ret = sscanf(buf,"%u",&two_phase_freq_array[0]); + else if (NR_CPUS == 2) + ret = sscanf(buf,"%u,%u",&two_phase_freq_array[0], + &two_phase_freq_array[1]); + else if (NR_CPUS == 4) + ret = sscanf(buf, "%u,%u,%u,%u", &two_phase_freq_array[0], + &two_phase_freq_array[1], + &two_phase_freq_array[2], + &two_phase_freq_array[3]); + if (ret < NR_CPUS) + return -EINVAL; + + return count; +} + +static int input_event_min_freq_array[NR_CPUS] = {[0 ... NR_CPUS-1] = DBS_INPUT_EVENT_MIN_FREQ} ; + +static ssize_t show_input_event_min_freq +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i = 0 ; + int shift = 0 ; + char *buf_pos = buf; + for ( i = 0 ; i < NR_CPUS; i++) { + shift = sprintf(buf_pos,"%d,",input_event_min_freq_array[i]); + buf_pos += shift; + } + *(buf_pos-1) = '\0'; + return strlen(buf); +} + +static ssize_t store_input_event_min_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + + int ret = 0; + if (NR_CPUS == 1) + ret = sscanf(buf,"%u",&input_event_min_freq_array[0]); + else if (NR_CPUS == 2) + ret = sscanf(buf,"%u,%u",&input_event_min_freq_array[0], + &input_event_min_freq_array[1]); + else if (NR_CPUS == 4) + ret = sscanf(buf, "%u,%u,%u,%u", &input_event_min_freq_array[0], + &input_event_min_freq_array[1], + &input_event_min_freq_array[2], + &input_event_min_freq_array[3]); + if (ret < NR_CPUS) + return -EINVAL; + + return count; +} + +show_one(ui_sampling_rate, ui_sampling_rate); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input == dbs_tuners_ins.origin_sampling_rate) + return count; + update_sampling_rate(input); + dbs_tuners_ins.origin_sampling_rate = dbs_tuners_ins.sampling_rate; + return count; +} + +static ssize_t store_ui_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.ui_sampling_rate = max(input, min_sampling_rate); + + return count; +} + +static ssize_t store_sync_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sync_freq = input; + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + 
dbs_tuners_ins.optimal_freq = input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_up_threshold_multi_core(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold_multi_core = input; + return count; +} + +static ssize_t store_up_threshold_any_cpu_load(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold_any_cpu_load = input; + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold || + input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { + return -EINVAL; + } + + dbs_tuners_ins.down_differential = input; + + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { + return count; + } + dbs_tuners_ins.ignore_nice = input; + + + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + int input = 0; + int bypass = 0; + int ret, cpu, reenable_timer, j; + struct cpu_dbs_info_s *dbs_info; + + struct cpumask cpus_timer_done; + cpumask_clear(&cpus_timer_done); + + ret = sscanf(buf, "%d", &input); + + if (ret != 1) + return -EINVAL; + + if (input >= POWERSAVE_BIAS_MAXLEVEL) { + input = POWERSAVE_BIAS_MAXLEVEL; + bypass = 1; + } else if (input <= POWERSAVE_BIAS_MINLEVEL) { + input = POWERSAVE_BIAS_MINLEVEL; + bypass = 1; + } + + if (input == dbs_tuners_ins.powersave_bias) { + + return count; + } + + reenable_timer = ((dbs_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MAXLEVEL) || + (dbs_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MINLEVEL)); + + dbs_tuners_ins.powersave_bias = 
input; + if (!bypass) { + if (reenable_timer) { + + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!dbs_info->cur_policy) { + pr_err("Dbs policy is NULL\n"); + goto skip_this_cpu; + } + if (cpumask_test_cpu(j, dbs_info-> + cur_policy->cpus)) + goto skip_this_cpu; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + if (dbs_info->cur_policy) { + + dbs_timer_init(dbs_info); + } +skip_this_cpu: + unlock_policy_rwsem_write(cpu); + } + } + elementalx_powersave_bias_init(); + } else { + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!dbs_info->cur_policy) { + pr_err("Dbs policy is NULL\n"); + goto skip_this_cpu_bypass; + } + if (cpumask_test_cpu(j, dbs_info-> + cur_policy->cpus)) + goto skip_this_cpu_bypass; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + + if (dbs_info->cur_policy) { + + mutex_lock(&dbs_info->timer_mutex); + dbs_timer_exit(dbs_info); + + elementalx_powersave_bias_setspeed( + dbs_info->cur_policy, + NULL, + input); + + mutex_unlock(&dbs_info->timer_mutex); + } +skip_this_cpu_bypass: + unlock_policy_rwsem_write(cpu); + } + } + + return count; +} + +static ssize_t store_enable_boost_cpu(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if(ret != 1) + return -EINVAL; + + dbs_tuners_ins.enable_boost_cpu = (input > 0 ? input : 0); +#ifdef CONFIG_EARLYSUSPEND_BOOST_CPU_SPEED + has_boost_cpu_func = (unsigned int) dbs_tuners_ins.enable_boost_cpu; +#endif + return count; +} + +static ssize_t store_gboost(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if(ret != 1) + return -EINVAL; + dbs_tuners_ins.gboost = (input > 0 ? 
input : 0); + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(up_threshold_multi_core); +define_one_global_rw(optimal_freq); +define_one_global_rw(up_threshold_any_cpu_load); +define_one_global_rw(sync_freq); +define_one_global_rw(two_phase_freq); +define_one_global_rw(input_event_min_freq); +define_one_global_rw(ui_sampling_rate); +define_one_global_rw(ui_timeout); +define_one_global_rw(enable_boost_cpu); +define_one_global_rw(gboost); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &up_threshold_multi_core.attr, + &optimal_freq.attr, + &up_threshold_any_cpu_load.attr, + &sync_freq.attr, + &two_phase_freq.attr, + &input_event_min_freq.attr, + &ui_sampling_rate.attr, + &ui_timeout.attr, + &enable_boost_cpu.attr, + &gboost.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "elementalx", +}; + + +static inline void switch_turbo_mode(unsigned timeout) +{ + if (timeout > 0) + mod_timer(&freq_mode_timer, jiffies + msecs_to_jiffies(timeout)); + + tbl_select[0] = 2; + tbl_select[1] = 3; + tbl_select[2] = 4; + tbl_select[3] = 4; +} + +static inline void switch_normal_mode(void) +{ + if (input_event_counter > 0) + return; + + tbl_select[0] = 0; + tbl_select[1] = 1; + tbl_select[2] = 2; + tbl_select[3] = 3; +} + +static void switch_mode_timer(unsigned long data) +{ + switch_normal_mode(); +} + +static void dbs_init_freq_map_table(struct cpufreq_policy *policy) +{ + unsigned int min_diff, top1, top2; + int cnt, i, j; + + tbl = cpufreq_frequency_get_table(0); + min_diff = policy->cpuinfo.max_freq; + + + for (cnt = 0; (tbl[cnt].frequency != CPUFREQ_TABLE_END); cnt++) { + if (cnt > 0) + min_diff = MIN(tbl[cnt].frequency - tbl[cnt-1].frequency, min_diff); + } + + + top1 = (policy->cpuinfo.max_freq + policy->cpuinfo.min_freq) / 2; + top2 = (policy->cpuinfo.max_freq + top1) / 2; + + for (i = 0; i < TABLE_SIZE; i++) { + + tblmap[i] = kmalloc(sizeof(unsigned int) * cnt, GFP_KERNEL); + BUG_ON(!tblmap[i]); + + for (j = 0; j < cnt; j++) + tblmap[i][j] = tbl[j].frequency; + } + + for (j = 0; j < cnt; j++) { + + if (tbl[j].frequency < top1) { + tblmap[0][j] += MAX((top1 - tbl[j].frequency)/3, min_diff); + } + + if (tbl[j].frequency < top2) { + tblmap[1][j] += MAX((top2 - tbl[j].frequency)/3, min_diff); + tblmap[2][j] += MAX(((top2 - tbl[j].frequency)*2)/5, min_diff); + tblmap[3][j] += MAX((top2 - tbl[j].frequency)/2, min_diff); + } + else { + tblmap[3][j] += MAX((policy->cpuinfo.max_freq - tbl[j].frequency)/3, min_diff); + } + + tblmap[4][j] += MAX((policy->cpuinfo.max_freq - tbl[j].frequency)/2, min_diff); + } + + switch_normal_mode(); + + + init_timer(&freq_mode_timer); + freq_mode_timer.function = switch_mode_timer; + freq_mode_timer.data = 0; + +#if 0 + + for (i = 0; i < TABLE_SIZE; i++) { + pr_info("Table %d shows:\n", i+1); + for (j = 0; j < cnt; j++) { + pr_info("%02d: %8u\n", j, tblmap[i][j]); + } + } +#endif +} + +static void dbs_deinit_freq_map_table(void) +{ + int i; + + if (!tbl) + return; + + tbl = NULL; + + for (i = 0; i < TABLE_SIZE; i++) + kfree(tblmap[i]); + + 
del_timer(&freq_mode_timer); +} + +static inline int get_cpu_freq_index(unsigned int freq) +{ + static int saved_index = 0; + int index; + + if (!tbl) { + pr_warn("tbl is NULL, use previous value %d\n", saved_index); + return saved_index; + } + + for (index = 0; (tbl[index].frequency != CPUFREQ_TABLE_END); index++) { + if (tbl[index].frequency >= freq) { + saved_index = index; + break; + } + } + + return index; +} + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned load, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) { + trace_cpufreq_interactive_already (p->cpu, load, p->cur, p->cur, p->cur); + return; + } + + trace_cpufreq_interactive_target (p->cpu, load, p->cur, p->cur, freq); + + __cpufreq_driver_target(p, freq, (dbs_tuners_ins.powersave_bias || freq < p->max) ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); + + trace_cpufreq_interactive_up (p->cpu, freq, p->cur); +} + +int set_two_phase_freq(int cpufreq) +{ + int i = 0; + for ( i = 0 ; i < NR_CPUS; i++) + two_phase_freq_array[i] = cpufreq; + return 0; +} + +void set_two_phase_freq_by_cpu ( int cpu_nr, int cpufreq){ + two_phase_freq_array[cpu_nr-1] = cpufreq; +} + +int input_event_boosted(void) +{ + unsigned long flags; + + + spin_lock_irqsave(&input_boost_lock, flags); + if (input_event_boost) { + if (time_before(jiffies, input_event_boost_expired)) { + spin_unlock_irqrestore(&input_boost_lock, flags); + return 1; + } + input_event_boost = false; + dbs_tuners_ins.sampling_rate = dbs_tuners_ins.origin_sampling_rate; + } + spin_unlock_irqrestore(&input_boost_lock, flags); + + return 0; +} + +static unsigned int get_cpu_current_load(unsigned int j, unsigned int *record) +{ + + unsigned int cur_load = 0; + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + if (record) + *record = j_dbs_info->prev_load; + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) + (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) + (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) + (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + return j_dbs_info->prev_load; + + cur_load = 100 * (wall_time - idle_time) / wall_time; + j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load); + j_dbs_info->prev_load = cur_load; + + return cur_load; +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + + unsigned int load_at_max_freq = 0; + unsigned int max_load_freq; + + unsigned int cur_load = 0; + + unsigned int max_load_other_cpu = 0; + struct cpufreq_policy *policy; + unsigned int j, prev_load = 0, 
freq_next; + + static unsigned int phase = 0; + static unsigned int counter = 0; + unsigned int nr_cpus; + + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + + + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + unsigned int load_freq; + int freq_avg; + + cur_load = get_cpu_current_load(j, &prev_load); + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = cur_load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + + + load_at_max_freq += (cur_load * policy->cur) / + policy->cpuinfo.max_freq; + } + + for_each_online_cpu(j) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + if (j == policy->cpu) + continue; + + if (max_load_other_cpu < j_dbs_info->max_load) + max_load_other_cpu = j_dbs_info->max_load; + + if ((j_dbs_info->cur_policy != NULL) + && (j_dbs_info->cur_policy->cur == + j_dbs_info->cur_policy->max)) { + + if (policy->cur >= dbs_tuners_ins.optimal_freq) + max_load_other_cpu = + dbs_tuners_ins.up_threshold_any_cpu_load; + } + } + + cpufreq_notify_utilization(policy, load_at_max_freq); + + +//gboost +//printk("graphics boost = %d\n", graphics_boost); +if (graphics_boost < 4) { + + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + + if (counter < 5) { + counter++; + if (counter > 2) { + + phase = 1; + } + } + + nr_cpus = num_online_cpus(); + dbs_tuners_ins.two_phase_freq = two_phase_freq_array[nr_cpus-1]; + if (dbs_tuners_ins.two_phase_freq < policy->cur) + phase=1; + + if (dbs_tuners_ins.two_phase_freq != 0 && phase == 0) { + + dbs_freq_increase(policy, cur_load, dbs_tuners_ins.two_phase_freq); + } else { + + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, cur_load, policy->max); + } + return; + } + +} else { + if (max_load_freq > up_threshold_level[1] * policy->cur) { + unsigned int avg_load = (prev_load + cur_load) >> 1; + int index = get_cpu_freq_index(policy->cur); + + + if (FREQ_NEED_BURST(policy->cur) && cur_load > up_threshold_level[0]) { + freq_next = tblmap[tbl_select[3]][index]; + } + + else if (avg_load > up_threshold_level[0]) { + freq_next = tblmap[tbl_select[3]][index]; + } + + else if (avg_load <= up_threshold_level[1]) { + freq_next = tblmap[tbl_select[0]][index]; + } + + else { + + if (cur_load > up_threshold_level[0]) { + freq_next = tblmap[tbl_select[2]][index]; + } + + else { + freq_next = tblmap[tbl_select[1]][index]; + } + } + dbs_freq_increase(policy, cur_load, freq_next); + if (policy->cur == policy->max) + this_dbs_info->rate_mult = dbs_tuners_ins.sampling_down_factor; + + return; + } +} + +if (dbs_tuners_ins.gboost) { + if (counter > 0) { + counter--; + if (counter == 0) { + + phase = 0; + } + } +} + +//graphics boost + if (graphics_boost < 4 && dbs_tuners_ins.gboost) { + dbs_tuners_ins.up_threshold = 60 + (graphics_boost * 10); + } else { + dbs_tuners_ins.up_threshold = orig_up_threshold; + } +//end + + if (num_online_cpus() > 1) { + if (max_load_other_cpu > + dbs_tuners_ins.up_threshold_any_cpu_load) { + if (policy->cur < dbs_tuners_ins.sync_freq) + dbs_freq_increase(policy, cur_load, + dbs_tuners_ins.sync_freq); + return; + } + + if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core * + policy->cur) { + if (policy->cur < dbs_tuners_ins.optimal_freq) + dbs_freq_increase(policy, cur_load, + dbs_tuners_ins.optimal_freq); + return; + } + } + + if (input_event_boosted()) + { + trace_cpufreq_interactive_already 
(policy->cpu, cur_load, policy->cur, policy->cur, policy->cur); + return; + } + + + + if (policy->cur == policy->min){ + trace_cpufreq_interactive_already (policy->cpu, cur_load, policy->cur, policy->cur, policy->cur); + return; + } + + + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (num_online_cpus() > 1) { + if (max_load_other_cpu > + (dbs_tuners_ins.up_threshold_multi_core - + dbs_tuners_ins.down_differential) && + freq_next < dbs_tuners_ins.sync_freq) + freq_next = dbs_tuners_ins.sync_freq; + + if (dbs_tuners_ins.optimal_freq > policy->min && max_load_freq > + (dbs_tuners_ins.up_threshold_multi_core - + dbs_tuners_ins.down_differential_multi_core) * + policy->cur) + freq_next = dbs_tuners_ins.optimal_freq; + + } + + if (dbs_tuners_ins.powersave_bias) + freq_next = powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); + + trace_cpufreq_interactive_target (policy->cpu, cur_load, policy->cur, policy->cur, freq_next); + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + trace_cpufreq_interactive_down (policy->cpu, freq_next, policy->cur); + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + int delay = msecs_to_jiffies(50); + + mutex_lock(&dbs_info->timer_mutex); + + if (skip_elementalx) + goto sched_wait; + + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + if (input_event_boosted()) + goto sched_wait; + + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + +sched_wait: + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +static int should_io_be_busy(void) +{ + return 1; +} + + +static void dbs_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + int i; + struct cpu_dbs_info_s *dbs_info; + unsigned long flags; + int input_event_min_freq; + + if ((dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || + (dbs_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) { + + return; + } + + if (type == EV_SYN && code == SYN_REPORT) { + + dbs_tuners_ins.powersave_bias = 0; + } + else if (type == EV_ABS && code == ABS_MT_TRACKING_ID) { + + if (value != -1) { + + input_event_min_freq = 
input_event_min_freq_array[num_online_cpus() - 1]; + + input_event_counter++; + switch_turbo_mode(0); + + + spin_lock_irqsave(&input_boost_lock, flags); + input_event_boost = true; + input_event_boost_expired = jiffies + usecs_to_jiffies(dbs_tuners_ins.sampling_rate * 2); + spin_unlock_irqrestore(&input_boost_lock, flags); + + for_each_online_cpu(i) { + dbs_info = &per_cpu(od_cpu_dbs_info, i); + + if (dbs_info->cur_policy + && dbs_info->cur_policy->cur < input_event_min_freq) { + dbs_info->input_event_freq = input_event_min_freq; + wake_up_process(per_cpu(up_task, i)); + } + } + } + else { + if (likely(input_event_counter > 0)) + input_event_counter--; + else + pr_warning("dbs_input_event: Touch isn't paired!\n"); + + + switch_turbo_mode(DBS_SWITCH_MODE_TIMEOUT); + } + } +} + +static int input_dev_filter(const char *input_dev_name) +{ + if (strstr(input_dev_name, "touchscreen") || + strstr(input_dev_name, "keypad")) { + return 0; + } else { + return 1; + } +} + +static int dbs_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + + if (input_dev_filter(dev->name)) + return -ENODEV; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "cpufreq"; + + error = input_register_handle(handle); + if (error) + goto err2; + + error = input_open_device(handle); + if (error) + goto err1; + + return 0; +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return error; +} + +static void dbs_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id dbs_ids[] = { + /* multi-touch touchscreen */ + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_ABSBIT, + .evbit = { BIT_MASK(EV_ABS) }, + .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = + BIT_MASK(ABS_MT_POSITION_X) | + BIT_MASK(ABS_MT_POSITION_Y) }, + }, + /* touchpad */ + { + .flags = INPUT_DEVICE_ID_MATCH_KEYBIT | + INPUT_DEVICE_ID_MATCH_ABSBIT, + .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, + .absbit = { [BIT_WORD(ABS_X)] = + BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, + }, + /* Keypad */ + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_KEY) }, + }, + { }, +}; + +static struct input_handler dbs_input_handler = { + .event = dbs_input_event, + .connect = dbs_input_connect, + .disconnect = dbs_input_disconnect, + .name = "cpufreq_ond", + .id_table = dbs_ids, +}; + +int set_input_event_min_freq(int cpufreq) +{ + int i = 0; + for ( i = 0 ; i < NR_CPUS; i++) + input_event_min_freq_array[i] = cpufreq; + return 0; +} + +void set_input_event_min_freq_by_cpu ( int cpu_nr, int cpufreq){ + input_event_min_freq_array[cpu_nr-1] = cpufreq; +} +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if 
(dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + elementalx_powersave_bias_init_cpu(cpu); + set_two_phase_freq(1574000); + set_input_event_min_freq_by_cpu(1, 1190000); + set_input_event_min_freq_by_cpu(2, 1036000); + set_input_event_min_freq_by_cpu(3, 729000); + set_input_event_min_freq_by_cpu(4, 729000); + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + if (dbs_tuners_ins.sampling_rate < DEF_SAMPLING_RATE) + dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; + dbs_tuners_ins.origin_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + + if (dbs_tuners_ins.optimal_freq == 0) + dbs_tuners_ins.optimal_freq = policy->min; + + if (dbs_tuners_ins.sync_freq == 0) + dbs_tuners_ins.sync_freq = policy->min; + + dbs_init_freq_map_table(policy); + } + if (!cpu) + rc = input_register_handler(&dbs_input_handler); + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + + if (!elementalx_powersave_bias_setspeed( + this_dbs_info->cur_policy, + NULL, + dbs_tuners_ins.powersave_bias)) + dbs_timer_init(this_dbs_info); + trace_cpufreq_interactive_target (cpu, 0, 0, 0, 0); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + dbs_enable--; + this_dbs_info->cur_policy = NULL; + if (!cpu) + input_unregister_handler(&dbs_input_handler); + mutex_unlock(&dbs_mutex); + if (!dbs_enable) { + dbs_deinit_freq_map_table(); + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + } + trace_cpufreq_interactive_target (cpu, 0, 0, 0, 0); + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if(this_dbs_info->cur_policy){ + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + else if (dbs_tuners_ins.powersave_bias != 0) + elementalx_powersave_bias_setspeed( + this_dbs_info->cur_policy, + policy, + dbs_tuners_ins.powersave_bias); + } + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int cpufreq_gov_dbs_up_task(void *data) +{ + struct cpufreq_policy *policy; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int cpu = smp_processor_id(); + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + + if (kthread_should_stop()) + break; + + set_current_state(TASK_RUNNING); + + get_online_cpus(); + + if (lock_policy_rwsem_write(cpu) < 0) + goto bail_acq_sema_failed; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + policy = this_dbs_info->cur_policy; + if (!policy) { + + goto bail_incorrect_governor; + } + + mutex_lock(&this_dbs_info->timer_mutex); + + + dbs_tuners_ins.powersave_bias = 0; + dbs_freq_increase(policy, this_dbs_info->prev_load, this_dbs_info->input_event_freq); + this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu, &this_dbs_info->prev_cpu_wall); + + 
mutex_unlock(&this_dbs_info->timer_mutex); + +bail_incorrect_governor: + unlock_policy_rwsem_write(cpu); + +bail_acq_sema_failed: + put_online_cpus(); + + dbs_tuners_ins.sampling_rate = dbs_tuners_ins.ui_sampling_rate; + } + + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + u64 idle_time; + unsigned int i; + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + struct task_struct *pthread; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, NULL); + put_cpu(); + if (idle_time != -1ULL) { + + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + +#ifdef CONFIG_EARLYSUSPEND_BOOST_CPU_SPEED + has_boost_cpu_func = (unsigned int) dbs_tuners_ins.enable_boost_cpu; +#endif + + spin_lock_init(&input_boost_lock); + + for_each_possible_cpu(i) { + pthread = kthread_create_on_node(cpufreq_gov_dbs_up_task, + NULL, cpu_to_node(i), + "kdbs_up/%d", i); + if (likely(!IS_ERR(pthread))) { + kthread_bind(pthread, i); + sched_setscheduler_nocheck(pthread, SCHED_FIFO, &param); + get_task_struct(pthread); + per_cpu(up_task, i) = pthread; + } + } + return cpufreq_register_governor(&cpufreq_gov_elementalx); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + unsigned int i; + + cpufreq_unregister_governor(&cpufreq_gov_elementalx); + for_each_possible_cpu(i) { + struct cpu_dbs_info_s *this_dbs_info = + &per_cpu(od_cpu_dbs_info, i); + mutex_destroy(&this_dbs_info->timer_mutex); + if (per_cpu(up_task, i)) { + kthread_stop(per_cpu(up_task, i)); + put_task_struct(per_cpu(up_task, i)); + } + } +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("flar2 "); +MODULE_DESCRIPTION("'cpufreq_elementalx' - multiphase dynamic cpufreq governor"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ELEMENTALX +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_lazy.c b/drivers/cpufreq/cpufreq_lazy.c new file mode 100644 index 0000000000000..58bde440c74a6 --- /dev/null +++ b/drivers/cpufreq/cpufreq_lazy.c @@ -0,0 +1,741 @@ +/* + * drivers/cpufreq/cpufreq_lazy.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2011 Ezekeel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLE_RATE (15000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (90) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. 
The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate, current_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY +static +#endif +struct cpufreq_governor cpufreq_gov_lazy = { + .name = "lazy", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + int cpu; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int min_timeinstate; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, +}; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
+ */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(current_sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void lazy_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void lazy_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + lazy_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_lazy Governor Tunables */ +#define show_one(file_name, object) \ + static ssize_t show_##file_name \ + (struct kobject *kobj, struct attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ + } +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); +show_one(min_timeinstate, min_timeinstate); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + dbs_tuners_ins.min_timeinstate = max(dbs_tuners_ins.min_timeinstate, dbs_tuners_ins.sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + 
unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + dbs_tuners_ins.powersave_bias = input; + lazy_powersave_bias_init(); + return count; +} + +static ssize_t store_min_timeinstate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.min_timeinstate = max(input, dbs_tuners_ins.sampling_rate); + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(min_timeinstate); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &min_timeinstate.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "lazy", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + current_sampling_rate = dbs_tuners_ins.sampling_rate; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of lazy, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. + */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + /* if we are already at full speed then break out early */ + if (!dbs_tuners_ins.powersave_bias) { + if (policy->cur == policy->max) + return; + + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + } else { + int freq = powersave_bias_target(policy, policy->max, + CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + current_sampling_rate = dbs_tuners_ins.min_timeinstate; + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
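+ *
+ * Worked example with the default tunables (up_threshold = 80,
+ * down_differential = 10), assuming freq_avg == policy->cur == 1000 MHz
+ * and a measured load of 50%: max_load_freq = 50 * 1000 = 50000, which
+ * is below (80 - 10) * 1000 = 70000, so we request
+ * freq_next = 50000 / 70 ~= 714 MHz, the lowest frequency that keeps the
+ * same absolute load under the effective 70% threshold.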
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + current_sampling_rate = dbs_tuners_ins.min_timeinstate; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; + int sample_type = dbs_info->sample_type; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + delay = usecs_to_jiffies(current_sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = usecs_to_jiffies(current_sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(current_sampling_rate); + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
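+ *
+ * Either way this only seeds the governor-start default; the io_is_busy
+ * sysfs file defined above can override it at runtime.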
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + lazy_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = max(min_sampling_rate, DEF_SAMPLE_RATE); + current_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.min_timeinstate = latency * LATENCY_MULTIPLIER; + dbs_tuners_ins.min_timeinstate = max(dbs_tuners_ins.sampling_rate, dbs_tuners_ins.min_timeinstate); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
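+ *
+ * Without idle micro accounting we instead fall back below to
+ * MIN_SAMPLING_RATE_RATIO * ten ticks, e.g. 2 * 100000 uS = 200000 uS
+ * at HZ=100.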
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + return cpufreq_register_governor(&cpufreq_gov_lazy); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_lazy); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Ezekeel "); +MODULE_DESCRIPTION("'cpufreq_lazy' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_seladang.c b/drivers/cpufreq/cpufreq_seladang.c new file mode 100644 index 0000000000000..d94303cedc18c --- /dev/null +++ b/drivers/cpufreq/cpufreq_seladang.c @@ -0,0 +1,839 @@ +/* + * drivers/cpufreq/cpufreq_seladang.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2012 Ezekeel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_TARGET_RESIDENCY (10000) +#define DEF_ALLOWED_MISSES (5) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
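+ *
+ * Compared to the lazy governor above, seladang adds target_residency and
+ * allowed_misses tunables meant to gate max-frequency boosts on cpuidle
+ * deep-idle residency, though the cpuidle sampling that would feed them
+ * is commented out in this revision (see dbs_check_cpu below).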
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate, num_misses; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SELADANG +static +#endif +struct cpufreq_governor cpufreq_gov_seladang = { + .name = "seladang", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + unsigned long long prev_idletime; + unsigned long long prev_idleusage; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int target_residency; + unsigned int allowed_misses; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, + .target_residency = DEF_TARGET_RESIDENCY, + .allowed_misses = DEF_ALLOWED_MISSES, +}; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. 
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. + */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void seladang_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void seladang_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + seladang_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_seladang Governor Tunables */ +#define show_one(file_name, object) \ + static ssize_t show_##file_name \ + (struct kobject *kobj, struct attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ + } +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); +show_one(target_residency, target_residency); +show_one(allowed_misses, allowed_misses); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + 
dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + dbs_tuners_ins.powersave_bias = input; + seladang_powersave_bias_init(); + return count; +} + +static ssize_t store_target_residency(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.target_residency = input; + return count; +} + +static ssize_t store_allowed_misses(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.allowed_misses = input; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(target_residency); +define_one_global_rw(allowed_misses); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &target_residency.attr, + &allowed_misses.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "seladang", +}; + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, 
freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + unsigned long total_idletime, total_usage; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + total_idletime = 0; + total_usage = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + struct cpuidle_device * j_cpuidle_dev = NULL; +// struct cpuidle_state * deepidle_state = NULL; +// unsigned long long deepidle_time, deepidle_usage; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of seladang, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
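+ *
+ * When io_is_busy is 0 the iowait component stays inside idle_time and
+ * pulls the computed load down instead.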
+ */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + + j_cpuidle_dev = per_cpu(cpuidle_devices, j); + +/* + if (j_cpuidle_dev) + deepidle_state = &j_cpuidle_dev->states[j_cpuidle_dev->state_count - 1]; + + if (deepidle_state) { + deepidle_time = deepidle_state->time; + deepidle_usage = deepidle_state->usage; + + total_idletime += (unsigned long)(deepidle_time - j_dbs_info->prev_idletime); + total_usage += (unsigned long)(deepidle_usage - j_dbs_info->prev_idleusage); + + j_dbs_info->prev_idletime = deepidle_time; + j_dbs_info->prev_idleusage = deepidle_usage; + } +*/ + } + + if (total_usage > 0 && total_idletime / total_usage >= dbs_tuners_ins.target_residency) { + if (num_misses > 0) + num_misses--; + } else { + if (num_misses <= dbs_tuners_ins.allowed_misses) + num_misses++; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur + || num_misses <= dbs_tuners_ins.allowed_misses) { + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
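+ *
+ * Note that the increase branch above returns early (pinned at max)
+ * whenever num_misses is still within allowed_misses, so this decrease
+ * path only runs once num_misses has exceeded that limit.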
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + seladang_powersave_bias_init_cpu(cpu); + num_misses = 0; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, NULL); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + return cpufreq_register_governor(&cpufreq_gov_seladang); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_seladang); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Ezekeel "); +MODULE_DESCRIPTION("'cpufreq_seladang' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SELADANG +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 7ee305f10109c..40a8f8b8f46d7 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -44,6 +44,10 @@ #define INIT_UDELAY 200 #define MAX_UDELAY 2000 +#ifdef CONFIG_CPU_FREQ_GOV_ELEMENTALX +int graphics_boost = 6; +#endif + struct clk_pair { const char *name; uint map; @@ -188,6 +192,10 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq); + +#ifdef CONFIG_CPU_FREQ_GOV_ELEMENTALX + graphics_boost = pwr->active_pwrlevel; +#endif } EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 19dc27f107f2c..32358d2d53190 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -672,6 +672,12 @@ config TOUCH_WAKE help Say Y here to enable Touch Wake +config BLX + bool "Support for Battery Life eXtender" + default y + help + Say Y here to enable Battery Live eXtender + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index e67e4769fe338..a0a7ee0c6d792 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -73,3 +73,4 @@ obj-y += tspdrv/ obj-$(CONFIG_BU52031NVX) += pm8xxx-cradle.o obj-$(CONFIG_TOUCH_WAKE) += touch_wake.o obj-$(CONFIG_SLIMPORT_ANX7808) += slimport_anx7808/ +obj-$(CONFIG_BLX) += blx.o diff --git a/drivers/misc/blx.c b/drivers/misc/blx.c new file mode 100644 index 0000000000000..0ffb738f93c5c --- /dev/null +++ b/drivers/misc/blx.c @@ -0,0 +1,100 @@ +/* drivers/misc/blx.c + * + * Copyright 2011 Ezekeel + * + * Simple port to Nexus 4: mathkid95 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include + +static int charging_limit = MAX_CHARGINGLIMIT; + +static ssize_t blx_charginglimit_read(struct device * dev, struct device_attribute * attr, char * buf) +{ + return sprintf(buf, "%u\n", charging_limit); +} + +static ssize_t blx_charginglimit_write(struct device * dev, struct device_attribute * attr, const char * buf, size_t size) +{ + unsigned int data; + + if(sscanf(buf, "%u\n", &data) == 1) + { + if (data >= 0 && data <= MAX_CHARGINGLIMIT) + { + charging_limit = data; + + pr_info("BLX charging limit set to %u\n", charging_limit); + } + else + { + pr_info("%s: Invalid input range %u\n", __FUNCTION__, data); + } + } + else + { + pr_info("%s: Invalid input\n", __FUNCTION__); + } + + return size; +} + +static DEVICE_ATTR(charging_limit, S_IRUGO | S_IWUGO, blx_charginglimit_read, blx_charginglimit_write); + +static struct attribute *blx_attributes[] = + { + &dev_attr_charging_limit.attr, + NULL + }; + +static struct attribute_group blx_group = + { + .attrs = blx_attributes, + }; + +static struct miscdevice blx_device = + { + .minor = MISC_DYNAMIC_MINOR, + .name = "batterylifeextender", + }; + +int get_charginglimit(void) +{ + return charging_limit; +} +EXPORT_SYMBOL(get_charginglimit); + +static int __init blx_init(void) +{ + int ret; + + pr_info("%s misc_register(%s)\n", __FUNCTION__, blx_device.name); + + ret = misc_register(&blx_device); + + if (ret) + { + pr_err("%s misc_register(%s) fail\n", __FUNCTION__, blx_device.name); + + return 1; + } + + if (sysfs_create_group(&blx_device.this_device->kobj, &blx_group) < 0) + { + pr_err("%s sysfs_create_group fail\n", __FUNCTION__); + pr_err("Failed to create sysfs group for device (%s)!\n", blx_device.name); + } + + return 0; +} + +device_initcall(blx_init); diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c index b3b8fa22d7089..557d2d39e7f69 100644 --- a/drivers/power/pm8921-charger.c +++ b/drivers/power/pm8921-charger.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -1562,14 +1563,17 @@ static int get_prop_batt_status(struct pm8921_chg_chip *chip) } if (chip->eoc_check_soc) { - if (get_prop_batt_capacity(chip) == 100) { + #ifdef CONFIG_BLX + if (get_prop_batt_capacity(chip) >= get_charginglimit()) + #else + if (get_prop_batt_capacity(chip) == 100) + #endif if (batt_state == POWER_SUPPLY_STATUS_CHARGING) batt_state = POWER_SUPPLY_STATUS_FULL; } else { if (batt_state == POWER_SUPPLY_STATUS_FULL) batt_state = POWER_SUPPLY_STATUS_CHARGING; } - } pr_debug("batt_state = %d fsm_state = %d \n",batt_state, fsm_state); return batt_state; @@ -3396,7 +3400,11 @@ static void eoc_worker(struct work_struct *work) if (chip->eoc_check_soc) { percent_soc = get_prop_batt_capacity(chip); - if (percent_soc == 100) + #ifdef CONFIG_BLX + if (percent_soc >= get_charginglimit()) + #else + if (percent_soc == 100) + #endif count = CONSECUTIVE_COUNT; } @@ -3408,11 +3416,17 @@ static void eoc_worker(struct work_struct *work) if (is_ext_charging(chip)) chip->ext_charge_done = true; - - if (chip->is_bat_warm || chip->is_bat_cool) - chip->bms_notify.is_battery_full = 0; - else - chip->bms_notify.is_battery_full = 1; +#ifdef CONFIG_BLX +//if (chip->is_bat_warm || chip->is_bat_cool) +// chip->bms_notify.is_battery_full = 0; +//else +// chip->bms_notify.is_battery_full = 1; +#else + if (chip->is_bat_warm || chip->is_bat_cool) + chip->bms_notify.is_battery_full = 0; + else + chip->bms_notify.is_battery_full = 1; +#endif /* declare end of charging by 
invoking chgdone interrupt */ chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip); wake_unlock(&chip->eoc_wake_lock); diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c index 50d2209753e7f..f6feca14fb0a0 100644 --- a/drivers/usb/otg/msm_otg.c +++ b/drivers/usb/otg/msm_otg.c @@ -51,6 +51,11 @@ #include #include +#ifdef CONFIG_FORCE_FAST_CHARGE +#include +#define USB_FASTCHG_LOAD 1000 /* uA */ +#endif + #define MSM_USB_BASE (motg->regs) #define DRIVER_NAME "msm_otg" @@ -1147,7 +1152,14 @@ static void msm_otg_notify_charger(struct msm_otg *motg, unsigned mA) if (motg->cur_power == mA) return; - +#ifdef CONFIG_FORCE_FAST_CHARGE + if (force_fast_charge == 1) { + mA = USB_FASTCHG_LOAD; + pr_info("USB fast charging is ON - 1000mA.\n"); + } else { + pr_info("USB fast charging is OFF.\n"); + } +#endif dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA); pm8921_charger_vbus_draw(mA); diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c index b0cef309ed711..b79bfe0ea817f 100644 --- a/drivers/video/msm/mdp4_overlay.c +++ b/drivers/video/msm/mdp4_overlay.c @@ -857,6 +857,7 @@ static void mdp4_overlay_vg_get_src_offset(struct mdp4_overlay_pipe *pipe, (pipe->src_y * pipe->srcp1_ystride); break; + case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: if (pipe->src_x & 0x1) pipe->src_x += 1; @@ -1056,6 +1057,7 @@ int mdp4_overlay_format2type(uint32 format) case MDP_BGRA_8888: case MDP_RGBX_8888: return OVERLAY_TYPE_RGB; + case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: case MDP_Y_CRCB_H2V1: case MDP_Y_CBCR_H2V1: @@ -1222,6 +1224,7 @@ int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe) pipe->element0 = C1_B_Cb; /* B */ pipe->bpp = 4; /* 4 bpp */ break; + case MDP_YCBYCR_H2V1: case MDP_YCRYCB_H2V1: pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR; pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED; @@ -1233,10 +1236,17 @@ int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe) pipe->unpack_tight = 1; pipe->unpack_align_msb = 0; pipe->unpack_count = 3; - pipe->element3 = C0_G_Y; /* G */ - pipe->element2 = C2_R_Cr; /* R */ - pipe->element1 = C0_G_Y; /* G */ - pipe->element0 = C1_B_Cb; /* B */ + if (pipe->src_format == MDP_YCRYCB_H2V1) { + pipe->element3 = C0_G_Y; /* G */ + pipe->element2 = C2_R_Cr; /* R */ + pipe->element1 = C0_G_Y; /* G */ + pipe->element0 = C1_B_Cb; /* B */ + } else if (pipe->src_format == MDP_YCBYCR_H2V1) { + pipe->element3 = C0_G_Y; /* G */ + pipe->element2 = C1_B_Cb; /* B */ + pipe->element1 = C0_G_Y; /* G */ + pipe->element0 = C2_R_Cr; /* R */ + } pipe->bpp = 2; /* 2 bpp */ pipe->chroma_sample = MDP4_CHROMA_H2V1; break; diff --git a/fs/Kconfig b/fs/Kconfig index 1dd49481854d5..52ee3ef886d53 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -224,6 +224,7 @@ source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" source "fs/ufs/Kconfig" source "fs/exofs/Kconfig" +source "fs/f2fs/Kconfig" endif # MISC_FILESYSTEMS diff --git a/fs/Makefile b/fs/Makefile index 95cf9de6ae021..cf508e5240be7 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -122,6 +122,7 @@ obj-$(CONFIG_DEBUG_FS) += debugfs/ obj-$(CONFIG_OCFS2_FS) += ocfs2/ obj-$(CONFIG_BTRFS_FS) += btrfs/ obj-$(CONFIG_GFS2_FS) += gfs2/ +obj-$(CONFIG_F2FS_FS) += f2fs/ obj-y += exofs/ # Multiple modules obj-$(CONFIG_CEPH_FS) += ceph/ obj-$(CONFIG_PSTORE) += pstore/ diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig new file mode 100644 index 0000000000000..214fe1054fcee --- /dev/null +++ b/fs/f2fs/Kconfig @@ -0,0 +1,73 @@ +config F2FS_FS + tristate "F2FS filesystem support (EXPERIMENTAL)" + depends on BLOCK + help + 
F2FS is based on Log-structured File System (LFS), which supports + versatile "flash-friendly" features. The design has been focused on + addressing the fundamental issues in LFS, which are snowball effect + of wandering tree and high cleaning overhead. + + Since flash-based storages show different characteristics according to + the internal geometry or flash memory management schemes aka FTL, F2FS + and tools support various parameters not only for configuring on-disk + layout, but also for selecting allocation and cleaning algorithms. + + If unsure, say N. + +config F2FS_STAT_FS + bool "F2FS Status Information" + depends on F2FS_FS && DEBUG_FS + default y + help + /sys/kernel/debug/f2fs/ contains information about all the partitions + mounted as f2fs. Each file shows the whole f2fs information. + + /sys/kernel/debug/f2fs/status includes: + - major file system information managed by f2fs currently + - average SIT information about whole segments + - current memory footprint consumed by f2fs. + +config F2FS_FS_XATTR + bool "F2FS extended attributes" + depends on F2FS_FS + default y + help + Extended attributes are name:value pairs associated with inodes by + the kernel or by users (see the attr(5) manual page, or visit + for details). + + If unsure, say N. + +config F2FS_FS_POSIX_ACL + bool "F2FS Access Control Lists" + depends on F2FS_FS_XATTR + select FS_POSIX_ACL + default y + help + Posix Access Control Lists (ACLs) support permissions for users and + gourps beyond the owner/group/world scheme. + + To learn more about Access Control Lists, visit the POSIX ACLs for + Linux website . + + If you don't know what Access Control Lists are, say N + +config F2FS_FS_SECURITY + bool "F2FS Security Labels" + depends on F2FS_FS_XATTR + help + Security labels provide an access control facility to support Linux + Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO + Linux. This option enables an extended attribute handler for file + security labels in the f2fs filesystem, so that it requires enabling + the extended attribute support in advance. + + If you are not using a security module, say N. + +config F2FS_CHECK_FS + bool "F2FS consistency checking feature" + depends on F2FS_FS + help + Enables BUG_ONs which check the file system consistency in runtime. + + If you want to improve the performance, say N. diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile new file mode 100644 index 0000000000000..2e35da12d292e --- /dev/null +++ b/fs/f2fs/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_F2FS_FS) += f2fs.o + +f2fs-y := dir.o file.o inode.o namei.o hash.o super.o inline.o +f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o +f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o +f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o +f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c new file mode 100644 index 0000000000000..afc979d4dbb64 --- /dev/null +++ b/fs/f2fs/acl.c @@ -0,0 +1,425 @@ +/* + * fs/f2fs/acl.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Portions of this code from linux/fs/ext2/acl.c + * + * Copyright (C) 2001-2003 Andreas Gruenbacher, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include "f2fs.h" +#include "xattr.h" +#include "acl.h" + +#define get_inode_mode(i) ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? 
\ + (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) + +static inline size_t f2fs_acl_size(int count) +{ + if (count <= 4) { + return sizeof(struct f2fs_acl_header) + + count * sizeof(struct f2fs_acl_entry_short); + } else { + return sizeof(struct f2fs_acl_header) + + 4 * sizeof(struct f2fs_acl_entry_short) + + (count - 4) * sizeof(struct f2fs_acl_entry); + } +} + +static inline int f2fs_acl_count(size_t size) +{ + ssize_t s; + size -= sizeof(struct f2fs_acl_header); + s = size - 4 * sizeof(struct f2fs_acl_entry_short); + if (s < 0) { + if (size % sizeof(struct f2fs_acl_entry_short)) + return -1; + return size / sizeof(struct f2fs_acl_entry_short); + } else { + if (s % sizeof(struct f2fs_acl_entry)) + return -1; + return s / sizeof(struct f2fs_acl_entry) + 4; + } +} + +static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size) +{ + int i, count; + struct posix_acl *acl; + struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value; + struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1); + const char *end = value + size; + + if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION)) + return ERR_PTR(-EINVAL); + + count = f2fs_acl_count(size); + if (count < 0) + return ERR_PTR(-EINVAL); + if (count == 0) + return NULL; + + acl = posix_acl_alloc(count, GFP_KERNEL); + if (!acl) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < count; i++) { + + if ((char *)entry > end) + goto fail; + + acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag); + acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm); + + switch (acl->a_entries[i].e_tag) { + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + entry = (struct f2fs_acl_entry *)((char *)entry + + sizeof(struct f2fs_acl_entry_short)); + break; + + case ACL_USER: + case ACL_GROUP: + acl->a_entries[i].e_id = le32_to_cpu(entry->e_id); + entry = (struct f2fs_acl_entry *)((char *)entry + + sizeof(struct f2fs_acl_entry)); + break; + default: + goto fail; + } + } + if ((char *)entry != end) + goto fail; + return acl; +fail: + posix_acl_release(acl); + return ERR_PTR(-EINVAL); +} + +static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size) +{ + struct f2fs_acl_header *f2fs_acl; + struct f2fs_acl_entry *entry; + int i; + + f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count * + sizeof(struct f2fs_acl_entry), GFP_KERNEL); + if (!f2fs_acl) + return ERR_PTR(-ENOMEM); + + f2fs_acl->a_version = cpu_to_le32(F2FS_ACL_VERSION); + entry = (struct f2fs_acl_entry *)(f2fs_acl + 1); + + for (i = 0; i < acl->a_count; i++) { + + entry->e_tag = cpu_to_le16(acl->a_entries[i].e_tag); + entry->e_perm = cpu_to_le16(acl->a_entries[i].e_perm); + + switch (acl->a_entries[i].e_tag) { + case ACL_USER: + case ACL_GROUP: + entry->e_id = cpu_to_le32(acl->a_entries[i].e_id); + entry = (struct f2fs_acl_entry *)((char *)entry + + sizeof(struct f2fs_acl_entry)); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + entry = (struct f2fs_acl_entry *)((char *)entry + + sizeof(struct f2fs_acl_entry_short)); + break; + default: + goto fail; + } + } + *size = f2fs_acl_size(acl->a_count); + return (void *)f2fs_acl; + +fail: + kfree(f2fs_acl); + return ERR_PTR(-EINVAL); +} + +struct posix_acl *f2fs_get_acl(struct inode *inode, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + int name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT; + void *value = NULL; + struct posix_acl *acl; + int retval; + + if (!test_opt(sbi, POSIX_ACL)) + return NULL; + + acl = get_cached_acl(inode, type); + if (acl 
!= ACL_NOT_CACHED) + return acl; + + if (type == ACL_TYPE_ACCESS) + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; + + retval = f2fs_getxattr(inode, name_index, "", NULL, 0); + if (retval > 0) { + value = kmalloc(retval, GFP_KERNEL); + if (!value) + return ERR_PTR(-ENOMEM); + retval = f2fs_getxattr(inode, name_index, "", value, retval); + } + + if (retval > 0) + acl = f2fs_acl_from_disk(value, retval); + else if (retval == -ENODATA) + acl = NULL; + else + acl = ERR_PTR(retval); + kfree(value); + + if (!IS_ERR(acl)) + set_cached_acl(inode, type, acl); + + return acl; +} + +static int f2fs_set_acl(struct inode *inode, int type, + struct posix_acl *acl, struct page *ipage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct f2fs_inode_info *fi = F2FS_I(inode); + int name_index; + void *value = NULL; + size_t size = 0; + int error; + + if (!test_opt(sbi, POSIX_ACL)) + return 0; + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + + switch (type) { + case ACL_TYPE_ACCESS: + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { + error = posix_acl_equiv_mode(acl, &inode->i_mode); + if (error < 0) + return error; + set_acl_inode(fi, inode->i_mode); + if (error == 0) + acl = NULL; + } + break; + + case ACL_TYPE_DEFAULT: + name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT; + if (!S_ISDIR(inode->i_mode)) + return acl ? -EACCES : 0; + break; + + default: + return -EINVAL; + } + + if (acl) { + value = f2fs_acl_to_disk(acl, &size); + if (IS_ERR(value)) { + cond_clear_inode_flag(fi, FI_ACL_MODE); + return (int)PTR_ERR(value); + } + } + + error = f2fs_setxattr(inode, name_index, "", value, size, ipage); + + kfree(value); + if (!error) + set_cached_acl(inode, type, acl); + + cond_clear_inode_flag(fi, FI_ACL_MODE); + return error; +} + +int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); + struct posix_acl *acl = NULL; + int error = 0; + + if (!S_ISLNK(inode->i_mode)) { + if (test_opt(sbi, POSIX_ACL)) { + acl = f2fs_get_acl(dir, ACL_TYPE_DEFAULT); + if (IS_ERR(acl)) + return PTR_ERR(acl); + } + if (!acl && !(test_opt(sbi, ANDROID_EMU) && + F2FS_I(inode)->i_advise & FADVISE_ANDROID_EMU)) + inode->i_mode &= ~current_umask(); + } + + if (!test_opt(sbi, POSIX_ACL) || !acl) + goto cleanup; + + if (S_ISDIR(inode->i_mode)) { + error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl, ipage); + if (error) + goto cleanup; + } + error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode); + if (error < 0) + return error; + if (error > 0) + error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, ipage); +cleanup: + posix_acl_release(acl); + return error; +} + +int f2fs_acl_chmod(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct posix_acl *acl; + int error; + mode_t mode = get_inode_mode(inode); + + if (!test_opt(sbi, POSIX_ACL)) + return 0; + if (S_ISLNK(mode)) + return -EOPNOTSUPP; + + acl = f2fs_get_acl(inode, ACL_TYPE_ACCESS); + if (IS_ERR(acl) || !acl) + return PTR_ERR(acl); + + error = posix_acl_chmod(&acl, GFP_KERNEL, mode); + if (error) + return error; + + error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, NULL); + posix_acl_release(acl); + return error; +} + +int f2fs_android_emu(struct f2fs_sb_info *sbi, struct inode *inode, + u32 *uid, u32 *gid, umode_t *mode) +{ + F2FS_I(inode)->i_advise |= FADVISE_ANDROID_EMU; + + if (uid) + *uid = sbi->android_emu_uid; + if (gid) + *gid = sbi->android_emu_gid; + if (mode) { + *mode = (*mode & ~S_IRWXUGO) | sbi->android_emu_mode; + if (F2FS_I(inode)->i_advise 
& FADVISE_ANDROID_EMU_ROOT) + *mode &= ~S_IRWXO; + if (S_ISDIR(*mode)) { + if (*mode & S_IRUSR) + *mode |= S_IXUSR; + if (*mode & S_IRGRP) + *mode |= S_IXGRP; + if (*mode & S_IROTH) + *mode |= S_IXOTH; + } + } + + return 0; +} + +static size_t f2fs_xattr_list_acl(struct dentry *dentry, char *list, + size_t list_size, const char *name, size_t name_len, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + const char *xname = POSIX_ACL_XATTR_DEFAULT; + size_t size; + + if (!test_opt(sbi, POSIX_ACL)) + return 0; + + if (type == ACL_TYPE_ACCESS) + xname = POSIX_ACL_XATTR_ACCESS; + + size = strlen(xname) + 1; + if (list && size <= list_size) + memcpy(list, xname, size); + return size; +} + +static int f2fs_xattr_get_acl(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + struct posix_acl *acl; + int error; + + if (strcmp(name, "") != 0) + return -EINVAL; + if (!test_opt(sbi, POSIX_ACL)) + return -EOPNOTSUPP; + + acl = f2fs_get_acl(dentry->d_inode, type); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (!acl) + return -ENODATA; + error = posix_acl_to_xattr(acl, buffer, size); + posix_acl_release(acl); + + return error; +} + +static int f2fs_xattr_set_acl(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + struct inode *inode = dentry->d_inode; + struct posix_acl *acl = NULL; + int error; + + if (strcmp(name, "") != 0) + return -EINVAL; + if (!test_opt(sbi, POSIX_ACL)) + return -EOPNOTSUPP; + if (!inode_owner_or_capable(inode)) + return -EPERM; + + if (value) { + acl = posix_acl_from_xattr(value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { + error = posix_acl_valid(acl); + if (error) + goto release_and_out; + } + } else { + acl = NULL; + } + + error = f2fs_set_acl(inode, type, acl, NULL); + +release_and_out: + posix_acl_release(acl); + return error; +} + +const struct xattr_handler f2fs_xattr_acl_default_handler = { + .prefix = POSIX_ACL_XATTR_DEFAULT, + .flags = ACL_TYPE_DEFAULT, + .list = f2fs_xattr_list_acl, + .get = f2fs_xattr_get_acl, + .set = f2fs_xattr_set_acl, +}; + +const struct xattr_handler f2fs_xattr_acl_access_handler = { + .prefix = POSIX_ACL_XATTR_ACCESS, + .flags = ACL_TYPE_ACCESS, + .list = f2fs_xattr_list_acl, + .get = f2fs_xattr_get_acl, + .set = f2fs_xattr_set_acl, +}; diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h new file mode 100644 index 0000000000000..49633131e038d --- /dev/null +++ b/fs/f2fs/acl.h @@ -0,0 +1,58 @@ +/* + * fs/f2fs/acl.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Portions of this code from linux/fs/ext2/acl.h + * + * Copyright (C) 2001-2003 Andreas Gruenbacher, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
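+ *
+ * Declares the on-disk ACL entry layout and the f2fs_get_acl /
+ * f2fs_init_acl / f2fs_acl_chmod entry points; when
+ * CONFIG_F2FS_FS_POSIX_ACL is disabled the stubs below compile the
+ * feature away.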
+ */ +#ifndef __F2FS_ACL_H__ +#define __F2FS_ACL_H__ + +#include + +#define F2FS_ACL_VERSION 0x0001 + +struct f2fs_acl_entry { + __le16 e_tag; + __le16 e_perm; + __le32 e_id; +}; + +struct f2fs_acl_entry_short { + __le16 e_tag; + __le16 e_perm; +}; + +struct f2fs_acl_header { + __le32 a_version; +}; + +#ifdef CONFIG_F2FS_FS_POSIX_ACL + +extern struct posix_acl *f2fs_get_acl(struct inode *, int); +extern int f2fs_acl_chmod(struct inode *); +extern int f2fs_init_acl(struct inode *, struct inode *, struct page *); +#else +#define f2fs_check_acl NULL +#define f2fs_get_acl NULL +#define f2fs_set_acl NULL + +static inline int f2fs_acl_chmod(struct inode *inode) +{ + return 0; +} + +static inline int f2fs_init_acl(struct inode *inode, struct inode *dir, + struct page *page) +{ + return 0; +} +#endif +#endif /* __F2FS_ACL_H__ */ diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c new file mode 100644 index 0000000000000..677b23b3429ae --- /dev/null +++ b/fs/f2fs/checkpoint.c @@ -0,0 +1,878 @@ +/* + * fs/f2fs/checkpoint.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include + +static struct kmem_cache *orphan_entry_slab; +static struct kmem_cache *inode_entry_slab; + +/* + * We guarantee no failure on the returned page. + */ +struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) +{ + struct address_space *mapping = META_MAPPING(sbi); + struct page *page = NULL; +repeat: + page = grab_cache_page(mapping, index); + if (!page) { + cond_resched(); + goto repeat; + } + + /* We wait writeback only inside grab_meta_page() */ + wait_on_page_writeback(page); + SetPageUptodate(page); + return page; +} + +/* + * We guarantee no failure on the returned page. 
+ */ +struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) +{ + struct address_space *mapping = META_MAPPING(sbi); + struct page *page; +repeat: + page = grab_cache_page(mapping, index); + if (!page) { + cond_resched(); + goto repeat; + } + if (PageUptodate(page)) + goto out; + + if (f2fs_submit_page_bio(sbi, page, index, + READ_SYNC | REQ_META | REQ_PRIO)) + goto repeat; + + lock_page(page); + if (unlikely(page->mapping != mapping)) { + f2fs_put_page(page, 1); + goto repeat; + } +out: + mark_page_accessed(page); + return page; +} + +static int f2fs_write_meta_page(struct page *page, + struct writeback_control *wbc) +{ + struct inode *inode = page->mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + + /* Should not write any meta pages, if any IO error was occurred */ + if (unlikely(sbi->por_doing || + is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG))) + goto redirty_out; + + if (wbc->for_reclaim) + goto redirty_out; + + wait_on_page_writeback(page); + + write_meta_page(sbi, page); + dec_page_count(sbi, F2FS_DIRTY_META); + unlock_page(page); + return 0; + +redirty_out: + dec_page_count(sbi, F2FS_DIRTY_META); + wbc->pages_skipped++; + set_page_dirty(page); + return AOP_WRITEPAGE_ACTIVATE; +} + +static int f2fs_write_meta_pages(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); + int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); + long written; + + if (wbc->for_kupdate) + return 0; + + /* collect a number of dirty meta pages and write together */ + if (get_pages(sbi, F2FS_DIRTY_META) < nrpages) + return 0; + + /* if mounting is failed, skip writing node pages */ + mutex_lock(&sbi->cp_mutex); + written = sync_meta_pages(sbi, META, nrpages); + mutex_unlock(&sbi->cp_mutex); + wbc->nr_to_write -= written; + return 0; +} + +long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, + long nr_to_write) +{ + struct address_space *mapping = META_MAPPING(sbi); + pgoff_t index = 0, end = LONG_MAX; + struct pagevec pvec; + long nwritten = 0; + struct writeback_control wbc = { + .for_reclaim = 0, + }; + + pagevec_init(&pvec, 0); + + while (index <= end) { + int i, nr_pages; + nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (unlikely(nr_pages == 0)) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + lock_page(page); + f2fs_bug_on(page->mapping != mapping); + f2fs_bug_on(!PageDirty(page)); + clear_page_dirty_for_io(page); + if (f2fs_write_meta_page(page, &wbc)) { + unlock_page(page); + break; + } + nwritten++; + if (unlikely(nwritten >= nr_to_write)) + break; + } + pagevec_release(&pvec); + cond_resched(); + } + + if (nwritten) + f2fs_submit_merged_bio(sbi, type, WRITE); + + return nwritten; +} + +static int f2fs_set_meta_page_dirty(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); + + trace_f2fs_set_page_dirty(page, META); + + SetPageUptodate(page); + if (!PageDirty(page)) { + __set_page_dirty_nobuffers(page); + inc_page_count(sbi, F2FS_DIRTY_META); + return 1; + } + return 0; +} + +const struct address_space_operations f2fs_meta_aops = { + .writepage = f2fs_write_meta_page, + .writepages = f2fs_write_meta_pages, + .set_page_dirty = f2fs_set_meta_page_dirty, +}; + +int acquire_orphan_inode(struct f2fs_sb_info *sbi) +{ + int err = 0; + + spin_lock(&sbi->orphan_inode_lock); + if 
(unlikely(sbi->n_orphans >= sbi->max_orphans)) + err = -ENOSPC; + else + sbi->n_orphans++; + spin_unlock(&sbi->orphan_inode_lock); + + return err; +} + +void release_orphan_inode(struct f2fs_sb_info *sbi) +{ + spin_lock(&sbi->orphan_inode_lock); + if (sbi->n_orphans == 0) { + f2fs_msg(sbi->sb, KERN_ERR, "releasing " + "unacquired orphan inode"); + f2fs_handle_error(sbi); + } else + sbi->n_orphans--; + spin_unlock(&sbi->orphan_inode_lock); +} + +void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) +{ + struct list_head *head, *this; + struct orphan_inode_entry *new = NULL, *orphan = NULL; + + new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC); + new->ino = ino; + + spin_lock(&sbi->orphan_inode_lock); + head = &sbi->orphan_inode_list; + list_for_each(this, head) { + orphan = list_entry(this, struct orphan_inode_entry, list); + if (orphan->ino == ino) { + spin_unlock(&sbi->orphan_inode_lock); + kmem_cache_free(orphan_entry_slab, new); + return; + } + + if (orphan->ino > ino) + break; + orphan = NULL; + } + + /* add new_oentry into list which is sorted by inode number */ + if (orphan) + list_add(&new->list, this->prev); + else + list_add_tail(&new->list, head); + spin_unlock(&sbi->orphan_inode_lock); +} + +void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) +{ + struct list_head *head; + struct orphan_inode_entry *orphan; + + spin_lock(&sbi->orphan_inode_lock); + head = &sbi->orphan_inode_list; + list_for_each_entry(orphan, head, list) { + if (orphan->ino == ino) { + list_del(&orphan->list); + kmem_cache_free(orphan_entry_slab, orphan); + if (sbi->n_orphans == 0) { + f2fs_msg(sbi->sb, KERN_ERR, "removing " + "unacquired orphan inode %d", + ino); + f2fs_handle_error(sbi); + } else + sbi->n_orphans--; + break; + } + } + spin_unlock(&sbi->orphan_inode_lock); +} + +static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) +{ + struct inode *inode = f2fs_iget(sbi->sb, ino); + if (IS_ERR(inode)) { + f2fs_msg(sbi->sb, KERN_ERR, "unable to recover orphan inode %d", + ino); + f2fs_handle_error(sbi); + return; + } + clear_nlink(inode); + + /* truncate all the data during iput */ + iput(inode); +} + +void recover_orphan_inodes(struct f2fs_sb_info *sbi) +{ + block_t start_blk, orphan_blkaddr, i, j; + + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) + return; + + sbi->por_doing = true; + start_blk = __start_cp_addr(sbi) + 1; + orphan_blkaddr = __start_sum_addr(sbi) - 1; + + for (i = 0; i < orphan_blkaddr; i++) { + struct page *page = get_meta_page(sbi, start_blk + i); + struct f2fs_orphan_block *orphan_blk; + + orphan_blk = (struct f2fs_orphan_block *)page_address(page); + for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { + nid_t ino = le32_to_cpu(orphan_blk->ino[j]); + recover_orphan_inode(sbi, ino); + } + f2fs_put_page(page, 1); + } + /* clear Orphan Flag */ + clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); + sbi->por_doing = false; + return; +} + +static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) +{ + struct list_head *head; + struct f2fs_orphan_block *orphan_blk = NULL; + unsigned int nentries = 0; + unsigned short index; + unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans + + (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK); + struct page *page = NULL; + struct orphan_inode_entry *orphan = NULL; + + for (index = 0; index < orphan_blocks; index++) + grab_meta_page(sbi, start_blk + index); + + index = 1; + spin_lock(&sbi->orphan_inode_lock); + head = &sbi->orphan_inode_list; + + /* 
loop for each orphan inode entry and write them in Jornal block */ + list_for_each_entry(orphan, head, list) { + if (!page) { + page = find_get_page(META_MAPPING(sbi), start_blk++); + f2fs_bug_on(!page); + orphan_blk = + (struct f2fs_orphan_block *)page_address(page); + memset(orphan_blk, 0, sizeof(*orphan_blk)); + f2fs_put_page(page, 0); + } + + orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino); + + if (nentries == F2FS_ORPHANS_PER_BLOCK) { + /* + * an orphan block is full of 1020 entries, + * then we need to flush current orphan blocks + * and bring another one in memory + */ + orphan_blk->blk_addr = cpu_to_le16(index); + orphan_blk->blk_count = cpu_to_le16(orphan_blocks); + orphan_blk->entry_count = cpu_to_le32(nentries); + set_page_dirty(page); + f2fs_put_page(page, 1); + index++; + nentries = 0; + page = NULL; + } + } + + if (page) { + orphan_blk->blk_addr = cpu_to_le16(index); + orphan_blk->blk_count = cpu_to_le16(orphan_blocks); + orphan_blk->entry_count = cpu_to_le32(nentries); + set_page_dirty(page); + f2fs_put_page(page, 1); + } + + spin_unlock(&sbi->orphan_inode_lock); +} + +static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, + block_t cp_addr, unsigned long long *version) +{ + struct page *cp_page_1, *cp_page_2 = NULL; + unsigned long blk_size = sbi->blocksize; + struct f2fs_checkpoint *cp_block; + unsigned long long cur_version = 0, pre_version = 0; + size_t crc_offset; + __u32 crc = 0; + + /* Read the 1st cp block in this CP pack */ + cp_page_1 = get_meta_page(sbi, cp_addr); + + /* get the version number */ + cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1); + crc_offset = le32_to_cpu(cp_block->checksum_offset); + if (crc_offset >= blk_size) + goto invalid_cp1; + + crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); + if (!f2fs_crc_valid(crc, cp_block, crc_offset)) + goto invalid_cp1; + + pre_version = cur_cp_version(cp_block); + + /* Read the 2nd cp block in this CP pack */ + cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; + cp_page_2 = get_meta_page(sbi, cp_addr); + + cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2); + crc_offset = le32_to_cpu(cp_block->checksum_offset); + if (crc_offset >= blk_size) + goto invalid_cp2; + + crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); + if (!f2fs_crc_valid(crc, cp_block, crc_offset)) + goto invalid_cp2; + + cur_version = cur_cp_version(cp_block); + + if (cur_version == pre_version) { + *version = cur_version; + f2fs_put_page(cp_page_2, 1); + return cp_page_1; + } +invalid_cp2: + f2fs_put_page(cp_page_2, 1); +invalid_cp1: + f2fs_put_page(cp_page_1, 1); + return NULL; +} + +int get_valid_checkpoint(struct f2fs_sb_info *sbi) +{ + struct f2fs_checkpoint *cp_block; + struct f2fs_super_block *fsb = sbi->raw_super; + struct page *cp1, *cp2, *cur_page; + unsigned long blk_size = sbi->blocksize; + unsigned long long cp1_version = 0, cp2_version = 0; + unsigned long long cp_start_blk_no; + + sbi->ckpt = kzalloc(blk_size, GFP_KERNEL); + if (!sbi->ckpt) + return -ENOMEM; + /* + * Finding out valid cp block involves read both + * sets( cp pack1 and cp pack 2) + */ + cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); + cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); + + /* The second checkpoint pack should start at the next segment */ + cp_start_blk_no += ((unsigned long long)1) << + le32_to_cpu(fsb->log_blocks_per_seg); + cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); + + if (cp1 && cp2) { + if (ver_after(cp2_version, 
cp1_version)) + cur_page = cp2; + else + cur_page = cp1; + } else if (cp1) { + cur_page = cp1; + } else if (cp2) { + cur_page = cp2; + } else { + goto fail_no_cp; + } + + cp_block = (struct f2fs_checkpoint *)page_address(cur_page); + memcpy(sbi->ckpt, cp_block, blk_size); + + f2fs_put_page(cp1, 1); + f2fs_put_page(cp2, 1); + return 0; + +fail_no_cp: + kfree(sbi->ckpt); + return -EINVAL; +} + +static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct list_head *head = &sbi->dir_inode_list; + struct list_head *this; + + list_for_each(this, head) { + struct dir_inode_entry *entry; + entry = list_entry(this, struct dir_inode_entry, list); + if (unlikely(entry->inode == inode)) + return -EEXIST; + } + list_add_tail(&new->list, head); + stat_inc_dirty_dir(sbi); + return 0; +} + +void set_dirty_dir_page(struct inode *inode, struct page *page) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct dir_inode_entry *new; + + if (!S_ISDIR(inode->i_mode)) + return; + + new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS); + new->inode = inode; + INIT_LIST_HEAD(&new->list); + + spin_lock(&sbi->dir_inode_lock); + if (__add_dirty_inode(inode, new)) + kmem_cache_free(inode_entry_slab, new); + + inc_page_count(sbi, F2FS_DIRTY_DENTS); + inode_inc_dirty_dents(inode); + SetPagePrivate(page); + spin_unlock(&sbi->dir_inode_lock); +} + +void add_dirty_dir_inode(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct dir_inode_entry *new = + f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS); + + new->inode = inode; + INIT_LIST_HEAD(&new->list); + + spin_lock(&sbi->dir_inode_lock); + if (__add_dirty_inode(inode, new)) + kmem_cache_free(inode_entry_slab, new); + spin_unlock(&sbi->dir_inode_lock); +} + +void remove_dirty_dir_inode(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + + struct list_head *this, *head; + + if (!S_ISDIR(inode->i_mode)) + return; + + spin_lock(&sbi->dir_inode_lock); + if (atomic_read(&F2FS_I(inode)->dirty_dents)) { + spin_unlock(&sbi->dir_inode_lock); + return; + } + + head = &sbi->dir_inode_list; + list_for_each(this, head) { + struct dir_inode_entry *entry; + entry = list_entry(this, struct dir_inode_entry, list); + if (entry->inode == inode) { + list_del(&entry->list); + kmem_cache_free(inode_entry_slab, entry); + stat_dec_dirty_dir(sbi); + break; + } + } + spin_unlock(&sbi->dir_inode_lock); + + /* Only from the recovery routine */ + if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) { + clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT); + iput(inode); + } +} + +struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino) +{ + + struct list_head *this, *head; + struct inode *inode = NULL; + + spin_lock(&sbi->dir_inode_lock); + + head = &sbi->dir_inode_list; + list_for_each(this, head) { + struct dir_inode_entry *entry; + entry = list_entry(this, struct dir_inode_entry, list); + if (entry->inode->i_ino == ino) { + inode = entry->inode; + break; + } + } + spin_unlock(&sbi->dir_inode_lock); + return inode; +} + +void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) +{ + struct list_head *head; + struct dir_inode_entry *entry; + struct inode *inode; +retry: + spin_lock(&sbi->dir_inode_lock); + + head = &sbi->dir_inode_list; + if (list_empty(head)) { + spin_unlock(&sbi->dir_inode_lock); + return; + } + entry = list_entry(head->next, struct dir_inode_entry, list); + inode = igrab(entry->inode); + spin_unlock(&sbi->dir_inode_lock); + if 
(inode) { + filemap_flush(inode->i_mapping); + iput(inode); + } else { + /* + * We should submit bio, since it exists several + * wribacking dentry pages in the freeing inode. + */ + f2fs_submit_merged_bio(sbi, DATA, WRITE); + } + goto retry; +} + +/* + * Freeze all the FS-operations for checkpoint. + */ +static void block_operations(struct f2fs_sb_info *sbi) +{ + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .for_reclaim = 0, + }; + struct blk_plug plug; + + blk_start_plug(&plug); + +retry_flush_dents: + f2fs_lock_all(sbi); + /* write all the dirty dentry pages */ + if (get_pages(sbi, F2FS_DIRTY_DENTS)) { + f2fs_unlock_all(sbi); + sync_dirty_dir_inodes(sbi); + goto retry_flush_dents; + } + + /* + * POR: we should ensure that there is no dirty node pages + * until finishing nat/sit flush. + */ +retry_flush_nodes: + mutex_lock(&sbi->node_write); + + if (get_pages(sbi, F2FS_DIRTY_NODES)) { + mutex_unlock(&sbi->node_write); + sync_node_pages(sbi, 0, &wbc); + goto retry_flush_nodes; + } + blk_finish_plug(&plug); +} + +static void unblock_operations(struct f2fs_sb_info *sbi) +{ + mutex_unlock(&sbi->node_write); + f2fs_unlock_all(sbi); +} + +static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) +{ + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); + + if (!get_pages(sbi, F2FS_WRITEBACK)) + break; + + io_schedule(); + } + finish_wait(&sbi->cp_wait, &wait); +} + +static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + nid_t last_nid = 0; + block_t start_blk; + struct page *cp_page; + unsigned int data_sum_blocks, orphan_blocks; + __u32 crc32 = 0; + void *kaddr; + int i; + + /* Flush all the NAT/SIT pages */ + while (get_pages(sbi, F2FS_DIRTY_META)) + sync_meta_pages(sbi, META, LONG_MAX); + + next_free_nid(sbi, &last_nid); + + /* + * modify checkpoint + * version number is already updated + */ + ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi)); + ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); + ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); + for (i = 0; i < 3; i++) { + ckpt->cur_node_segno[i] = + cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); + ckpt->cur_node_blkoff[i] = + cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE)); + ckpt->alloc_type[i + CURSEG_HOT_NODE] = + curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); + } + for (i = 0; i < 3; i++) { + ckpt->cur_data_segno[i] = + cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); + ckpt->cur_data_blkoff[i] = + cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA)); + ckpt->alloc_type[i + CURSEG_HOT_DATA] = + curseg_alloc_type(sbi, i + CURSEG_HOT_DATA); + } + + ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); + ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); + ckpt->next_free_nid = cpu_to_le32(last_nid); + + /* 2 cp + n data seg summary + orphan inode blocks */ + data_sum_blocks = npages_for_summary_flush(sbi); + if (data_sum_blocks < 3) + set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); + else + clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); + + orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1) + / F2FS_ORPHANS_PER_BLOCK; + ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks); + + if (is_umount) { + set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + ckpt->cp_pack_total_block_count = cpu_to_le32(2 + + data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE); + } else { + clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + 
ckpt->cp_pack_total_block_count = cpu_to_le32(2 + + data_sum_blocks + orphan_blocks); + } + + if (sbi->n_orphans) + set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); + else + clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); + + /* update SIT/NAT bitmap */ + get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); + get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); + + crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset)); + *((__le32 *)((unsigned char *)ckpt + + le32_to_cpu(ckpt->checksum_offset))) + = cpu_to_le32(crc32); + + start_blk = __start_cp_addr(sbi); + + /* write out checkpoint buffer at block 0 */ + cp_page = grab_meta_page(sbi, start_blk++); + kaddr = page_address(cp_page); + memcpy(kaddr, ckpt, (1 << sbi->log_blocksize)); + set_page_dirty(cp_page); + f2fs_put_page(cp_page, 1); + + if (sbi->n_orphans) { + write_orphan_inodes(sbi, start_blk); + start_blk += orphan_blocks; + } + + write_data_summaries(sbi, start_blk); + start_blk += data_sum_blocks; + if (is_umount) { + write_node_summaries(sbi, start_blk); + start_blk += NR_CURSEG_NODE_TYPE; + } + + /* writeout checkpoint block */ + cp_page = grab_meta_page(sbi, start_blk); + kaddr = page_address(cp_page); + memcpy(kaddr, ckpt, (1 << sbi->log_blocksize)); + set_page_dirty(cp_page); + f2fs_put_page(cp_page, 1); + + /* wait for previous submitted node/meta pages writeback */ + wait_on_all_pages_writeback(sbi); + + filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX); + filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX); + + /* update user_block_counts */ + sbi->last_valid_block_count = sbi->total_valid_block_count; + sbi->alloc_valid_block_count = 0; + + /* Here, we only have one bio having CP pack */ + sync_meta_pages(sbi, META_FLUSH, LONG_MAX); + + if (unlikely(!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) { + clear_prefree_segments(sbi); + F2FS_RESET_SB_DIRT(sbi); + } +} + +/* + * We guarantee that this checkpoint procedure should not fail. + */ +void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + unsigned long long ckpt_ver; + + trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops"); + + mutex_lock(&sbi->cp_mutex); + block_operations(sbi); + + trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops"); + + f2fs_submit_merged_bio(sbi, DATA, WRITE); + f2fs_submit_merged_bio(sbi, NODE, WRITE); + f2fs_submit_merged_bio(sbi, META, WRITE); + + /* + * update checkpoint pack index + * Increase the version number so that + * SIT entries and seg summaries are written at correct place + */ + ckpt_ver = cur_cp_version(ckpt); + ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); + + /* write cached NAT/SIT entries to NAT/SIT area */ + flush_nat_entries(sbi); + flush_sit_entries(sbi); + + /* unlock all the fs_lock[] in do_checkpoint() */ + do_checkpoint(sbi, is_umount); + + unblock_operations(sbi); + mutex_unlock(&sbi->cp_mutex); + + trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint"); +} + +void init_orphan_info(struct f2fs_sb_info *sbi) +{ + spin_lock_init(&sbi->orphan_inode_lock); + INIT_LIST_HEAD(&sbi->orphan_inode_list); + sbi->n_orphans = 0; + /* + * considering 512 blocks in a segment 8 blocks are needed for cp + * and log segment summaries. 
Remaining blocks are used to keep + * orphan entries with the limitation one reserved segment + * for cp pack we can have max 1020*504 orphan entries + */ + sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE) + * F2FS_ORPHANS_PER_BLOCK; +} + +int __init create_checkpoint_caches(void) +{ + orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", + sizeof(struct orphan_inode_entry), NULL); + if (!orphan_entry_slab) + return -ENOMEM; + inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry", + sizeof(struct dir_inode_entry), NULL); + if (!inode_entry_slab) { + kmem_cache_destroy(orphan_entry_slab); + return -ENOMEM; + } + return 0; +} + +void destroy_checkpoint_caches(void) +{ + kmem_cache_destroy(orphan_entry_slab); + kmem_cache_destroy(inode_entry_slab); +} diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c new file mode 100644 index 0000000000000..cf80e2b7ee123 --- /dev/null +++ b/fs/f2fs/data.c @@ -0,0 +1,1088 @@ +/* + * fs/f2fs/data.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include + +static void f2fs_read_end_io(struct bio *bio, int err) +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + + do { + struct page *page = bvec->bv_page; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + + if (unlikely(!uptodate)) { + ClearPageUptodate(page); + SetPageError(page); + } else { + SetPageUptodate(page); + } + unlock_page(page); + } while (bvec >= bio->bi_io_vec); + + bio_put(bio); +} + +static void f2fs_write_end_io(struct bio *bio, int err) +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb); + + do { + struct page *page = bvec->bv_page; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + + if (unlikely(!uptodate)) { + SetPageError(page); + set_bit(AS_EIO, &page->mapping->flags); + set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); + sbi->sb->s_flags |= MS_RDONLY; + } + end_page_writeback(page); + dec_page_count(sbi, F2FS_WRITEBACK); + } while (bvec >= bio->bi_io_vec); + + if (bio->bi_private) + complete(bio->bi_private); + + if (!get_pages(sbi, F2FS_WRITEBACK) && + !list_empty(&sbi->cp_wait.task_list)) + wake_up(&sbi->cp_wait); + + bio_put(bio); +} + +/* + * Low-level block read/write IO operations. + */ +static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, + int npages, bool is_read) +{ + struct bio *bio; + + /* No failure on bio allocation */ + bio = bio_alloc(GFP_NOIO, npages); + + bio->bi_bdev = sbi->sb->s_bdev; + bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); + bio->bi_end_io = is_read ? 
f2fs_read_end_io : f2fs_write_end_io; + + return bio; +} + +static void __submit_merged_bio(struct f2fs_bio_info *io) +{ + struct f2fs_io_info *fio = &io->fio; + int rw; + + if (!io->bio) + return; + + rw = fio->rw; + + if (is_read_io(rw)) { + trace_f2fs_submit_read_bio(io->sbi->sb, rw, + fio->type, io->bio); + submit_bio(rw, io->bio); + } else { + trace_f2fs_submit_write_bio(io->sbi->sb, rw, + fio->type, io->bio); + /* + * META_FLUSH is only from the checkpoint procedure, and we + * should wait this metadata bio for FS consistency. + */ + if (fio->type == META_FLUSH) { + DECLARE_COMPLETION_ONSTACK(wait); + io->bio->bi_private = &wait; + submit_bio(rw, io->bio); + wait_for_completion(&wait); + } else { + submit_bio(rw, io->bio); + } + } + + io->bio = NULL; +} + +void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, + enum page_type type, int rw) +{ + enum page_type btype = PAGE_TYPE_OF_BIO(type); + struct f2fs_bio_info *io; + + io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype]; + + mutex_lock(&io->io_mutex); + + /* change META to META_FLUSH in the checkpoint procedure */ + if (type >= META_FLUSH) { + io->fio.type = META_FLUSH; + io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO; + } + __submit_merged_bio(io); + mutex_unlock(&io->io_mutex); +} + +/* + * Fill the locked page with data located in the block address. + * Return unlocked page. + */ +int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, + block_t blk_addr, int rw) +{ + struct bio *bio; + + trace_f2fs_submit_page_bio(page, blk_addr, rw); + + /* Allocate a new bio */ + bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw)); + + if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { + bio_put(bio); + f2fs_put_page(page, 1); + return -EFAULT; + } + + submit_bio(rw, bio); + return 0; +} + +void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page, + block_t blk_addr, struct f2fs_io_info *fio) +{ + enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); + struct f2fs_bio_info *io; + bool is_read = is_read_io(fio->rw); + + io = is_read ? 
&sbi->read_io : &sbi->write_io[btype]; + + verify_block_addr(sbi, blk_addr); + + mutex_lock(&io->io_mutex); + + if (!is_read) + inc_page_count(sbi, F2FS_WRITEBACK); + + if (io->bio && (io->last_block_in_bio != blk_addr - 1 || + io->fio.rw != fio->rw)) + __submit_merged_bio(io); +alloc_new: + if (io->bio == NULL) { + int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); + + io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read); + io->fio = *fio; + } + + if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < + PAGE_CACHE_SIZE) { + __submit_merged_bio(io); + goto alloc_new; + } + + io->last_block_in_bio = blk_addr; + + mutex_unlock(&io->io_mutex); + trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr); +} + +/* + * Lock ordering for the change of data block address: + * ->data_page + * ->node_page + * update block addresses in the node page + */ +static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr) +{ + struct f2fs_node *rn; + __le32 *addr_array; + struct page *node_page = dn->node_page; + unsigned int ofs_in_node = dn->ofs_in_node; + + f2fs_wait_on_page_writeback(node_page, NODE); + + rn = F2FS_NODE(node_page); + + /* Get physical address of data block */ + addr_array = blkaddr_in_node(rn); + addr_array[ofs_in_node] = cpu_to_le32(new_addr); + set_page_dirty(node_page); +} + +int reserve_new_block(struct dnode_of_data *dn) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + + if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) + return -EPERM; + if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) + return -ENOSPC; + + trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node); + + __set_data_blkaddr(dn, NEW_ADDR); + dn->data_blkaddr = NEW_ADDR; + mark_inode_dirty(dn->inode); + sync_inode_page(dn); + return 0; +} + +int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) +{ + bool need_put = dn->inode_page ? 
false : true; + int err; + + /* if inode_page exists, index should be zero */ + f2fs_bug_on(!need_put && index); + + err = get_dnode_of_data(dn, index, ALLOC_NODE); + if (err) + return err; + + if (dn->data_blkaddr == NULL_ADDR) + err = reserve_new_block(dn); + if (err || need_put) + f2fs_put_dnode(dn); + return err; +} + +static int check_extent_cache(struct inode *inode, pgoff_t pgofs, + struct buffer_head *bh_result) +{ + struct f2fs_inode_info *fi = F2FS_I(inode); + pgoff_t start_fofs, end_fofs; + block_t start_blkaddr; + + if (is_inode_flag_set(fi, FI_NO_EXTENT)) + return 0; + + read_lock(&fi->ext.ext_lock); + if (fi->ext.len == 0) { + read_unlock(&fi->ext.ext_lock); + return 0; + } + + stat_inc_total_hit(inode->i_sb); + + start_fofs = fi->ext.fofs; + end_fofs = fi->ext.fofs + fi->ext.len - 1; + start_blkaddr = fi->ext.blk_addr; + + if (pgofs >= start_fofs && pgofs <= end_fofs) { + unsigned int blkbits = inode->i_sb->s_blocksize_bits; + size_t count; + + clear_buffer_new(bh_result); + map_bh(bh_result, inode->i_sb, + start_blkaddr + pgofs - start_fofs); + count = end_fofs - pgofs + 1; + if (count < (UINT_MAX >> blkbits)) + bh_result->b_size = (count << blkbits); + else + bh_result->b_size = UINT_MAX; + + stat_inc_read_hit(inode->i_sb); + read_unlock(&fi->ext.ext_lock); + return 1; + } + read_unlock(&fi->ext.ext_lock); + return 0; +} + +void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn) +{ + struct f2fs_inode_info *fi = F2FS_I(dn->inode); + pgoff_t fofs, start_fofs, end_fofs; + block_t start_blkaddr, end_blkaddr; + int need_update = true; + + f2fs_bug_on(blk_addr == NEW_ADDR); + fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + + dn->ofs_in_node; + + /* Update the page address in the parent node */ + __set_data_blkaddr(dn, blk_addr); + + if (is_inode_flag_set(fi, FI_NO_EXTENT)) + return; + + write_lock(&fi->ext.ext_lock); + + start_fofs = fi->ext.fofs; + end_fofs = fi->ext.fofs + fi->ext.len - 1; + start_blkaddr = fi->ext.blk_addr; + end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1; + + /* Drop and initialize the matched extent */ + if (fi->ext.len == 1 && fofs == start_fofs) + fi->ext.len = 0; + + /* Initial extent */ + if (fi->ext.len == 0) { + if (blk_addr != NULL_ADDR) { + fi->ext.fofs = fofs; + fi->ext.blk_addr = blk_addr; + fi->ext.len = 1; + } + goto end_update; + } + + /* Front merge */ + if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) { + fi->ext.fofs--; + fi->ext.blk_addr--; + fi->ext.len++; + goto end_update; + } + + /* Back merge */ + if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) { + fi->ext.len++; + goto end_update; + } + + /* Split the existing extent */ + if (fi->ext.len > 1 && + fofs >= start_fofs && fofs <= end_fofs) { + if ((end_fofs - fofs) < (fi->ext.len >> 1)) { + fi->ext.len = fofs - start_fofs; + } else { + fi->ext.fofs = fofs + 1; + fi->ext.blk_addr = start_blkaddr + + fofs - start_fofs + 1; + fi->ext.len -= fofs - start_fofs + 1; + } + } else { + need_update = false; + } + + /* Finally, if the extent is very fragmented, let's drop the cache. 
*/ + if (fi->ext.len < F2FS_MIN_EXTENT_LEN) { + fi->ext.len = 0; + set_inode_flag(fi, FI_NO_EXTENT); + need_update = true; + } +end_update: + write_unlock(&fi->ext.ext_lock); + if (need_update) + sync_inode_page(dn); + return; +} + +struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct address_space *mapping = inode->i_mapping; + struct dnode_of_data dn; + struct page *page; + int err; + + page = find_get_page(mapping, index); + if (page && PageUptodate(page)) + return page; + f2fs_put_page(page, 0); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, index, LOOKUP_NODE); + if (err) + return ERR_PTR(err); + f2fs_put_dnode(&dn); + + if (dn.data_blkaddr == NULL_ADDR) + return ERR_PTR(-ENOENT); + + /* By fallocate(), there is no cached page, but with NEW_ADDR */ + if (unlikely(dn.data_blkaddr == NEW_ADDR)) + return ERR_PTR(-EINVAL); + + page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); + if (!page) + return ERR_PTR(-ENOMEM); + + if (PageUptodate(page)) { + unlock_page(page); + return page; + } + + err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, + sync ? READ_SYNC : READA); + if (err) + return ERR_PTR(err); + + if (sync) { + wait_on_page_locked(page); + if (unlikely(!PageUptodate(page))) { + f2fs_put_page(page, 0); + return ERR_PTR(-EIO); + } + } + return page; +} + +/* + * If it tries to access a hole, return an error. + * Because, the callers, functions in dir.c and GC, should be able to know + * whether this page exists or not. + */ +struct page *get_lock_data_page(struct inode *inode, pgoff_t index) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct address_space *mapping = inode->i_mapping; + struct dnode_of_data dn; + struct page *page; + int err; + +repeat: + page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); + if (!page) + return ERR_PTR(-ENOMEM); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, index, LOOKUP_NODE); + if (err) { + f2fs_put_page(page, 1); + return ERR_PTR(err); + } + f2fs_put_dnode(&dn); + + if (unlikely(dn.data_blkaddr == NULL_ADDR)) { + f2fs_put_page(page, 1); + return ERR_PTR(-ENOENT); + } + + if (PageUptodate(page)) + return page; + + /* + * A new dentry page is allocated but not able to be written, since its + * new inode page couldn't be allocated due to -ENOSPC. + * In such the case, its blkaddr can be remained as NEW_ADDR. + * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata. + */ + if (dn.data_blkaddr == NEW_ADDR) { + zero_user_segment(page, 0, PAGE_CACHE_SIZE); + SetPageUptodate(page); + return page; + } + + err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC); + if (err) + return ERR_PTR(err); + + lock_page(page); + if (unlikely(!PageUptodate(page))) { + f2fs_put_page(page, 1); + return ERR_PTR(-EIO); + } + if (unlikely(page->mapping != mapping)) { + f2fs_put_page(page, 1); + goto repeat; + } + return page; +} + +/* + * Caller ensures that this data page is never allocated. + * A new zero-filled data page is allocated in the page cache. + * + * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and + * f2fs_unlock_op(). + * Note that, ipage is set only by make_empty_dir. 
+ */ +struct page *get_new_data_page(struct inode *inode, + struct page *ipage, pgoff_t index, bool new_i_size) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct address_space *mapping = inode->i_mapping; + struct page *page; + struct dnode_of_data dn; + int err; + + set_new_dnode(&dn, inode, ipage, NULL, 0); + err = f2fs_reserve_block(&dn, index); + if (err) + return ERR_PTR(err); +repeat: + page = grab_cache_page(mapping, index); + if (!page) { + err = -ENOMEM; + goto put_err; + } + + if (PageUptodate(page)) + return page; + + if (dn.data_blkaddr == NEW_ADDR) { + zero_user_segment(page, 0, PAGE_CACHE_SIZE); + SetPageUptodate(page); + } else { + err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, + READ_SYNC); + if (err) + goto put_err; + + lock_page(page); + if (unlikely(!PageUptodate(page))) { + f2fs_put_page(page, 1); + err = -EIO; + goto put_err; + } + if (unlikely(page->mapping != mapping)) { + f2fs_put_page(page, 1); + goto repeat; + } + } + + if (new_i_size && + i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) { + i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT)); + /* Only the directory inode sets new_i_size */ + set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); + } + return page; + +put_err: + f2fs_put_dnode(&dn); + return ERR_PTR(err); +} + +static int __allocate_data_block(struct dnode_of_data *dn) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct f2fs_summary sum; + block_t new_blkaddr; + struct node_info ni; + int type; + + if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) + return -EPERM; + if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) + return -ENOSPC; + + __set_data_blkaddr(dn, NEW_ADDR); + dn->data_blkaddr = NEW_ADDR; + + get_node_info(sbi, dn->nid, &ni); + set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); + + type = CURSEG_WARM_DATA; + + allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type); + + /* direct IO doesn't use extent cache to maximize the performance */ + set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT); + update_extent_cache(new_blkaddr, dn); + clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT); + + dn->data_blkaddr = new_blkaddr; + return 0; +} + +/* + * get_data_block() now supported readahead/bmap/rw direct_IO with mapped bh. + * If original data blocks are allocated, then give them to blockdev. + * Otherwise, + * a. preallocate requested block addresses + * b. do not use extent cache for better performance + * c. give the block addresses to blockdev + */ +static int get_data_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + unsigned int blkbits = inode->i_sb->s_blocksize_bits; + unsigned maxblocks = bh_result->b_size >> blkbits; + struct dnode_of_data dn; + int mode = create ? 
ALLOC_NODE : LOOKUP_NODE_RA; + pgoff_t pgofs, end_offset; + int err = 0, ofs = 1; + bool allocated = false; + + /* Get the page offset from the block offset(iblock) */ + pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits)); + + if (check_extent_cache(inode, pgofs, bh_result)) + goto out; + + if (create) + f2fs_lock_op(sbi); + + /* When reading holes, we need its node page */ + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, pgofs, mode); + if (err) { + if (err == -ENOENT) + err = 0; + goto unlock_out; + } + if (dn.data_blkaddr == NEW_ADDR) + goto put_out; + + if (dn.data_blkaddr != NULL_ADDR) { + map_bh(bh_result, inode->i_sb, dn.data_blkaddr); + } else if (create) { + err = __allocate_data_block(&dn); + if (err) + goto put_out; + allocated = true; + map_bh(bh_result, inode->i_sb, dn.data_blkaddr); + } else { + goto put_out; + } + + end_offset = IS_INODE(dn.node_page) ? + ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK; + bh_result->b_size = (((size_t)1) << blkbits); + dn.ofs_in_node++; + pgofs++; + +get_next: + if (dn.ofs_in_node >= end_offset) { + if (allocated) + sync_inode_page(&dn); + allocated = false; + f2fs_put_dnode(&dn); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, pgofs, mode); + if (err) { + if (err == -ENOENT) + err = 0; + goto unlock_out; + } + if (dn.data_blkaddr == NEW_ADDR) + goto put_out; + + end_offset = IS_INODE(dn.node_page) ? + ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK; + } + + if (maxblocks > (bh_result->b_size >> blkbits)) { + block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); + if (blkaddr == NULL_ADDR && create) { + err = __allocate_data_block(&dn); + if (err) + goto sync_out; + allocated = true; + blkaddr = dn.data_blkaddr; + } + /* Give more consecutive addresses for the read ahead */ + if (blkaddr == (bh_result->b_blocknr + ofs)) { + ofs++; + dn.ofs_in_node++; + pgofs++; + bh_result->b_size += (((size_t)1) << blkbits); + goto get_next; + } + } +sync_out: + if (allocated) + sync_inode_page(&dn); +put_out: + f2fs_put_dnode(&dn); +unlock_out: + if (create) + f2fs_unlock_op(sbi); +out: + trace_f2fs_get_data_block(inode, iblock, bh_result, err); + return err; +} + +static int f2fs_read_data_page(struct file *file, struct page *page) +{ + struct inode *inode = page->mapping->host; + int ret; + + /* If the file has inline data, try to read it directlly */ + if (f2fs_has_inline_data(inode)) + ret = f2fs_read_inline_data(inode, page); + else + ret = mpage_readpage(page, get_data_block); + + return ret; +} + +static int f2fs_read_data_pages(struct file *file, + struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + struct inode *inode = file->f_mapping->host; + + /* If the file has inline data, skip readpages */ + if (f2fs_has_inline_data(inode)) + return 0; + + return mpage_readpages(mapping, pages, nr_pages, get_data_block); +} + +int do_write_data_page(struct page *page, struct f2fs_io_info *fio) +{ + struct inode *inode = page->mapping->host; + block_t old_blkaddr, new_blkaddr; + struct dnode_of_data dn; + int err = 0; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE); + if (err) + return err; + + old_blkaddr = dn.data_blkaddr; + + /* This page is already truncated */ + if (old_blkaddr == NULL_ADDR) + goto out_writepage; + + set_page_writeback(page); + + /* + * If current allocation needs SSR, + * it had better in-place writes for updated data. 
+ */ + if (unlikely(old_blkaddr != NEW_ADDR && + !is_cold_data(page) && + need_inplace_update(inode))) { + rewrite_data_page(page, old_blkaddr, fio); + } else { + write_data_page(page, &dn, &new_blkaddr, fio); + update_extent_cache(new_blkaddr, &dn); + } +out_writepage: + f2fs_put_dnode(&dn); + return err; +} + +static int f2fs_write_data_page(struct page *page, + struct writeback_control *wbc) +{ + struct inode *inode = page->mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + loff_t i_size = i_size_read(inode); + const pgoff_t end_index = ((unsigned long long) i_size) + >> PAGE_CACHE_SHIFT; + unsigned offset = 0; + bool need_balance_fs = false; + int err = 0; + struct f2fs_io_info fio = { + .type = DATA, + .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, + }; + + if (page->index < end_index) + goto write; + + /* + * If the offset is out-of-range of file size, + * this page does not have to be written to disk. + */ + offset = i_size & (PAGE_CACHE_SIZE - 1); + if ((page->index >= end_index + 1) || !offset) { + if (S_ISDIR(inode->i_mode)) { + dec_page_count(sbi, F2FS_DIRTY_DENTS); + inode_dec_dirty_dents(inode); + } + goto out; + } + + zero_user_segment(page, offset, PAGE_CACHE_SIZE); +write: + if (unlikely(sbi->por_doing)) { + err = AOP_WRITEPAGE_ACTIVATE; + goto redirty_out; + } + + /* Dentry blocks are controlled by checkpoint */ + if (S_ISDIR(inode->i_mode)) { + dec_page_count(sbi, F2FS_DIRTY_DENTS); + inode_dec_dirty_dents(inode); + err = do_write_data_page(page, &fio); + } else { + f2fs_lock_op(sbi); + + if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode)) { + err = f2fs_write_inline_data(inode, page, offset); + f2fs_unlock_op(sbi); + goto out; + } else { + err = do_write_data_page(page, &fio); + } + + f2fs_unlock_op(sbi); + need_balance_fs = true; + } + if (err == -ENOENT) + goto out; + else if (err) + goto redirty_out; + + if (wbc->for_reclaim) { + f2fs_submit_merged_bio(sbi, DATA, WRITE); + need_balance_fs = false; + } + + clear_cold_data(page); +out: + unlock_page(page); + if (need_balance_fs) + f2fs_balance_fs(sbi); + return 0; + +redirty_out: + wbc->pages_skipped++; + set_page_dirty(page); + return err; +} + +#define MAX_DESIRED_PAGES_WP 4096 + +static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, + void *data) +{ + struct address_space *mapping = data; + int ret = mapping->a_ops->writepage(page, wbc); + mapping_set_error(mapping, ret); + return ret; +} + +static int f2fs_write_data_pages(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct inode *inode = mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + bool locked = false; + int ret; + long excess_nrtw = 0, desired_nrtw; + + /* deal with chardevs and other special file */ + if (!mapping->a_ops->writepage) + return 0; + + if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) { + desired_nrtw = MAX_DESIRED_PAGES_WP; + excess_nrtw = desired_nrtw - wbc->nr_to_write; + wbc->nr_to_write = desired_nrtw; + } + + if (!S_ISDIR(inode->i_mode)) { + mutex_lock(&sbi->writepages); + locked = true; + } + ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); + if (locked) + mutex_unlock(&sbi->writepages); + + f2fs_submit_merged_bio(sbi, DATA, WRITE); + + remove_dirty_dir_inode(inode); + + wbc->nr_to_write -= excess_nrtw; + return ret; +} + +static int f2fs_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + struct inode *inode = mapping->host; + 
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct page *page; + pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT; + struct dnode_of_data dn; + int err = 0; + + f2fs_balance_fs(sbi); +repeat: + err = f2fs_convert_inline_data(inode, pos + len); + if (err) + return err; + + page = grab_cache_page_write_begin(mapping, index, flags); + if (!page) + return -ENOMEM; + *pagep = page; + + if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA) + goto inline_data; + + f2fs_lock_op(sbi); + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = f2fs_reserve_block(&dn, index); + f2fs_unlock_op(sbi); + + if (err) { + f2fs_put_page(page, 1); + return err; + } +inline_data: + if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) + return 0; + + if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { + unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned end = start + len; + + /* Reading beyond i_size is simple: memset to zero */ + zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); + goto out; + } + + if (dn.data_blkaddr == NEW_ADDR) { + zero_user_segment(page, 0, PAGE_CACHE_SIZE); + } else { + if (f2fs_has_inline_data(inode)) + err = f2fs_read_inline_data(inode, page); + else + err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, + READ_SYNC); + if (err) + return err; + lock_page(page); + if (unlikely(!PageUptodate(page))) { + f2fs_put_page(page, 1); + return -EIO; + } + if (unlikely(page->mapping != mapping)) { + f2fs_put_page(page, 1); + goto repeat; + } + } +out: + SetPageUptodate(page); + clear_cold_data(page); + return 0; +} + +static int f2fs_write_end(struct file *file, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + struct inode *inode = page->mapping->host; + + SetPageUptodate(page); + set_page_dirty(page); + + if (pos + copied > i_size_read(inode)) { + i_size_write(inode, pos + copied); + mark_inode_dirty(inode); + update_inode_page(inode); + } + + f2fs_put_page(page, 1); + return copied; +} + +static int check_direct_IO(struct inode *inode, int rw, + const struct iovec *iov, loff_t offset, unsigned long nr_segs) +{ + unsigned blocksize_mask = inode->i_sb->s_blocksize - 1; + int i; + + if (rw == READ) + return 0; + + if (offset & blocksize_mask) + return -EINVAL; + + for (i = 0; i < nr_segs; i++) + if (iov[i].iov_len & blocksize_mask) + return -EINVAL; + return 0; +} + +static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, + const struct iovec *iov, loff_t offset, unsigned long nr_segs) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; + + /* Let buffer I/O handle the inline data case. 
*/ + if (f2fs_has_inline_data(inode)) + return 0; + + if (check_direct_IO(inode, rw, iov, offset, nr_segs)) + return 0; + + return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + get_data_block); +} + +static void f2fs_invalidate_data_page(struct page *page, unsigned long offset) +{ + struct inode *inode = page->mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + if (S_ISDIR(inode->i_mode) && PageDirty(page)) { + dec_page_count(sbi, F2FS_DIRTY_DENTS); + inode_dec_dirty_dents(inode); + } + ClearPagePrivate(page); +} + +static int f2fs_release_data_page(struct page *page, gfp_t wait) +{ + ClearPagePrivate(page); + return 1; +} + +static int f2fs_set_data_page_dirty(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct inode *inode = mapping->host; + + trace_f2fs_set_page_dirty(page, DATA); + + SetPageUptodate(page); + mark_inode_dirty(inode); + + if (!PageDirty(page)) { + __set_page_dirty_nobuffers(page); + set_dirty_dir_page(inode, page); + return 1; + } + return 0; +} + +static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) +{ + return generic_block_bmap(mapping, block, get_data_block); +} + +const struct address_space_operations f2fs_dblock_aops = { + .readpage = f2fs_read_data_page, + .readpages = f2fs_read_data_pages, + .writepage = f2fs_write_data_page, + .writepages = f2fs_write_data_pages, + .write_begin = f2fs_write_begin, + .write_end = f2fs_write_end, + .set_page_dirty = f2fs_set_data_page_dirty, + .invalidatepage = f2fs_invalidate_data_page, + .releasepage = f2fs_release_data_page, + .direct_IO = f2fs_direct_IO, + .bmap = f2fs_bmap, +}; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c new file mode 100644 index 0000000000000..3de9d20d0c14a --- /dev/null +++ b/fs/f2fs/debug.c @@ -0,0 +1,374 @@ +/* + * f2fs debugging statistics + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * Copyright (c) 2012 Linux Foundation + * Copyright (c) 2012 Greg Kroah-Hartman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include "gc.h" + +static LIST_HEAD(f2fs_stat_list); +static struct dentry *f2fs_debugfs_root; +static DEFINE_MUTEX(f2fs_stat_mutex); + +static void update_general_status(struct f2fs_sb_info *sbi) +{ + struct f2fs_stat_info *si = F2FS_STAT(sbi); + int i; + + /* valid check of the segment numbers */ + si->hit_ext = sbi->read_hit_ext; + si->total_ext = sbi->total_hit_ext; + si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); + si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); + si->ndirty_dirs = sbi->n_dirty_dirs; + si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); + si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg; + si->rsvd_segs = reserved_segments(sbi); + si->overp_segs = overprovision_segments(sbi); + si->valid_count = valid_user_blocks(sbi); + si->valid_node_count = valid_node_count(sbi); + si->valid_inode_count = valid_inode_count(sbi); + si->inline_inode = sbi->inline_inode; + si->utilization = utilization(sbi); + + si->free_segs = free_segments(sbi); + si->free_secs = free_sections(sbi); + si->prefree_count = prefree_segments(sbi); + si->dirty_count = dirty_segments(sbi); + si->node_pages = NODE_MAPPING(sbi)->nrpages; + si->meta_pages = META_MAPPING(sbi)->nrpages; + si->nats = NM_I(sbi)->nat_cnt; + si->sits = SIT_I(sbi)->dirty_sentries; + si->fnids = NM_I(sbi)->fcnt; + si->bg_gc = sbi->bg_gc; + si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg) + * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg) + / 2; + si->util_valid = (int)(written_block_count(sbi) >> + sbi->log_blocks_per_seg) + * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg) + / 2; + si->util_invalid = 50 - si->util_free - si->util_valid; + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) { + struct curseg_info *curseg = CURSEG_I(sbi, i); + si->curseg[i] = curseg->segno; + si->cursec[i] = curseg->segno / sbi->segs_per_sec; + si->curzone[i] = si->cursec[i] / sbi->secs_per_zone; + } + + for (i = 0; i < 2; i++) { + si->segment_count[i] = sbi->segment_count[i]; + si->block_count[i] = sbi->block_count[i]; + } +} + +/* + * This function calculates BDF of every segments + */ +static void update_sit_info(struct f2fs_sb_info *sbi) +{ + struct f2fs_stat_info *si = F2FS_STAT(sbi); + unsigned int blks_per_sec, hblks_per_sec, total_vblocks, bimodal, dist; + struct sit_info *sit_i = SIT_I(sbi); + unsigned int segno, vblocks; + int ndirty = 0; + + bimodal = 0; + total_vblocks = 0; + blks_per_sec = sbi->segs_per_sec * (1 << sbi->log_blocks_per_seg); + hblks_per_sec = blks_per_sec / 2; + mutex_lock(&sit_i->sentry_lock); + for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) { + vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec); + dist = abs(vblocks - hblks_per_sec); + bimodal += dist * dist; + + if (vblocks > 0 && vblocks < blks_per_sec) { + total_vblocks += vblocks; + ndirty++; + } + } + mutex_unlock(&sit_i->sentry_lock); + dist = TOTAL_SECS(sbi) * hblks_per_sec * hblks_per_sec / 100; + si->bimodal = bimodal / dist; + if (si->dirty_count) + si->avg_vblocks = total_vblocks / ndirty; + else + si->avg_vblocks = 0; +} + +/* + * This function calculates memory footprint. 
+ */ +static void update_mem_info(struct f2fs_sb_info *sbi) +{ + struct f2fs_stat_info *si = F2FS_STAT(sbi); + unsigned npages; + + if (si->base_mem) + goto get_cache; + + si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize; + si->base_mem += 2 * sizeof(struct f2fs_inode_info); + si->base_mem += sizeof(*sbi->ckpt); + + /* build sm */ + si->base_mem += sizeof(struct f2fs_sm_info); + + /* build sit */ + si->base_mem += sizeof(struct sit_info); + si->base_mem += TOTAL_SEGS(sbi) * sizeof(struct seg_entry); + si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi)); + si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * TOTAL_SEGS(sbi); + if (sbi->segs_per_sec > 1) + si->base_mem += TOTAL_SECS(sbi) * sizeof(struct sec_entry); + si->base_mem += __bitmap_size(sbi, SIT_BITMAP); + + /* build free segmap */ + si->base_mem += sizeof(struct free_segmap_info); + si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi)); + si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi)); + + /* build curseg */ + si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE; + si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE; + + /* build dirty segmap */ + si->base_mem += sizeof(struct dirty_seglist_info); + si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi)); + si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi)); + + /* buld nm */ + si->base_mem += sizeof(struct f2fs_nm_info); + si->base_mem += __bitmap_size(sbi, NAT_BITMAP); + + /* build gc */ + si->base_mem += sizeof(struct f2fs_gc_kthread); + +get_cache: + /* free nids */ + si->cache_mem = NM_I(sbi)->fcnt; + si->cache_mem += NM_I(sbi)->nat_cnt; + npages = NODE_MAPPING(sbi)->nrpages; + si->cache_mem += npages << PAGE_CACHE_SHIFT; + npages = META_MAPPING(sbi)->nrpages; + si->cache_mem += npages << PAGE_CACHE_SHIFT; + si->cache_mem += sbi->n_orphans * sizeof(struct orphan_inode_entry); + si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry); +} + +static int stat_show(struct seq_file *s, void *v) +{ + struct f2fs_stat_info *si; + int i = 0; + int j; + + mutex_lock(&f2fs_stat_mutex); + list_for_each_entry(si, &f2fs_stat_list, stat_list) { + char devname[BDEVNAME_SIZE]; + + update_general_status(si->sbi); + + seq_printf(s, "\n=====[ partition info(%s). 
#%d ]=====\n", + bdevname(si->sbi->sb->s_bdev, devname), i++); + seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", + si->sit_area_segs, si->nat_area_segs); + seq_printf(s, "[SSA: %d] [MAIN: %d", + si->ssa_area_segs, si->main_area_segs); + seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", + si->overp_segs, si->rsvd_segs); + seq_printf(s, "Utilization: %d%% (%d valid blocks)\n", + si->utilization, si->valid_count); + seq_printf(s, " - Node: %u (Inode: %u, ", + si->valid_node_count, si->valid_inode_count); + seq_printf(s, "Other: %u)\n - Data: %u\n", + si->valid_node_count - si->valid_inode_count, + si->valid_count - si->valid_node_count); + seq_printf(s, " - Inline_data Inode: %u\n", + si->inline_inode); + seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n", + si->main_area_segs, si->main_area_sections, + si->main_area_zones); + seq_printf(s, " - COLD data: %d, %d, %d\n", + si->curseg[CURSEG_COLD_DATA], + si->cursec[CURSEG_COLD_DATA], + si->curzone[CURSEG_COLD_DATA]); + seq_printf(s, " - WARM data: %d, %d, %d\n", + si->curseg[CURSEG_WARM_DATA], + si->cursec[CURSEG_WARM_DATA], + si->curzone[CURSEG_WARM_DATA]); + seq_printf(s, " - HOT data: %d, %d, %d\n", + si->curseg[CURSEG_HOT_DATA], + si->cursec[CURSEG_HOT_DATA], + si->curzone[CURSEG_HOT_DATA]); + seq_printf(s, " - Dir dnode: %d, %d, %d\n", + si->curseg[CURSEG_HOT_NODE], + si->cursec[CURSEG_HOT_NODE], + si->curzone[CURSEG_HOT_NODE]); + seq_printf(s, " - File dnode: %d, %d, %d\n", + si->curseg[CURSEG_WARM_NODE], + si->cursec[CURSEG_WARM_NODE], + si->curzone[CURSEG_WARM_NODE]); + seq_printf(s, " - Indir nodes: %d, %d, %d\n", + si->curseg[CURSEG_COLD_NODE], + si->cursec[CURSEG_COLD_NODE], + si->curzone[CURSEG_COLD_NODE]); + seq_printf(s, "\n - Valid: %d\n - Dirty: %d\n", + si->main_area_segs - si->dirty_count - + si->prefree_count - si->free_segs, + si->dirty_count); + seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n", + si->prefree_count, si->free_segs, si->free_secs); + seq_printf(s, "GC calls: %d (BG: %d)\n", + si->call_count, si->bg_gc); + seq_printf(s, " - data segments : %d\n", si->data_segs); + seq_printf(s, " - node segments : %d\n", si->node_segs); + seq_printf(s, "Try to move %d blocks\n", si->tot_blks); + seq_printf(s, " - data blocks : %d\n", si->data_blks); + seq_printf(s, " - node blocks : %d\n", si->node_blks); + seq_printf(s, "\nExtent Hit Ratio: %d / %d\n", + si->hit_ext, si->total_ext); + seq_puts(s, "\nBalancing F2FS Async:\n"); + seq_printf(s, " - nodes: %4d in %4d\n", + si->ndirty_node, si->node_pages); + seq_printf(s, " - dents: %4d in dirs:%4d\n", + si->ndirty_dent, si->ndirty_dirs); + seq_printf(s, " - meta: %4d in %4d\n", + si->ndirty_meta, si->meta_pages); + seq_printf(s, " - NATs: %5d > %lu\n", + si->nats, NM_WOUT_THRESHOLD); + seq_printf(s, " - SITs: %5d\n - free_nids: %5d\n", + si->sits, si->fnids); + seq_puts(s, "\nDistribution of User Blocks:"); + seq_puts(s, " [ valid | invalid | free ]\n"); + seq_puts(s, " ["); + + for (j = 0; j < si->util_valid; j++) + seq_putc(s, '-'); + seq_putc(s, '|'); + + for (j = 0; j < si->util_invalid; j++) + seq_putc(s, '-'); + seq_putc(s, '|'); + + for (j = 0; j < si->util_free; j++) + seq_putc(s, '-'); + seq_puts(s, "]\n\n"); + seq_printf(s, "SSR: %u blocks in %u segments\n", + si->block_count[SSR], si->segment_count[SSR]); + seq_printf(s, "LFS: %u blocks in %u segments\n", + si->block_count[LFS], si->segment_count[LFS]); + + /* segment usage info */ + update_sit_info(si->sbi); + seq_printf(s, "\nBDF: %u, avg. 
vblocks: %u\n", + si->bimodal, si->avg_vblocks); + + /* memory footprint */ + update_mem_info(si->sbi); + seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", + (si->base_mem + si->cache_mem) >> 10, + si->base_mem >> 10, si->cache_mem >> 10); + } + mutex_unlock(&f2fs_stat_mutex); + return 0; +} + +static int stat_open(struct inode *inode, struct file *file) +{ + return single_open(file, stat_show, inode->i_private); +} + +static const struct file_operations stat_fops = { + .open = stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int f2fs_build_stats(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_stat_info *si; + + si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL); + if (!si) + return -ENOMEM; + + si->all_area_segs = le32_to_cpu(raw_super->segment_count); + si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit); + si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); + si->ssa_area_segs = le32_to_cpu(raw_super->segment_count_ssa); + si->main_area_segs = le32_to_cpu(raw_super->segment_count_main); + si->main_area_sections = le32_to_cpu(raw_super->section_count); + si->main_area_zones = si->main_area_sections / + le32_to_cpu(raw_super->secs_per_zone); + si->sbi = sbi; + sbi->stat_info = si; + + mutex_lock(&f2fs_stat_mutex); + list_add_tail(&si->stat_list, &f2fs_stat_list); + mutex_unlock(&f2fs_stat_mutex); + + return 0; +} + +void f2fs_destroy_stats(struct f2fs_sb_info *sbi) +{ + struct f2fs_stat_info *si = F2FS_STAT(sbi); + + mutex_lock(&f2fs_stat_mutex); + list_del(&si->stat_list); + mutex_unlock(&f2fs_stat_mutex); + + kfree(si); +} + +void __init f2fs_create_root_stats(void) +{ + struct dentry *file; + + f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL); + if (!f2fs_debugfs_root) + goto bail; + + file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root, + NULL, &stat_fops); + if (!file) + goto free_debugfs_dir; + + return; + +free_debugfs_dir: + debugfs_remove(f2fs_debugfs_root); + +bail: + f2fs_debugfs_root = NULL; + return; +} + +void f2fs_destroy_root_stats(void) +{ + if (!f2fs_debugfs_root) + return; + + debugfs_remove_recursive(f2fs_debugfs_root); + f2fs_debugfs_root = NULL; +} diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c new file mode 100644 index 0000000000000..852fd1d57defb --- /dev/null +++ b/fs/f2fs/dir.c @@ -0,0 +1,707 @@ +/* + * fs/f2fs/dir.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include "f2fs.h" +#include "node.h" +#include "acl.h" +#include "xattr.h" + +static unsigned long dir_blocks(struct inode *inode) +{ + return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1)) + >> PAGE_CACHE_SHIFT; +} + +static unsigned int dir_buckets(unsigned int level) +{ + if (level < MAX_DIR_HASH_DEPTH / 2) + return 1 << level; + else + return 1 << ((MAX_DIR_HASH_DEPTH / 2) - 1); +} + +static unsigned int bucket_blocks(unsigned int level) +{ + if (level < MAX_DIR_HASH_DEPTH / 2) + return 2; + else + return 4; +} + +static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = { + [F2FS_FT_UNKNOWN] = DT_UNKNOWN, + [F2FS_FT_REG_FILE] = DT_REG, + [F2FS_FT_DIR] = DT_DIR, + [F2FS_FT_CHRDEV] = DT_CHR, + [F2FS_FT_BLKDEV] = DT_BLK, + [F2FS_FT_FIFO] = DT_FIFO, + [F2FS_FT_SOCK] = DT_SOCK, + [F2FS_FT_SYMLINK] = DT_LNK, +}; + +#define S_SHIFT 12 +static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = { + [S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE, + [S_IFDIR >> S_SHIFT] = F2FS_FT_DIR, + [S_IFCHR >> S_SHIFT] = F2FS_FT_CHRDEV, + [S_IFBLK >> S_SHIFT] = F2FS_FT_BLKDEV, + [S_IFIFO >> S_SHIFT] = F2FS_FT_FIFO, + [S_IFSOCK >> S_SHIFT] = F2FS_FT_SOCK, + [S_IFLNK >> S_SHIFT] = F2FS_FT_SYMLINK, +}; + +static void set_de_type(struct f2fs_dir_entry *de, struct inode *inode) +{ + mode_t mode = inode->i_mode; + de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]; +} + +static unsigned long dir_block_index(unsigned int level, unsigned int idx) +{ + unsigned long i; + unsigned long bidx = 0; + + for (i = 0; i < level; i++) + bidx += dir_buckets(i) * bucket_blocks(i); + bidx += idx * bucket_blocks(level); + return bidx; +} + +static bool early_match_name(const char *name, size_t namelen, + f2fs_hash_t namehash, struct f2fs_dir_entry *de) +{ + if (le16_to_cpu(de->name_len) != namelen) + return false; + + if (de->hash_code != namehash) + return false; + + return true; +} + +static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, + const char *name, size_t namelen, int *max_slots, + f2fs_hash_t namehash, struct page **res_page, + bool nocase) +{ + struct f2fs_dir_entry *de; + unsigned long bit_pos, end_pos, next_pos; + struct f2fs_dentry_block *dentry_blk = kmap(dentry_page); + int slots; + + bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, 0); + while (bit_pos < NR_DENTRY_IN_BLOCK) { + de = &dentry_blk->dentry[bit_pos]; + slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); + + if (nocase) { + if ((le16_to_cpu(de->name_len) == namelen) && + !strncasecmp(dentry_blk->filename[bit_pos], + name, namelen)) { + *res_page = dentry_page; + goto found; + } + } else if (early_match_name(name, namelen, namehash, de)) { + if (!memcmp(dentry_blk->filename[bit_pos], + name, namelen)) { + *res_page = dentry_page; + goto found; + } + } + next_pos = bit_pos + slots; + bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, next_pos); + if (bit_pos >= NR_DENTRY_IN_BLOCK) + end_pos = NR_DENTRY_IN_BLOCK; + else + end_pos = bit_pos; + if (*max_slots < end_pos - next_pos) + *max_slots = end_pos - next_pos; + } + + de = NULL; + kunmap(dentry_page); +found: + return de; +} + +static struct f2fs_dir_entry *find_in_level(struct inode *dir, + unsigned int level, const char *name, size_t namelen, + f2fs_hash_t namehash, struct page **res_page) +{ + int s = GET_DENTRY_SLOTS(namelen); + unsigned int nbucket, nblock; + unsigned int bidx, end_block; + struct page *dentry_page; + struct f2fs_dir_entry *de = NULL; + struct f2fs_sb_info 
*sbi = F2FS_SB(dir->i_sb); + bool room = false; + int max_slots = 0; + + f2fs_bug_on(level > MAX_DIR_HASH_DEPTH); + + nbucket = dir_buckets(level); + nblock = bucket_blocks(level); + + bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket); + end_block = bidx + nblock; + + for (; bidx < end_block; bidx++) { + bool nocase = false; + + /* no need to allocate new dentry pages to all the indices */ + dentry_page = find_data_page(dir, bidx, true); + if (IS_ERR(dentry_page)) { + room = true; + continue; + } + + if (test_opt(sbi, ANDROID_EMU) && + (sbi->android_emu_flags & F2FS_ANDROID_EMU_NOCASE) && + F2FS_I(dir)->i_advise & FADVISE_ANDROID_EMU) + nocase = true; + + de = find_in_block(dentry_page, name, namelen, + &max_slots, namehash, res_page, + nocase); + if (de) + break; + + if (max_slots >= s) + room = true; + f2fs_put_page(dentry_page, 0); + } + + if (!de && room && F2FS_I(dir)->chash != namehash) { + F2FS_I(dir)->chash = namehash; + F2FS_I(dir)->clevel = level; + } + + return de; +} + +/* + * Find an entry in the specified directory with the wanted name. + * It returns the page where the entry was found (as a parameter - res_page), + * and the entry itself. Page is returned mapped and unlocked. + * Entry is guaranteed to be valid. + */ +struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, + struct qstr *child, struct page **res_page) +{ + const char *name = child->name; + size_t namelen = child->len; + unsigned long npages = dir_blocks(dir); + struct f2fs_dir_entry *de = NULL; + f2fs_hash_t name_hash; + unsigned int max_depth; + unsigned int level; + + if (npages == 0) + return NULL; + + *res_page = NULL; + + name_hash = f2fs_dentry_hash(name, namelen); + max_depth = F2FS_I(dir)->i_current_depth; + + for (level = 0; level < max_depth; level++) { + de = find_in_level(dir, level, name, + namelen, name_hash, res_page); + if (de) + break; + } + if (!de && F2FS_I(dir)->chash != name_hash) { + F2FS_I(dir)->chash = name_hash; + F2FS_I(dir)->clevel = level - 1; + } + return de; +} + +struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p) +{ + struct page *page; + struct f2fs_dir_entry *de; + struct f2fs_dentry_block *dentry_blk; + + page = get_lock_data_page(dir, 0); + if (IS_ERR(page)) + return NULL; + + dentry_blk = kmap(page); + de = &dentry_blk->dentry[1]; + *p = page; + unlock_page(page); + return de; +} + +ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr) +{ + ino_t res = 0; + struct f2fs_dir_entry *de; + struct page *page; + + de = f2fs_find_entry(dir, qstr, &page); + if (de) { + res = le32_to_cpu(de->ino); + kunmap(page); + f2fs_put_page(page, 0); + } + + return res; +} + +void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, + struct page *page, struct inode *inode) +{ + lock_page(page); + wait_on_page_writeback(page); + de->ino = cpu_to_le32(inode->i_ino); + set_de_type(de, inode); + kunmap(page); + set_page_dirty(page); + dir->i_mtime = dir->i_ctime = CURRENT_TIME; + mark_inode_dirty(dir); + + f2fs_put_page(page, 1); +} + +static void init_dent_inode(const struct qstr *name, struct page *ipage) +{ + struct f2fs_inode *ri; + + /* copy name info. 
to this inode page */ + ri = F2FS_INODE(ipage); + ri->i_namelen = cpu_to_le32(name->len); + memcpy(ri->i_name, name->name, name->len); + set_page_dirty(ipage); +} + +int update_dent_inode(struct inode *inode, const struct qstr *name) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct page *page; + + page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(page)) + return PTR_ERR(page); + + init_dent_inode(name, page); + f2fs_put_page(page, 1); + + return 0; +} + +static int make_empty_dir(struct inode *inode, + struct inode *parent, struct page *page) +{ + struct page *dentry_page; + struct f2fs_dentry_block *dentry_blk; + struct f2fs_dir_entry *de; + void *kaddr; + + dentry_page = get_new_data_page(inode, page, 0, true); + if (IS_ERR(dentry_page)) + return PTR_ERR(dentry_page); + + kaddr = kmap_atomic(dentry_page); + dentry_blk = (struct f2fs_dentry_block *)kaddr; + + de = &dentry_blk->dentry[0]; + de->name_len = cpu_to_le16(1); + de->hash_code = 0; + de->ino = cpu_to_le32(inode->i_ino); + memcpy(dentry_blk->filename[0], ".", 1); + set_de_type(de, inode); + + de = &dentry_blk->dentry[1]; + de->hash_code = 0; + de->name_len = cpu_to_le16(2); + de->ino = cpu_to_le32(parent->i_ino); + memcpy(dentry_blk->filename[1], "..", 2); + set_de_type(de, inode); + + test_and_set_bit_le(0, &dentry_blk->dentry_bitmap); + test_and_set_bit_le(1, &dentry_blk->dentry_bitmap); + kunmap_atomic(kaddr); + + set_page_dirty(dentry_page); + f2fs_put_page(dentry_page, 1); + return 0; +} + +static struct page *init_inode_metadata(struct inode *inode, + struct inode *dir, const struct qstr *name) +{ + struct page *page; + int err; + + if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { + page = new_inode_page(inode, name); + if (IS_ERR(page)) + return page; + + if (S_ISDIR(inode->i_mode)) { + err = make_empty_dir(inode, dir, page); + if (err) + goto error; + } + + err = f2fs_init_acl(inode, dir, page); + if (err) + goto put_error; + + err = f2fs_init_security(inode, dir, name, page); + if (err) + goto put_error; + + wait_on_page_writeback(page); + } else { + page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino); + if (IS_ERR(page)) + return page; + + wait_on_page_writeback(page); + set_cold_node(inode, page); + } + + init_dent_inode(name, page); + + /* + * This file should be checkpointed during fsync. + * We lost i_pino from now on. 
+ */ + if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) { + file_lost_pino(inode); + inc_nlink(inode); + } + return page; + +put_error: + f2fs_put_page(page, 1); +error: + remove_inode_page(inode); + return ERR_PTR(err); +} + +static void update_parent_metadata(struct inode *dir, struct inode *inode, + unsigned int current_depth) +{ + if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { + if (S_ISDIR(inode->i_mode)) { + inc_nlink(dir); + set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); + } + clear_inode_flag(F2FS_I(inode), FI_NEW_INODE); + } + dir->i_mtime = dir->i_ctime = CURRENT_TIME; + mark_inode_dirty(dir); + + if (F2FS_I(dir)->i_current_depth != current_depth) { + F2FS_I(dir)->i_current_depth = current_depth; + set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); + } + + if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) + update_inode_page(dir); + + if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) + clear_inode_flag(F2FS_I(inode), FI_INC_LINK); +} + +static int room_for_filename(struct f2fs_dentry_block *dentry_blk, int slots) +{ + int bit_start = 0; + int zero_start, zero_end; +next: + zero_start = find_next_zero_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, + bit_start); + if (zero_start >= NR_DENTRY_IN_BLOCK) + return NR_DENTRY_IN_BLOCK; + + zero_end = find_next_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, + zero_start); + if (zero_end - zero_start >= slots) + return zero_start; + + bit_start = zero_end + 1; + + if (zero_end + 1 >= NR_DENTRY_IN_BLOCK) + return NR_DENTRY_IN_BLOCK; + goto next; +} + +/* + * Caller should grab and release a rwsem by calling f2fs_lock_op() and + * f2fs_unlock_op(). + */ +int __f2fs_add_link(struct inode *dir, const struct qstr *name, + struct inode *inode) +{ + unsigned int bit_pos; + unsigned int level; + unsigned int current_depth; + unsigned long bidx, block; + f2fs_hash_t dentry_hash; + struct f2fs_dir_entry *de; + unsigned int nbucket, nblock; + size_t namelen = name->len; + struct page *dentry_page = NULL; + struct f2fs_dentry_block *dentry_blk = NULL; + int slots = GET_DENTRY_SLOTS(namelen); + struct page *page; + int err = 0; + int i; + + dentry_hash = f2fs_dentry_hash(name->name, name->len); + level = 0; + current_depth = F2FS_I(dir)->i_current_depth; + if (F2FS_I(dir)->chash == dentry_hash) { + level = F2FS_I(dir)->clevel; + F2FS_I(dir)->chash = 0; + } + +start: + if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) + return -ENOSPC; + + /* Increase the depth, if required */ + if (level == current_depth) + ++current_depth; + + nbucket = dir_buckets(level); + nblock = bucket_blocks(level); + + bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket)); + + for (block = bidx; block <= (bidx + nblock - 1); block++) { + dentry_page = get_new_data_page(dir, NULL, block, true); + if (IS_ERR(dentry_page)) + return PTR_ERR(dentry_page); + + dentry_blk = kmap(dentry_page); + bit_pos = room_for_filename(dentry_blk, slots); + if (bit_pos < NR_DENTRY_IN_BLOCK) + goto add_dentry; + + kunmap(dentry_page); + f2fs_put_page(dentry_page, 1); + } + + /* Move to next level to find the empty slot for new dentry */ + ++level; + goto start; +add_dentry: + wait_on_page_writeback(dentry_page); + + page = init_inode_metadata(inode, dir, name); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto fail; + } + de = &dentry_blk->dentry[bit_pos]; + de->hash_code = dentry_hash; + de->name_len = cpu_to_le16(namelen); + memcpy(dentry_blk->filename[bit_pos], name->name, name->len); + de->ino = cpu_to_le32(inode->i_ino); + set_de_type(de, inode); + for (i = 
0; i < slots; i++) + test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); + set_page_dirty(dentry_page); + + /* we don't need to mark_inode_dirty now */ + F2FS_I(inode)->i_pino = dir->i_ino; + update_inode(inode, page); + f2fs_put_page(page, 1); + + update_parent_metadata(dir, inode, current_depth); +fail: + clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); + kunmap(dentry_page); + f2fs_put_page(dentry_page, 1); + return err; +} + +/* + * It only removes the dentry from the dentry page,corresponding name + * entry in name page does not need to be touched during deletion. + */ +void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, + struct inode *inode) +{ + struct f2fs_dentry_block *dentry_blk; + unsigned int bit_pos; + struct address_space *mapping = page->mapping; + struct inode *dir = mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); + int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len)); + void *kaddr = page_address(page); + int i; + + lock_page(page); + wait_on_page_writeback(page); + + dentry_blk = (struct f2fs_dentry_block *)kaddr; + bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry; + for (i = 0; i < slots; i++) + test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); + + /* Let's check and deallocate this dentry page */ + bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, + 0); + kunmap(page); /* kunmap - pair of f2fs_find_entry */ + set_page_dirty(page); + + dir->i_ctime = dir->i_mtime = CURRENT_TIME; + + if (inode) { + if (S_ISDIR(inode->i_mode)) { + drop_nlink(dir); + update_inode_page(dir); + } + inode->i_ctime = CURRENT_TIME; + drop_nlink(inode); + if (S_ISDIR(inode->i_mode)) { + drop_nlink(inode); + i_size_write(inode, 0); + } + update_inode_page(inode); + + if (inode->i_nlink == 0) + add_orphan_inode(sbi, inode->i_ino); + else + release_orphan_inode(sbi); + } + + if (bit_pos == NR_DENTRY_IN_BLOCK) { + truncate_hole(dir, page->index, page->index + 1); + clear_page_dirty_for_io(page); + ClearPageUptodate(page); + dec_page_count(sbi, F2FS_DIRTY_DENTS); + inode_dec_dirty_dents(dir); + } + f2fs_put_page(page, 1); +} + +bool f2fs_empty_dir(struct inode *dir) +{ + unsigned long bidx; + struct page *dentry_page; + unsigned int bit_pos; + struct f2fs_dentry_block *dentry_blk; + unsigned long nblock = dir_blocks(dir); + + for (bidx = 0; bidx < nblock; bidx++) { + void *kaddr; + dentry_page = get_lock_data_page(dir, bidx); + if (IS_ERR(dentry_page)) { + if (PTR_ERR(dentry_page) == -ENOENT) + continue; + else + return false; + } + + kaddr = kmap_atomic(dentry_page); + dentry_blk = (struct f2fs_dentry_block *)kaddr; + if (bidx == 0) + bit_pos = 2; + else + bit_pos = 0; + bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, + bit_pos); + kunmap_atomic(kaddr); + + f2fs_put_page(dentry_page, 1); + + if (bit_pos < NR_DENTRY_IN_BLOCK) + return false; + } + return true; +} + +static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir) +{ + unsigned long pos = file->f_pos; + struct inode *inode = file->f_dentry->d_inode; + unsigned long npages = dir_blocks(inode); + unsigned char *types = NULL; + unsigned int bit_pos = 0, start_bit_pos = 0; + int over = 0; + struct f2fs_dentry_block *dentry_blk = NULL; + struct f2fs_dir_entry *de = NULL; + struct page *dentry_page = NULL; + unsigned int n = 0; + unsigned char d_type = DT_UNKNOWN; + int slots; + + types = f2fs_filetype_table; + bit_pos = (pos % NR_DENTRY_IN_BLOCK); + n = (pos / NR_DENTRY_IN_BLOCK); + + for (; n < 
npages; n++) { + dentry_page = get_lock_data_page(inode, n); + if (IS_ERR(dentry_page)) + continue; + + start_bit_pos = bit_pos; + dentry_blk = kmap(dentry_page); + while (bit_pos < NR_DENTRY_IN_BLOCK) { + d_type = DT_UNKNOWN; + bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, + NR_DENTRY_IN_BLOCK, + bit_pos); + if (bit_pos >= NR_DENTRY_IN_BLOCK) + break; + + de = &dentry_blk->dentry[bit_pos]; + if (types && de->file_type < F2FS_FT_MAX) + d_type = types[de->file_type]; + + over = filldir(dirent, + dentry_blk->filename[bit_pos], + le16_to_cpu(de->name_len), + (n * NR_DENTRY_IN_BLOCK) + bit_pos, + le32_to_cpu(de->ino), d_type); + if (over) { + file->f_pos += bit_pos - start_bit_pos; + goto success; + } + slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); + bit_pos += slots; + } + bit_pos = 0; + file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK; + kunmap(dentry_page); + f2fs_put_page(dentry_page, 1); + dentry_page = NULL; + } +success: + if (dentry_page && !IS_ERR(dentry_page)) { + kunmap(dentry_page); + f2fs_put_page(dentry_page, 1); + } + + return 0; +} + +const struct file_operations f2fs_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .readdir = f2fs_readdir, + .fsync = f2fs_sync_file, + .unlocked_ioctl = f2fs_ioctl, +}; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h new file mode 100644 index 0000000000000..2f2c652e62a25 --- /dev/null +++ b/fs/f2fs/f2fs.h @@ -0,0 +1,1383 @@ +/* + * fs/f2fs/f2fs.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_F2FS_H +#define _LINUX_F2FS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_F2FS_CHECK_FS +#define f2fs_bug_on(condition) BUG_ON(condition) +#else +#define f2fs_bug_on(condition) +#endif + +/* + * For mount options + */ +#define F2FS_MOUNT_BG_GC 0x00000001 +#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002 +#define F2FS_MOUNT_DISCARD 0x00000004 +#define F2FS_MOUNT_NOHEAP 0x00000008 +#define F2FS_MOUNT_XATTR_USER 0x00000010 +#define F2FS_MOUNT_POSIX_ACL 0x00000020 +#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040 +#define F2FS_MOUNT_INLINE_XATTR 0x00000080 +#define F2FS_MOUNT_ANDROID_EMU 0x00001000 +#define F2FS_MOUNT_ERRORS_PANIC 0x00002000 +#define F2FS_MOUNT_ERRORS_RECOVER 0x00004000 +#define F2FS_MOUNT_INLINE_DATA 0x00000100 + +#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) +#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) +#define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option) + +#define ver_after(a, b) (typecheck(unsigned long long, a) && \ + typecheck(unsigned long long, b) && \ + ((long long)((a) - (b)) > 0)) + +typedef u32 block_t; /* + * should not change u32, since it is the on-disk block + * address format, __le32. + */ +typedef u32 nid_t; + +struct f2fs_mount_info { + unsigned int opt; +}; + +#define CRCPOLY_LE 0xedb88320 + +static inline __u32 f2fs_crc32(void *buf, size_t len) +{ + unsigned char *p = (unsigned char *)buf; + __u32 crc = F2FS_SUPER_MAGIC; + int i; + + while (len--) { + crc ^= *p++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? 
CRCPOLY_LE : 0); + } + return crc; +} + +static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size) +{ + return f2fs_crc32(buf, buf_size) == blk_crc; +} + +/* + * For checkpoint manager + */ +enum { + NAT_BITMAP, + SIT_BITMAP +}; + +/* for the list of orphan inodes */ +struct orphan_inode_entry { + struct list_head list; /* list head */ + nid_t ino; /* inode number */ +}; + +/* for the list of directory inodes */ +struct dir_inode_entry { + struct list_head list; /* list head */ + struct inode *inode; /* vfs inode pointer */ +}; + +/* for the list of blockaddresses to be discarded */ +struct discard_entry { + struct list_head list; /* list head */ + block_t blkaddr; /* block address to be discarded */ + int len; /* # of consecutive blocks of the discard */ +}; + +/* for the list of fsync inodes, used only during recovery */ +struct fsync_inode_entry { + struct list_head list; /* list head */ + struct inode *inode; /* vfs inode pointer */ + block_t blkaddr; /* block address locating the last inode */ +}; + +#define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats)) +#define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits)) + +#define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne) +#define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid) +#define sit_in_journal(sum, i) (sum->sit_j.entries[i].se) +#define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno) + +static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i) +{ + int before = nats_in_cursum(rs); + rs->n_nats = cpu_to_le16(before + i); + return before; +} + +static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i) +{ + int before = sits_in_cursum(rs); + rs->n_sits = cpu_to_le16(before + i); + return before; +} + +/* + * ioctl commands + */ +#define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS +#define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS + +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +/* + * ioctl commands in 32 bit emulation + */ +#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS +#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS +#endif + +/* + * For INODE and NODE manager + */ +/* + * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1 + * as its node offset to distinguish from index node blocks. + * But some bits are used to mark the node block. + */ +#define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \ + >> OFFSET_BIT_SHIFT) +enum { + ALLOC_NODE, /* allocate a new node page if needed */ + LOOKUP_NODE, /* look up a node without readahead */ + LOOKUP_NODE_RA, /* + * look up a node with readahead called + * by get_data_block. + */ +}; + +#define F2FS_LINK_MAX 32000 /* maximum link count per file */ + +/* for in-memory extent cache entry */ +#define F2FS_MIN_EXTENT_LEN 16 /* minimum extent length */ + +struct extent_info { + rwlock_t ext_lock; /* rwlock for consistency */ + unsigned int fofs; /* start offset in a file */ + u32 blk_addr; /* start block address of the extent */ + unsigned int len; /* length of the extent */ +}; + +/* + * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. 
+ */ +#define FADVISE_COLD_BIT 0x01 +#define FADVISE_LOST_PINO_BIT 0x02 +#define FADVISE_ANDROID_EMU 0x10 +#define FADVISE_ANDROID_EMU_ROOT 0x20 + +struct f2fs_inode_info { + struct inode vfs_inode; /* serve a vfs inode */ + unsigned long i_flags; /* keep an inode flags for ioctl */ + unsigned char i_advise; /* use to give file attribute hints */ + unsigned int i_current_depth; /* use only in directory structure */ + unsigned int i_pino; /* parent inode number */ + umode_t i_acl_mode; /* keep file acl mode temporarily */ + + /* Use below internally in f2fs*/ + unsigned long flags; /* use to pass per-file flags */ + atomic_t dirty_dents; /* # of dirty dentry pages */ + f2fs_hash_t chash; /* hash value of given file name */ + unsigned int clevel; /* maximum level of given file name */ + nid_t i_xattr_nid; /* node id that contains xattrs */ + unsigned long long xattr_ver; /* cp version of xattr modification */ + struct extent_info ext; /* in-memory extent cache entry */ +}; + +static inline void get_extent_info(struct extent_info *ext, + struct f2fs_extent i_ext) +{ + write_lock(&ext->ext_lock); + ext->fofs = le32_to_cpu(i_ext.fofs); + ext->blk_addr = le32_to_cpu(i_ext.blk_addr); + ext->len = le32_to_cpu(i_ext.len); + write_unlock(&ext->ext_lock); +} + +static inline void set_raw_extent(struct extent_info *ext, + struct f2fs_extent *i_ext) +{ + read_lock(&ext->ext_lock); + i_ext->fofs = cpu_to_le32(ext->fofs); + i_ext->blk_addr = cpu_to_le32(ext->blk_addr); + i_ext->len = cpu_to_le32(ext->len); + read_unlock(&ext->ext_lock); +} + +struct f2fs_nm_info { + block_t nat_blkaddr; /* base disk address of NAT */ + nid_t max_nid; /* maximum possible node ids */ + nid_t next_scan_nid; /* the next nid to be scanned */ + + /* NAT cache management */ + struct radix_tree_root nat_root;/* root of the nat entry cache */ + rwlock_t nat_tree_lock; /* protect nat_tree_lock */ + unsigned int nat_cnt; /* the # of cached nat entries */ + struct list_head nat_entries; /* cached nat entry list (clean) */ + struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */ + + /* free node ids management */ + struct list_head free_nid_list; /* a list for free nids */ + spinlock_t free_nid_list_lock; /* protect free nid list */ + unsigned int fcnt; /* the number of free node id */ + struct mutex build_lock; /* lock for build free nids */ + + /* for checkpoint */ + char *nat_bitmap; /* NAT bitmap pointer */ + int bitmap_size; /* bitmap size */ +}; + +/* + * this structure is used as one of function parameters. + * all the information are dedicated to a given direct node block determined + * by the data offset in a file. + */ +struct dnode_of_data { + struct inode *inode; /* vfs inode pointer */ + struct page *inode_page; /* its inode page, NULL is possible */ + struct page *node_page; /* cached direct node page */ + nid_t nid; /* node id of the direct node block */ + unsigned int ofs_in_node; /* data offset in the node page */ + bool inode_page_locked; /* inode page is locked or not */ + block_t data_blkaddr; /* block address of the node block */ +}; + +static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, + struct page *ipage, struct page *npage, nid_t nid) +{ + memset(dn, 0, sizeof(*dn)); + dn->inode = inode; + dn->inode_page = ipage; + dn->node_page = npage; + dn->nid = nid; +} + +/* + * For SIT manager + * + * By default, there are 6 active log areas across the whole main area. 
+ * When considering hot and cold data separation to reduce cleaning overhead, + * we split 3 for data logs and 3 for node logs as hot, warm, and cold types, + * respectively. + * In the current design, you should not change the numbers intentionally. + * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6 + * logs individually according to the underlying devices. (default: 6) + * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for + * data and 8 for node logs. + */ +#define NR_CURSEG_DATA_TYPE (3) +#define NR_CURSEG_NODE_TYPE (3) +#define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) + +enum { + CURSEG_HOT_DATA = 0, /* directory entry blocks */ + CURSEG_WARM_DATA, /* data blocks */ + CURSEG_COLD_DATA, /* multimedia or GCed data blocks */ + CURSEG_HOT_NODE, /* direct node blocks of directory files */ + CURSEG_WARM_NODE, /* direct node blocks of normal files */ + CURSEG_COLD_NODE, /* indirect node blocks */ + NO_CHECK_TYPE +}; + +struct f2fs_sm_info { + struct sit_info *sit_info; /* whole segment information */ + struct free_segmap_info *free_info; /* free segment information */ + struct dirty_seglist_info *dirty_info; /* dirty segment information */ + struct curseg_info *curseg_array; /* active segment information */ + + struct list_head wblist_head; /* list of under-writeback pages */ + spinlock_t wblist_lock; /* lock for checkpoint */ + + block_t seg0_blkaddr; /* block address of 0'th segment */ + block_t main_blkaddr; /* start block address of main area */ + block_t ssa_blkaddr; /* start block address of SSA area */ + + unsigned int segment_count; /* total # of segments */ + unsigned int main_segments; /* # of segments in main area */ + unsigned int reserved_segments; /* # of reserved segments */ + unsigned int ovp_segments; /* # of overprovision segments */ + + /* a threshold to reclaim prefree segments */ + unsigned int rec_prefree_segments; + + /* for small discard management */ + struct list_head discard_list; /* 4KB discard list */ + int nr_discards; /* # of discards in the list */ + int max_discards; /* max. discards to be issued */ + + unsigned int ipu_policy; /* in-place-update policy */ + unsigned int min_ipu_util; /* in-place-update threshold */ +}; + +/* + * For superblock + */ +/* + * COUNT_TYPE for monitoring + * + * f2fs monitors the number of several block types such as on-writeback, + * dirty dentry blocks, dirty node blocks, and dirty meta blocks. + */ +enum count_type { + F2FS_WRITEBACK, + F2FS_DIRTY_DENTS, + F2FS_DIRTY_NODES, + F2FS_DIRTY_META, + NR_COUNT_TYPE, +}; + +/* + * The below are the page types of bios used in submti_bio(). + * The available types are: + * DATA User data pages. It operates as async mode. + * NODE Node pages. It operates as async mode. + * META FS metadata pages such as SIT, NAT, CP. + * NR_PAGE_TYPE The number of page types. + * META_FLUSH Make sure the previous pages are written + * with waiting the bio's completion + * ... Only can be used with META. + */ +#define PAGE_TYPE_OF_BIO(type) ((type) > META ? 
META : (type)) +enum page_type { + DATA, + NODE, + META, + NR_PAGE_TYPE, + META_FLUSH, +}; + +/* + * Android sdcard emulation flags + */ +#define F2FS_ANDROID_EMU_NOCASE 0x00000001 + +struct f2fs_io_info { + enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ + int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */ +}; + +#define is_read_io(rw) (((rw) & 1) == READ) +struct f2fs_bio_info { + struct f2fs_sb_info *sbi; /* f2fs superblock */ + struct bio *bio; /* bios to merge */ + sector_t last_block_in_bio; /* last block number */ + struct f2fs_io_info fio; /* store buffered io info. */ + struct mutex io_mutex; /* mutex for bio */ +}; + +struct f2fs_sb_info { + struct super_block *sb; /* pointer to VFS super block */ + struct proc_dir_entry *s_proc; /* proc entry */ + struct buffer_head *raw_super_buf; /* buffer head of raw sb */ + struct f2fs_super_block *raw_super; /* raw super block pointer */ + int s_dirty; /* dirty flag for checkpoint */ + + /* for node-related operations */ + struct f2fs_nm_info *nm_info; /* node manager */ + struct inode *node_inode; /* cache node blocks */ + + /* for segment-related operations */ + struct f2fs_sm_info *sm_info; /* segment manager */ + + /* for bio operations */ + struct f2fs_bio_info read_io; /* for read bios */ + struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */ + + /* for checkpoint */ + struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ + struct inode *meta_inode; /* cache meta blocks */ + struct mutex cp_mutex; /* checkpoint procedure lock */ + struct rw_semaphore cp_rwsem; /* blocking FS operations */ + struct mutex node_write; /* locking node writes */ + struct mutex writepages; /* mutex for writepages() */ + bool por_doing; /* recovery is doing or not */ + bool on_build_free_nids; /* build_free_nids is doing */ + wait_queue_head_t cp_wait; + + /* for orphan inode management */ + struct list_head orphan_inode_list; /* orphan inode list */ + spinlock_t orphan_inode_lock; /* for orphan inode list */ + unsigned int n_orphans; /* # of orphan inodes */ + unsigned int max_orphans; /* max orphan inodes */ + + /* for directory inode management */ + struct list_head dir_inode_list; /* dir inode list */ + spinlock_t dir_inode_lock; /* for dir inode list lock */ + + /* basic file system units */ + unsigned int log_sectors_per_block; /* log2 sectors per block */ + unsigned int log_blocksize; /* log2 block size */ + unsigned int blocksize; /* block size */ + unsigned int root_ino_num; /* root inode number*/ + unsigned int node_ino_num; /* node inode number*/ + unsigned int meta_ino_num; /* meta inode number*/ + unsigned int log_blocks_per_seg; /* log2 blocks per segment */ + unsigned int blocks_per_seg; /* blocks per segment */ + unsigned int segs_per_sec; /* segments per section */ + unsigned int secs_per_zone; /* sections per zone */ + unsigned int total_sections; /* total section count */ + unsigned int total_node_count; /* total node block count */ + unsigned int total_valid_node_count; /* valid node block count */ + unsigned int total_valid_inode_count; /* valid inode count */ + int active_logs; /* # of active logs */ + + block_t user_block_count; /* # of user blocks */ + block_t total_valid_block_count; /* # of valid blocks */ + block_t alloc_valid_block_count; /* # of allocated blocks */ + block_t last_valid_block_count; /* for recovery */ + u32 s_next_generation; /* for NFS support */ + atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */ + + struct f2fs_mount_info mount_opt; /* mount options */ + + 
/* for cleaning operations */ + struct mutex gc_mutex; /* mutex for GC */ + struct f2fs_gc_kthread *gc_thread; /* GC thread */ + unsigned int cur_victim_sec; /* current victim section num */ + + /* maximum # of trials to find a victim segment for SSR and GC */ + unsigned int max_victim_search; + + /* + * for stat information. + * one is for the LFS mode, and the other is for the SSR mode. + */ +#ifdef CONFIG_F2FS_STAT_FS + struct f2fs_stat_info *stat_info; /* FS status information */ + unsigned int segment_count[2]; /* # of allocated segments */ + unsigned int block_count[2]; /* # of allocated blocks */ + int total_hit_ext, read_hit_ext; /* extent cache hit ratio */ + int inline_inode; /* # of inline_data inodes */ + int bg_gc; /* background gc calls */ + unsigned int n_dirty_dirs; /* # of dir inodes */ +#endif + unsigned int last_victim[2]; /* last victim segment # */ + spinlock_t stat_lock; /* lock for stat operations */ + + /* For sysfs suppport */ + struct kobject s_kobj; + struct completion s_kobj_unregister; + + /* For Android sdcard emulation */ + u32 android_emu_uid; + u32 android_emu_gid; + umode_t android_emu_mode; + int android_emu_flags; +}; + +/* + * Inline functions + */ +static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) +{ + return container_of(inode, struct f2fs_inode_info, vfs_inode); +} + +static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) +{ + return (struct f2fs_super_block *)(sbi->raw_super); +} + +static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) +{ + return (struct f2fs_checkpoint *)(sbi->ckpt); +} + +static inline struct f2fs_node *F2FS_NODE(struct page *page) +{ + return (struct f2fs_node *)page_address(page); +} + +static inline struct f2fs_inode *F2FS_INODE(struct page *page) +{ + return &((struct f2fs_node *)page_address(page))->i; +} + +static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) +{ + return (struct f2fs_nm_info *)(sbi->nm_info); +} + +static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) +{ + return (struct f2fs_sm_info *)(sbi->sm_info); +} + +static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) +{ + return (struct sit_info *)(SM_I(sbi)->sit_info); +} + +static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) +{ + return (struct free_segmap_info *)(SM_I(sbi)->free_info); +} + +static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) +{ + return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); +} + +static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) +{ + return sbi->meta_inode->i_mapping; +} + +static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) +{ + return sbi->node_inode->i_mapping; +} + +static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi) +{ + sbi->s_dirty = 1; +} + +static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi) +{ + sbi->s_dirty = 0; +} + +static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) +{ + return le64_to_cpu(cp->checkpoint_ver); +} + +static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + return ckpt_flags & f; +} + +static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + ckpt_flags |= f; + cp->ckpt_flags = cpu_to_le32(ckpt_flags); +} + 
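The ckpt_flags helpers above (together with clear_ckpt_flags() immediately below) follow the usual on-disk bit-flag pattern: the field stays in its __le32 on-disk form, so every update is a read-modify-write through le32_to_cpu()/cpu_to_le32(). A minimal usage sketch — an editor's illustration, not part of the patch — assuming the CP_UMOUNT_FLAG bit from include/linux/f2fs_fs.h:

/* Illustrative only: mark or clear the unmount bit in the in-memory
 * checkpoint before it is written out, then sanity-check the result. */
static inline void example_mark_umount(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	if (is_umount)
		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	/* is_set_ckpt_flags() reads the flag back in CPU byte order */
	WARN_ON(is_umount != is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG));
}

The checkpoint write path toggles this bit in essentially the same way when it builds a checkpoint for unmount.
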
+static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + ckpt_flags &= (~f); + cp->ckpt_flags = cpu_to_le32(ckpt_flags); +} + +static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) +{ + down_read(&sbi->cp_rwsem); +} + +static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) +{ + up_read(&sbi->cp_rwsem); +} + +static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) +{ + down_write(&sbi->cp_rwsem); +} + +static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) +{ + up_write(&sbi->cp_rwsem); +} + +/* + * Check whether the given nid is within node id range. + */ +static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) +{ + WARN_ON((nid >= NM_I(sbi)->max_nid)); + if (unlikely(nid >= NM_I(sbi)->max_nid)) + return -EINVAL; + return 0; +} + +#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1 + +/* + * Check whether the inode has blocks or not + */ +static inline int F2FS_HAS_BLOCKS(struct inode *inode) +{ + if (F2FS_I(inode)->i_xattr_nid) + return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1; + else + return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS; +} + +static inline int f2fs_handle_error(struct f2fs_sb_info *sbi) +{ + if (test_opt(sbi, ERRORS_PANIC)) + BUG(); + if (test_opt(sbi, ERRORS_RECOVER)) + return 1; + return 0; +} + +static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, + struct inode *inode, blkcnt_t count) +{ + block_t valid_block_count; + + spin_lock(&sbi->stat_lock); + valid_block_count = + sbi->total_valid_block_count + (block_t)count; + if (unlikely(valid_block_count > sbi->user_block_count)) { + spin_unlock(&sbi->stat_lock); + return false; + } + inode->i_blocks += count; + sbi->total_valid_block_count = valid_block_count; + sbi->alloc_valid_block_count += (block_t)count; + spin_unlock(&sbi->stat_lock); + return true; +} + +static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, + struct inode *inode, + blkcnt_t count) +{ + spin_lock(&sbi->stat_lock); + + if (sbi->total_valid_block_count < (block_t)count) { + pr_crit("F2FS-fs (%s): block accounting error: %u < %llu\n", + sbi->sb->s_id, sbi->total_valid_block_count, count); + f2fs_handle_error(sbi); + sbi->total_valid_block_count = count; + } + if (inode->i_blocks < count) { + pr_crit("F2FS-fs (%s): inode accounting error: %llu < %llu\n", + sbi->sb->s_id, inode->i_blocks, count); + f2fs_handle_error(sbi); + inode->i_blocks = count; + } + + inode->i_blocks -= count; + sbi->total_valid_block_count -= (block_t)count; + spin_unlock(&sbi->stat_lock); +} + +static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) +{ + atomic_inc(&sbi->nr_pages[count_type]); + F2FS_SET_SB_DIRT(sbi); +} + +static inline void inode_inc_dirty_dents(struct inode *inode) +{ + atomic_inc(&F2FS_I(inode)->dirty_dents); +} + +static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) +{ + atomic_dec(&sbi->nr_pages[count_type]); +} + +static inline void inode_dec_dirty_dents(struct inode *inode) +{ + atomic_dec(&F2FS_I(inode)->dirty_dents); +} + +static inline int get_pages(struct f2fs_sb_info *sbi, int count_type) +{ + return atomic_read(&sbi->nr_pages[count_type]); +} + +static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) +{ + unsigned int pages_per_sec = sbi->segs_per_sec * + (1 << sbi->log_blocks_per_seg); + return ((get_pages(sbi, block_type) + pages_per_sec - 1) + >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; +} + +static inline block_t 
valid_user_blocks(struct f2fs_sb_info *sbi) +{ + block_t ret; + spin_lock(&sbi->stat_lock); + ret = sbi->total_valid_block_count; + spin_unlock(&sbi->stat_lock); + return ret; +} + +static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + + /* return NAT or SIT bitmap */ + if (flag == NAT_BITMAP) + return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); + else if (flag == SIT_BITMAP) + return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); + + return 0; +} + +static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + int offset = (flag == NAT_BITMAP) ? + le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; + return &ckpt->sit_nat_version_bitmap + offset; +} + +static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) +{ + block_t start_addr; + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + unsigned long long ckpt_version = cur_cp_version(ckpt); + + start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); + + /* + * odd numbered checkpoint should at cp segment 0 + * and even segent must be at cp segment 1 + */ + if (!(ckpt_version & 1)) + start_addr += sbi->blocks_per_seg; + + return start_addr; +} + +static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) +{ + return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); +} + +static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi, + struct inode *inode) +{ + block_t valid_block_count; + unsigned int valid_node_count; + + spin_lock(&sbi->stat_lock); + + valid_block_count = sbi->total_valid_block_count + 1; + if (unlikely(valid_block_count > sbi->user_block_count)) { + spin_unlock(&sbi->stat_lock); + return false; + } + + valid_node_count = sbi->total_valid_node_count + 1; + if (unlikely(valid_node_count > sbi->total_node_count)) { + spin_unlock(&sbi->stat_lock); + return false; + } + + if (inode) + inode->i_blocks++; + + sbi->alloc_valid_block_count++; + sbi->total_valid_node_count++; + sbi->total_valid_block_count++; + spin_unlock(&sbi->stat_lock); + + return true; +} + +static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, + struct inode *inode) +{ + spin_lock(&sbi->stat_lock); + + f2fs_bug_on(!sbi->total_valid_block_count); + f2fs_bug_on(!sbi->total_valid_node_count); + f2fs_bug_on(!inode->i_blocks); + + inode->i_blocks--; + sbi->total_valid_node_count--; + sbi->total_valid_block_count--; + + spin_unlock(&sbi->stat_lock); +} + +static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) +{ + unsigned int ret; + spin_lock(&sbi->stat_lock); + ret = sbi->total_valid_node_count; + spin_unlock(&sbi->stat_lock); + return ret; +} + +static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) +{ + spin_lock(&sbi->stat_lock); + f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count); + sbi->total_valid_inode_count++; + spin_unlock(&sbi->stat_lock); +} + +static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) +{ + spin_lock(&sbi->stat_lock); + f2fs_bug_on(!sbi->total_valid_inode_count); + sbi->total_valid_inode_count--; + spin_unlock(&sbi->stat_lock); +} + +static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi) +{ + unsigned int ret; + spin_lock(&sbi->stat_lock); + ret = sbi->total_valid_inode_count; + spin_unlock(&sbi->stat_lock); + return ret; +} + +static inline void f2fs_put_page(struct page *page, int unlock) +{ + if (!page) + return; + + if (unlock) { + f2fs_bug_on(!PageLocked(page)); + unlock_page(page); + } + 
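/* drop the reference taken on this page when it was looked up */ +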
page_cache_release(page); +} + +static inline void f2fs_put_dnode(struct dnode_of_data *dn) +{ + if (dn->node_page) + f2fs_put_page(dn->node_page, 1); + if (dn->inode_page && dn->node_page != dn->inode_page) + f2fs_put_page(dn->inode_page, 0); + dn->node_page = NULL; + dn->inode_page = NULL; +} + +static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, + size_t size, void (*ctor)(void *)) +{ + return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor); +} + +static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, + gfp_t flags) +{ + void *entry; +retry: + entry = kmem_cache_alloc(cachep, flags); + if (!entry) { + cond_resched(); + goto retry; + } + + return entry; +} + +#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) + +static inline bool IS_INODE(struct page *page) +{ + struct f2fs_node *p = F2FS_NODE(page); + return RAW_IS_INODE(p); +} + +static inline __le32 *blkaddr_in_node(struct f2fs_node *node) +{ + return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; +} + +static inline block_t datablock_addr(struct page *node_page, + unsigned int offset) +{ + struct f2fs_node *raw_node; + __le32 *addr_array; + raw_node = F2FS_NODE(node_page); + addr_array = blkaddr_in_node(raw_node); + return le32_to_cpu(addr_array[offset]); +} + +static inline int f2fs_test_bit(unsigned int nr, char *addr) +{ + int mask; + + addr += (nr >> 3); + mask = 1 << (7 - (nr & 0x07)); + return mask & *addr; +} + +static inline int f2fs_set_bit(unsigned int nr, char *addr) +{ + int mask; + int ret; + + addr += (nr >> 3); + mask = 1 << (7 - (nr & 0x07)); + ret = mask & *addr; + *addr |= mask; + return ret; +} + +static inline int f2fs_clear_bit(unsigned int nr, char *addr) +{ + int mask; + int ret; + + addr += (nr >> 3); + mask = 1 << (7 - (nr & 0x07)); + ret = mask & *addr; + *addr &= ~mask; + return ret; +} + +/* used for f2fs_inode_info->flags */ +enum { + FI_NEW_INODE, /* indicate newly allocated inode */ + FI_DIRTY_INODE, /* indicate inode is dirty or not */ + FI_INC_LINK, /* need to increment i_nlink */ + FI_ACL_MODE, /* indicate acl mode */ + FI_NO_ALLOC, /* should not allocate any blocks */ + FI_UPDATE_DIR, /* should update inode block for consistency */ + FI_DELAY_IPUT, /* used for the recovery */ + FI_NO_EXTENT, /* not to use the extent cache */ + FI_INLINE_XATTR, /* used for inline xattr */ + FI_INLINE_DATA, /* used for inline data*/ +}; + +static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) +{ + set_bit(flag, &fi->flags); +} + +static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag) +{ + return test_bit(flag, &fi->flags); +} + +static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag) +{ + clear_bit(flag, &fi->flags); +} + +static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode) +{ + fi->i_acl_mode = mode; + set_inode_flag(fi, FI_ACL_MODE); +} + +static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag) +{ + if (is_inode_flag_set(fi, FI_ACL_MODE)) { + clear_inode_flag(fi, FI_ACL_MODE); + return 1; + } + return 0; +} + +int f2fs_android_emu(struct f2fs_sb_info *, struct inode *, u32 *, u32 *, + umode_t *); + +#define IS_ANDROID_EMU(sbi, fi, pfi) \ + (test_opt((sbi), ANDROID_EMU) && \ + (((fi)->i_advise & FADVISE_ANDROID_EMU) || \ + ((pfi)->i_advise & FADVISE_ANDROID_EMU))) + +static inline void get_inline_info(struct f2fs_inode_info *fi, + struct f2fs_inode *ri) +{ + if (ri->i_inline & F2FS_INLINE_XATTR) + set_inode_flag(fi, FI_INLINE_XATTR); + if (ri->i_inline & 
F2FS_INLINE_DATA) + set_inode_flag(fi, FI_INLINE_DATA); +} + +static inline void set_raw_inline(struct f2fs_inode_info *fi, + struct f2fs_inode *ri) +{ + ri->i_inline = 0; + + if (is_inode_flag_set(fi, FI_INLINE_XATTR)) + ri->i_inline |= F2FS_INLINE_XATTR; + if (is_inode_flag_set(fi, FI_INLINE_DATA)) + ri->i_inline |= F2FS_INLINE_DATA; +} + +static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi) +{ + if (is_inode_flag_set(fi, FI_INLINE_XATTR)) + return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS; + return DEF_ADDRS_PER_INODE; +} + +static inline void *inline_xattr_addr(struct page *page) +{ + struct f2fs_inode *ri; + ri = (struct f2fs_inode *)page_address(page); + return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - + F2FS_INLINE_XATTR_ADDRS]); +} + +static inline int inline_xattr_size(struct inode *inode) +{ + if (is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR)) + return F2FS_INLINE_XATTR_ADDRS << 2; + else + return 0; +} + +static inline int f2fs_has_inline_data(struct inode *inode) +{ + return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA); +} + +static inline void *inline_data_addr(struct page *page) +{ + struct f2fs_inode *ri; + ri = (struct f2fs_inode *)page_address(page); + return (void *)&(ri->i_addr[1]); +} + +static inline int f2fs_readonly(struct super_block *sb) +{ + return sb->s_flags & MS_RDONLY; +} + +/* + * file.c + */ +int f2fs_sync_file(struct file *, loff_t, loff_t, int); +void truncate_data_blocks(struct dnode_of_data *); +int truncate_blocks(struct inode *, u64); +void f2fs_truncate(struct inode *); +int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +int f2fs_setattr(struct dentry *, struct iattr *); +int truncate_hole(struct inode *, pgoff_t, pgoff_t); +int truncate_data_blocks_range(struct dnode_of_data *, int); +long f2fs_ioctl(struct file *, unsigned int, unsigned long); +long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long); + +/* + * inode.c + */ +void f2fs_set_inode_flags(struct inode *); +struct inode *f2fs_iget(struct super_block *, unsigned long); +int try_to_free_nats(struct f2fs_sb_info *, int); +void update_inode(struct inode *, struct page *); +int update_inode_page(struct inode *); +int f2fs_write_inode(struct inode *, struct writeback_control *); +void f2fs_evict_inode(struct inode *); + +/* + * namei.c + */ +struct dentry *f2fs_get_parent(struct dentry *child); + +/* + * dir.c + */ +struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *, + struct page **); +struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **); +ino_t f2fs_inode_by_name(struct inode *, struct qstr *); +void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, + struct page *, struct inode *); +int update_dent_inode(struct inode *, const struct qstr *); +int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *); +void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *); +int f2fs_make_empty(struct inode *, struct inode *); +bool f2fs_empty_dir(struct inode *); + +static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) +{ + return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name, + inode); +} + +/* + * super.c + */ +int f2fs_sync_fs(struct super_block *, int); +extern __printf(3, 4) +void f2fs_msg(struct super_block *, const char *, const char *, ...); + +/* + * hash.c + */ +f2fs_hash_t f2fs_dentry_hash(const char *, size_t); + +/* + * node.c + */ +struct dnode_of_data; +struct node_info; + +int is_checkpointed_node(struct f2fs_sb_info *, 
nid_t); +void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *); +int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int); +int truncate_inode_blocks(struct inode *, pgoff_t); +int truncate_xattr_node(struct inode *, struct page *); +int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t); +void remove_inode_page(struct inode *); +struct page *new_inode_page(struct inode *, const struct qstr *); +struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *); +void ra_node_page(struct f2fs_sb_info *, nid_t); +struct page *get_node_page(struct f2fs_sb_info *, pgoff_t); +struct page *get_node_page_ra(struct page *, int); +void sync_inode_page(struct dnode_of_data *); +int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *); +bool alloc_nid(struct f2fs_sb_info *, nid_t *); +void alloc_nid_done(struct f2fs_sb_info *, nid_t); +void alloc_nid_failed(struct f2fs_sb_info *, nid_t); +void recover_node_page(struct f2fs_sb_info *, struct page *, + struct f2fs_summary *, struct node_info *, block_t); +int recover_inode_page(struct f2fs_sb_info *, struct page *); +int restore_node_summary(struct f2fs_sb_info *, unsigned int, + struct f2fs_summary_block *); +void flush_nat_entries(struct f2fs_sb_info *); +int build_node_manager(struct f2fs_sb_info *); +void destroy_node_manager(struct f2fs_sb_info *); +int __init create_node_manager_caches(void); +void destroy_node_manager_caches(void); + +/* + * segment.c + */ +void f2fs_balance_fs(struct f2fs_sb_info *); +void f2fs_balance_fs_bg(struct f2fs_sb_info *); +void invalidate_blocks(struct f2fs_sb_info *, block_t); +void clear_prefree_segments(struct f2fs_sb_info *); +int npages_for_summary_flush(struct f2fs_sb_info *); +void allocate_new_segments(struct f2fs_sb_info *); +struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); +void write_meta_page(struct f2fs_sb_info *, struct page *); +void write_node_page(struct f2fs_sb_info *, struct page *, + struct f2fs_io_info *, unsigned int, block_t, block_t *); +void write_data_page(struct page *, struct dnode_of_data *, block_t *, + struct f2fs_io_info *); +void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *); +void recover_data_page(struct f2fs_sb_info *, struct page *, + struct f2fs_summary *, block_t, block_t); +void rewrite_node_page(struct f2fs_sb_info *, struct page *, + struct f2fs_summary *, block_t, block_t); +void allocate_data_block(struct f2fs_sb_info *, struct page *, + block_t, block_t *, struct f2fs_summary *, int); +void f2fs_wait_on_page_writeback(struct page *, enum page_type); +void write_data_summaries(struct f2fs_sb_info *, block_t); +void write_node_summaries(struct f2fs_sb_info *, block_t); +int lookup_journal_in_cursum(struct f2fs_summary_block *, + int, unsigned int, int); +void flush_sit_entries(struct f2fs_sb_info *); +int build_segment_manager(struct f2fs_sb_info *); +void destroy_segment_manager(struct f2fs_sb_info *); +int __init create_segment_manager_caches(void); +void destroy_segment_manager_caches(void); + +/* + * checkpoint.c + */ +struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t); +struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t); +long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); +int acquire_orphan_inode(struct f2fs_sb_info *); +void release_orphan_inode(struct f2fs_sb_info *); +void add_orphan_inode(struct f2fs_sb_info *, nid_t); +void remove_orphan_inode(struct f2fs_sb_info *, nid_t); +void recover_orphan_inodes(struct f2fs_sb_info *); +int 
get_valid_checkpoint(struct f2fs_sb_info *); +void set_dirty_dir_page(struct inode *, struct page *); +void add_dirty_dir_inode(struct inode *); +void remove_dirty_dir_inode(struct inode *); +struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t); +void sync_dirty_dir_inodes(struct f2fs_sb_info *); +void write_checkpoint(struct f2fs_sb_info *, bool); +void init_orphan_info(struct f2fs_sb_info *); +int __init create_checkpoint_caches(void); +void destroy_checkpoint_caches(void); + +/* + * data.c + */ +void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int); +int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int); +void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t, + struct f2fs_io_info *); +int reserve_new_block(struct dnode_of_data *); +int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); +void update_extent_cache(block_t, struct dnode_of_data *); +struct page *find_data_page(struct inode *, pgoff_t, bool); +struct page *get_lock_data_page(struct inode *, pgoff_t); +struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); +int do_write_data_page(struct page *, struct f2fs_io_info *); + +/* + * gc.c + */ +int start_gc_thread(struct f2fs_sb_info *); +void stop_gc_thread(struct f2fs_sb_info *); +block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *); +int f2fs_gc(struct f2fs_sb_info *); +void build_gc_manager(struct f2fs_sb_info *); +int __init create_gc_caches(void); +void destroy_gc_caches(void); + +/* + * recovery.c + */ +int recover_fsync_data(struct f2fs_sb_info *); +bool space_for_roll_forward(struct f2fs_sb_info *); + +/* + * debug.c + */ +#ifdef CONFIG_F2FS_STAT_FS +struct f2fs_stat_info { + struct list_head stat_list; + struct f2fs_sb_info *sbi; + struct mutex stat_lock; + int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; + int main_area_segs, main_area_sections, main_area_zones; + int hit_ext, total_ext; + int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta; + int nats, sits, fnids; + int total_count, utilization; + int bg_gc, inline_inode; + unsigned int valid_count, valid_node_count, valid_inode_count; + unsigned int bimodal, avg_vblocks; + int util_free, util_valid, util_invalid; + int rsvd_segs, overp_segs; + int dirty_count, node_pages, meta_pages; + int prefree_count, call_count; + int tot_segs, node_segs, data_segs, free_segs, free_secs; + int tot_blks, data_blks, node_blks; + int curseg[NR_CURSEG_TYPE]; + int cursec[NR_CURSEG_TYPE]; + int curzone[NR_CURSEG_TYPE]; + + unsigned int segment_count[2]; + unsigned int block_count[2]; + unsigned base_mem, cache_mem; +}; + +static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) +{ + return (struct f2fs_stat_info *)sbi->stat_info; +} + +#define stat_inc_call_count(si) ((si)->call_count++) +#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++) +#define stat_inc_dirty_dir(sbi) ((sbi)->n_dirty_dirs++) +#define stat_dec_dirty_dir(sbi) ((sbi)->n_dirty_dirs--) +#define stat_inc_total_hit(sb) ((F2FS_SB(sb))->total_hit_ext++) +#define stat_inc_read_hit(sb) ((F2FS_SB(sb))->read_hit_ext++) +#define stat_inc_inline_inode(inode) \ + do { \ + if (f2fs_has_inline_data(inode)) \ + ((F2FS_SB(inode->i_sb))->inline_inode++); \ + } while (0) +#define stat_dec_inline_inode(inode) \ + do { \ + if (f2fs_has_inline_data(inode)) \ + ((F2FS_SB(inode->i_sb))->inline_inode--); \ + } while (0) + +#define stat_inc_seg_type(sbi, curseg) \ + ((sbi)->segment_count[(curseg)->alloc_type]++) +#define stat_inc_block_count(sbi, 
curseg) \ + ((sbi)->block_count[(curseg)->alloc_type]++) + +#define stat_inc_seg_count(sbi, type) \ + do { \ + struct f2fs_stat_info *si = F2FS_STAT(sbi); \ + (si)->tot_segs++; \ + if (type == SUM_TYPE_DATA) \ + si->data_segs++; \ + else \ + si->node_segs++; \ + } while (0) + +#define stat_inc_tot_blk_count(si, blks) \ + (si->tot_blks += (blks)) + +#define stat_inc_data_blk_count(sbi, blks) \ + do { \ + struct f2fs_stat_info *si = F2FS_STAT(sbi); \ + stat_inc_tot_blk_count(si, blks); \ + si->data_blks += (blks); \ + } while (0) + +#define stat_inc_node_blk_count(sbi, blks) \ + do { \ + struct f2fs_stat_info *si = F2FS_STAT(sbi); \ + stat_inc_tot_blk_count(si, blks); \ + si->node_blks += (blks); \ + } while (0) + +int f2fs_build_stats(struct f2fs_sb_info *); +void f2fs_destroy_stats(struct f2fs_sb_info *); +void __init f2fs_create_root_stats(void); +void f2fs_destroy_root_stats(void); +#else +#define stat_inc_call_count(si) +#define stat_inc_bggc_count(si) +#define stat_inc_dirty_dir(sbi) +#define stat_dec_dirty_dir(sbi) +#define stat_inc_total_hit(sb) +#define stat_inc_read_hit(sb) +#define stat_inc_inline_inode(inode) +#define stat_dec_inline_inode(inode) +#define stat_inc_seg_type(sbi, curseg) +#define stat_inc_block_count(sbi, curseg) +#define stat_inc_seg_count(si, type) +#define stat_inc_tot_blk_count(si, blks) +#define stat_inc_data_blk_count(si, blks) +#define stat_inc_node_blk_count(sbi, blks) + +static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } +static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } +static inline void __init f2fs_create_root_stats(void) { } +static inline void f2fs_destroy_root_stats(void) { } +#endif + +extern const struct file_operations f2fs_dir_operations; +extern const struct file_operations f2fs_file_operations; +extern const struct inode_operations f2fs_file_inode_operations; +extern const struct address_space_operations f2fs_dblock_aops; +extern const struct address_space_operations f2fs_node_aops; +extern const struct address_space_operations f2fs_meta_aops; +extern const struct inode_operations f2fs_dir_inode_operations; +extern const struct inode_operations f2fs_symlink_inode_operations; +extern const struct inode_operations f2fs_special_inode_operations; + +/* + * inline.c + */ +bool f2fs_may_inline(struct inode *); +int f2fs_read_inline_data(struct inode *, struct page *); +int f2fs_convert_inline_data(struct inode *, pgoff_t); +int f2fs_write_inline_data(struct inode *, struct page *, unsigned int); +int recover_inline_data(struct inode *, struct page *); +#endif diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c new file mode 100644 index 0000000000000..6192c225d682e --- /dev/null +++ b/fs/f2fs/file.c @@ -0,0 +1,696 @@ +/* + * fs/f2fs/file.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
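
The CONFIG_F2FS_STAT_FS block above relies on a common kernel idiom: when the option is enabled the stat_* names are macros that bump counters, and when it is disabled the same names expand to nothing, so call sites such as stat_inc_call_count() never need an #ifdef of their own. A minimal userspace sketch of that idiom (MY_STATS and struct my_stats are illustrative names, not f2fs symbols):

#include <stdio.h>

#define MY_STATS 1                        /* set to 0 to compile the counters away */

struct my_stats { int call_count; };

#if MY_STATS
#define stat_inc_call_count(si)  ((si)->call_count++)
#else
#define stat_inc_call_count(si)  /* expands to nothing when stats are off */
#endif

int main(void)
{
	struct my_stats si = { 0 };

	stat_inc_call_count(&si);             /* the call site looks the same either way */
	stat_inc_call_count(&si);
#if MY_STATS
	printf("calls: %d\n", si.call_count);  /* prints 2 */
#endif
	return 0;
}

The disabled variants in the patch keep the same parameter lists so existing call sites compile unchanged.
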
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include "xattr.h" +#include "acl.h" +#include + +static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct page *page = vmf->page; + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct dnode_of_data dn; + int err; + + f2fs_balance_fs(sbi); + + /* Wait if fs is frozen. This is racy so we check again later on + * and retry if the fs has been frozen after the page lock has + * been acquired + */ + vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + + /* block allocation */ + f2fs_lock_op(sbi); + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = f2fs_reserve_block(&dn, page->index); + f2fs_unlock_op(sbi); + if (err) + goto out; + + file_update_time(vma->vm_file); + lock_page(page); + if (unlikely(page->mapping != inode->i_mapping || + page_offset(page) > i_size_read(inode) || + !PageUptodate(page))) { + unlock_page(page); + err = -EFAULT; + goto out; + } + + /* + * check to see if the page is mapped already (no holes) + */ + if (PageMappedToDisk(page)) + goto mapped; + + /* page is wholly or partially inside EOF */ + if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) { + unsigned offset; + offset = i_size_read(inode) & ~PAGE_CACHE_MASK; + zero_user_segment(page, offset, PAGE_CACHE_SIZE); + } + set_page_dirty(page); + SetPageUptodate(page); + + trace_f2fs_vm_page_mkwrite(page, DATA); +mapped: + /* fill the page */ + wait_on_page_writeback(page); +out: + return block_page_mkwrite_return(err); +} + +static const struct vm_operations_struct f2fs_file_vm_ops = { + .fault = filemap_fault, + .page_mkwrite = f2fs_vm_page_mkwrite, +}; + +static int get_parent_ino(struct inode *inode, nid_t *pino) +{ + struct dentry *dentry; + + inode = igrab(inode); + dentry = d_find_any_alias(inode); + iput(inode); + if (!dentry) + return 0; + + if (update_dent_inode(inode, &dentry->d_name)) { + dput(dentry); + return 0; + } + + *pino = parent_ino(dentry); + dput(dentry); + return 1; +} + +int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) +{ + struct inode *inode = file->f_mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + int ret = 0; + bool need_cp = false; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_NONE, + .nr_to_write = LONG_MAX, + .for_reclaim = 0, + }; + + if (unlikely(f2fs_readonly(inode->i_sb))) + return 0; + + trace_f2fs_sync_file_enter(inode); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) { + trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + return ret; + } + + /* guarantee free sections for fsync */ + f2fs_balance_fs(sbi); + + mutex_lock(&inode->i_mutex); + + /* + * Both of fdatasync() and fsync() are able to be recovered from + * sudden-power-off. 
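
The checks that follow decide between the two fsync strategies: if the file can be brought back by roll-forward recovery (replaying its node pages after a crash), f2fs only flushes that file's nodes; otherwise it falls back to a full checkpoint. Purely as a restatement of the conditions below, using only helpers this patch already declares (need_checkpoint_for_fsync() itself is a hypothetical name, not part of the patch):

/* Sketch only: mirrors the need_cp tests in f2fs_sync_file(). */
static bool need_checkpoint_for_fsync(struct f2fs_sb_info *sbi,
				      struct inode *inode)
{
	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		return true;	/* roll-forward only covers single-link regular files */
	if (file_wrong_pino(inode))
		return true;	/* stored parent ino is stale */
	if (!space_for_roll_forward(sbi))
		return true;	/* cannot guarantee room to replay the log */
	if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		return true;	/* parent node not covered by a checkpoint yet */
	if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		return true;	/* xattrs changed in this checkpoint interval */
	return false;		/* safe to rely on roll-forward at mount time */
}
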
+ */ + if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) + need_cp = true; + else if (file_wrong_pino(inode)) + need_cp = true; + else if (!space_for_roll_forward(sbi)) + need_cp = true; + else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) + need_cp = true; + else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi))) + need_cp = true; + + if (need_cp) { + nid_t pino; + + F2FS_I(inode)->xattr_ver = 0; + + /* all the dirty node pages should be flushed for POR */ + ret = f2fs_sync_fs(inode->i_sb, 1); + if (file_wrong_pino(inode) && inode->i_nlink == 1 && + get_parent_ino(inode, &pino)) { + F2FS_I(inode)->i_pino = pino; + file_got_pino(inode); + mark_inode_dirty_sync(inode); + ret = f2fs_write_inode(inode, NULL); + if (ret) + goto out; + } + } else { + /* if there is no written node page, write its inode page */ + while (!sync_node_pages(sbi, inode->i_ino, &wbc)) { + mark_inode_dirty_sync(inode); + ret = f2fs_write_inode(inode, NULL); + if (ret) + goto out; + } + ret = wait_on_node_pages_writeback(sbi, inode->i_ino); + if (ret) + goto out; + ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + } +out: + mutex_unlock(&inode->i_mutex); + trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + return ret; +} + +static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + file_accessed(file); + vma->vm_ops = &f2fs_file_vm_ops; + return 0; +} + +int truncate_data_blocks_range(struct dnode_of_data *dn, int count) +{ + int nr_free = 0, ofs = dn->ofs_in_node; + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct f2fs_node *raw_node; + __le32 *addr; + + raw_node = F2FS_NODE(dn->node_page); + addr = blkaddr_in_node(raw_node) + ofs; + + for (; count > 0; count--, addr++, dn->ofs_in_node++) { + block_t blkaddr = le32_to_cpu(*addr); + if (blkaddr == NULL_ADDR) + continue; + + update_extent_cache(NULL_ADDR, dn); + invalidate_blocks(sbi, blkaddr); + nr_free++; + } + if (nr_free) { + dec_valid_block_count(sbi, dn->inode, nr_free); + set_page_dirty(dn->node_page); + sync_inode_page(dn); + } + dn->ofs_in_node = ofs; + + trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid, + dn->ofs_in_node, nr_free); + return nr_free; +} + +void truncate_data_blocks(struct dnode_of_data *dn) +{ + truncate_data_blocks_range(dn, ADDRS_PER_BLOCK); +} + +static void truncate_partial_data_page(struct inode *inode, u64 from) +{ + unsigned offset = from & (PAGE_CACHE_SIZE - 1); + struct page *page; + + if (!offset) + return; + + page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false); + if (IS_ERR(page)) + return; + + lock_page(page); + if (unlikely(page->mapping != inode->i_mapping)) { + f2fs_put_page(page, 1); + return; + } + wait_on_page_writeback(page); + zero_user(page, offset, PAGE_CACHE_SIZE - offset); + set_page_dirty(page); + f2fs_put_page(page, 1); +} + +int truncate_blocks(struct inode *inode, u64 from) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + unsigned int blocksize = inode->i_sb->s_blocksize; + struct dnode_of_data dn; + pgoff_t free_from; + int count = 0, err = 0; + + trace_f2fs_truncate_blocks_enter(inode, from); + + if (f2fs_has_inline_data(inode)) + goto done; + + free_from = (pgoff_t) + ((from + blocksize - 1) >> (sbi->log_blocksize)); + + f2fs_lock_op(sbi); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); + if (err) { + if (err == -ENOENT) + goto free_next; + f2fs_unlock_op(sbi); + trace_f2fs_truncate_blocks_exit(inode, err); + return err; + } + + if (IS_INODE(dn.node_page)) 
+ count = ADDRS_PER_INODE(F2FS_I(inode)); + else + count = ADDRS_PER_BLOCK; + + count -= dn.ofs_in_node; + f2fs_bug_on(count < 0); + + if (dn.ofs_in_node || IS_INODE(dn.node_page)) { + truncate_data_blocks_range(&dn, count); + free_from += count; + } + + f2fs_put_dnode(&dn); +free_next: + err = truncate_inode_blocks(inode, free_from); + f2fs_unlock_op(sbi); +done: + /* lastly zero out the first data page */ + truncate_partial_data_page(inode, from); + + trace_f2fs_truncate_blocks_exit(inode, err); + return err; +} + +void f2fs_truncate(struct inode *inode) +{ + int err; + + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) + return; + + trace_f2fs_truncate(inode); + + err = truncate_blocks(inode, i_size_read(inode)); + if (err) { + f2fs_msg(inode->i_sb, KERN_ERR, "truncate failed with %d", + err); + f2fs_handle_error(F2FS_SB(inode->i_sb)); + } else { + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + } +} + +int f2fs_getattr(struct vfsmount *mnt, + struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + generic_fillattr(inode, stat); + stat->blocks <<= 3; + return 0; +} + +#ifdef CONFIG_F2FS_FS_POSIX_ACL +static void __setattr_copy(struct inode *inode, const struct iattr *attr) +{ + struct f2fs_inode_info *fi = F2FS_I(inode); + unsigned int ia_valid = attr->ia_valid; + + if (ia_valid & ATTR_UID) + inode->i_uid = attr->ia_uid; + if (ia_valid & ATTR_GID) + inode->i_gid = attr->ia_gid; + if (ia_valid & ATTR_ATIME) + inode->i_atime = timespec_trunc(attr->ia_atime, + inode->i_sb->s_time_gran); + if (ia_valid & ATTR_MTIME) + inode->i_mtime = timespec_trunc(attr->ia_mtime, + inode->i_sb->s_time_gran); + if (ia_valid & ATTR_CTIME) + inode->i_ctime = timespec_trunc(attr->ia_ctime, + inode->i_sb->s_time_gran); + if (ia_valid & ATTR_MODE) { + umode_t mode = attr->ia_mode; + + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + mode &= ~S_ISGID; + set_acl_inode(fi, mode); + } +} +#else +#define __setattr_copy setattr_copy +#endif + +int f2fs_setattr(struct dentry *dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + struct f2fs_inode_info *fi = F2FS_I(inode); + struct f2fs_inode_info *pfi = F2FS_I(dentry->d_parent->d_inode); + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + int err; + + err = inode_change_ok(inode, attr); + if (err) + return err; + + if (IS_ANDROID_EMU(sbi, fi, pfi)) + f2fs_android_emu(sbi, inode, &attr->ia_uid, &attr->ia_gid, + &attr->ia_mode); + + if ((attr->ia_valid & ATTR_SIZE) && + attr->ia_size != i_size_read(inode)) { + err = f2fs_convert_inline_data(inode, attr->ia_size); + if (err) + return err; + + truncate_setsize(inode, attr->ia_size); + f2fs_truncate(inode); + f2fs_balance_fs(F2FS_SB(inode->i_sb)); + } + + __setattr_copy(inode, attr); + + if (attr->ia_valid & ATTR_MODE) { + err = f2fs_acl_chmod(inode); + if (err || is_inode_flag_set(fi, FI_ACL_MODE)) { + inode->i_mode = fi->i_acl_mode; + clear_inode_flag(fi, FI_ACL_MODE); + } + } + + mark_inode_dirty(inode); + return err; +} + +const struct inode_operations f2fs_file_inode_operations = { + .getattr = f2fs_getattr, + .setattr = f2fs_setattr, + .get_acl = f2fs_get_acl, +#ifdef CONFIG_F2FS_FS_XATTR + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = f2fs_listxattr, + .removexattr = generic_removexattr, +#endif +}; + +static void fill_zero(struct inode *inode, pgoff_t index, + loff_t start, loff_t len) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct page *page; 
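
A small aside on f2fs_getattr() above: generic_fillattr() copies inode->i_blocks, which f2fs maintains in 4 KB filesystem blocks, while stat(2) reports st_blocks in 512-byte sectors, so the handler multiplies by 8 with a left shift of 3. A tiny standalone check of that conversion:

#include <stdio.h>

int main(void)
{
	unsigned long long f2fs_blocks = 3;            /* inode holds three 4 KB blocks */
	unsigned long long sectors = f2fs_blocks << 3; /* same as * (4096 / 512) */

	/* 3 blocks -> 24 sectors -> 12288 bytes */
	printf("%llu blocks = %llu sectors = %llu bytes\n",
	       f2fs_blocks, sectors, sectors * 512);
	return 0;
}
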
+ + if (!len) + return; + + f2fs_balance_fs(sbi); + + f2fs_lock_op(sbi); + page = get_new_data_page(inode, NULL, index, false); + f2fs_unlock_op(sbi); + + if (!IS_ERR(page)) { + wait_on_page_writeback(page); + zero_user(page, start, len); + set_page_dirty(page); + f2fs_put_page(page, 1); + } +} + +int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) +{ + pgoff_t index; + int err; + + for (index = pg_start; index < pg_end; index++) { + struct dnode_of_data dn; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, index, LOOKUP_NODE); + if (err) { + if (err == -ENOENT) + continue; + return err; + } + + if (dn.data_blkaddr != NULL_ADDR) + truncate_data_blocks_range(&dn, 1); + f2fs_put_dnode(&dn); + } + return 0; +} + +static int punch_hole(struct inode *inode, loff_t offset, loff_t len) +{ + pgoff_t pg_start, pg_end; + loff_t off_start, off_end; + int ret = 0; + + ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1); + if (ret) + return ret; + + pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + + off_start = offset & (PAGE_CACHE_SIZE - 1); + off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + + if (pg_start == pg_end) { + fill_zero(inode, pg_start, off_start, + off_end - off_start); + } else { + if (off_start) + fill_zero(inode, pg_start++, off_start, + PAGE_CACHE_SIZE - off_start); + if (off_end) + fill_zero(inode, pg_end, 0, off_end); + + if (pg_start < pg_end) { + struct address_space *mapping = inode->i_mapping; + loff_t blk_start, blk_end; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + + f2fs_balance_fs(sbi); + + blk_start = pg_start << PAGE_CACHE_SHIFT; + blk_end = pg_end << PAGE_CACHE_SHIFT; + truncate_inode_pages_range(mapping, blk_start, + blk_end - 1); + + f2fs_lock_op(sbi); + ret = truncate_hole(inode, pg_start, pg_end); + f2fs_unlock_op(sbi); + } + } + + return ret; +} + +static int expand_inode_data(struct inode *inode, loff_t offset, + loff_t len, int mode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + pgoff_t index, pg_start, pg_end; + loff_t new_size = i_size_read(inode); + loff_t off_start, off_end; + int ret = 0; + + ret = inode_newsize_ok(inode, (len + offset)); + if (ret) + return ret; + + ret = f2fs_convert_inline_data(inode, offset + len); + if (ret) + return ret; + + pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + + off_start = offset & (PAGE_CACHE_SIZE - 1); + off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + + for (index = pg_start; index <= pg_end; index++) { + struct dnode_of_data dn; + + f2fs_lock_op(sbi); + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_reserve_block(&dn, index); + f2fs_unlock_op(sbi); + if (ret) + break; + + if (pg_start == pg_end) + new_size = offset + len; + else if (index == pg_start && off_start) + new_size = (index + 1) << PAGE_CACHE_SHIFT; + else if (index == pg_end) + new_size = (index << PAGE_CACHE_SHIFT) + off_end; + else + new_size += PAGE_CACHE_SIZE; + } + + if (!(mode & FALLOC_FL_KEEP_SIZE) && + i_size_read(inode) < new_size) { + i_size_write(inode, new_size); + mark_inode_dirty(inode); + } + + return ret; +} + +static long f2fs_fallocate(struct file *file, int mode, + loff_t offset, loff_t len) +{ + struct inode *inode = file->f_path.dentry->d_inode; + long ret; + + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + + if (mode & FALLOC_FL_PUNCH_HOLE) + ret = 
punch_hole(inode, offset, len); + else + ret = expand_inode_data(inode, offset, len, mode); + + if (!ret) { + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); + } + trace_f2fs_fallocate(inode, mode, offset, len, ret); + return ret; +} + +#define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL)) +#define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL) + +static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) +{ + if (S_ISDIR(mode)) + return flags; + else if (S_ISREG(mode)) + return flags & F2FS_REG_FLMASK; + else + return flags & F2FS_OTHER_FLMASK; +} + +long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct f2fs_inode_info *fi = F2FS_I(inode); + unsigned int flags; + int ret; + + switch (cmd) { + case F2FS_IOC_GETFLAGS: + flags = fi->i_flags & FS_FL_USER_VISIBLE; + return put_user(flags, (int __user *) arg); + case F2FS_IOC_SETFLAGS: + { + unsigned int oldflags; + + ret = mnt_want_write(filp->f_path.mnt); + if (ret) + return ret; + + if (!inode_owner_or_capable(inode)) { + ret = -EACCES; + goto out; + } + + if (get_user(flags, (int __user *) arg)) { + ret = -EFAULT; + goto out; + } + + flags = f2fs_mask_flags(inode->i_mode, flags); + + mutex_lock(&inode->i_mutex); + + oldflags = fi->i_flags; + + if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) { + if (!capable(CAP_LINUX_IMMUTABLE)) { + mutex_unlock(&inode->i_mutex); + ret = -EPERM; + goto out; + } + } + + flags = flags & FS_FL_USER_MODIFIABLE; + flags |= oldflags & ~FS_FL_USER_MODIFIABLE; + fi->i_flags = flags; + mutex_unlock(&inode->i_mutex); + + f2fs_set_inode_flags(inode); + inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(inode); +out: + mnt_drop_write(filp->f_path.mnt); + return ret; + } + default: + return -ENOTTY; + } +} + +#ifdef CONFIG_COMPAT +long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case F2FS_IOC32_GETFLAGS: + cmd = F2FS_IOC_GETFLAGS; + break; + case F2FS_IOC32_SETFLAGS: + cmd = F2FS_IOC_SETFLAGS; + break; + default: + return -ENOIOCTLCMD; + } + return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +const struct file_operations f2fs_file_operations = { + .llseek = generic_file_llseek, + .read = do_sync_read, + .write = do_sync_write, + .aio_read = generic_file_aio_read, + .aio_write = generic_file_aio_write, + .open = generic_file_open, + .mmap = f2fs_file_mmap, + .fsync = f2fs_sync_file, + .fallocate = f2fs_fallocate, + .unlocked_ioctl = f2fs_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = f2fs_compat_ioctl, +#endif + .splice_read = generic_file_splice_read, + .splice_write = generic_file_splice_write, +}; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c new file mode 100644 index 0000000000000..65d7e44d7d225 --- /dev/null +++ b/fs/f2fs/gc.c @@ -0,0 +1,747 @@ +/* + * fs/f2fs/gc.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
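
The F2FS_IOC_SETFLAGS handler above merges the caller's request with the inode's current flags using two masks: user-settable bits are taken from the request, everything else is preserved from the old value. A standalone illustration of that merge (USER_MODIFIABLE below is an illustrative mask, not the kernel's FS_FL_USER_MODIFIABLE value):

#include <stdio.h>

#define USER_MODIFIABLE 0x00ffu    /* pretend only the low byte is user-settable */

static unsigned int merge_flags(unsigned int oldflags, unsigned int requested)
{
	unsigned int flags;

	flags  = requested & USER_MODIFIABLE;    /* user-settable bits from the request */
	flags |= oldflags & ~USER_MODIFIABLE;    /* kernel-internal bits stay untouched */
	return flags;
}

int main(void)
{
	unsigned int oldflags = 0x1234;   /* 0x12xx internal bits, 0x34 user bits */
	unsigned int request  = 0x00ff;   /* userspace asks for every bit */

	printf("merged: 0x%04x\n", merge_flags(oldflags, request));   /* 0x12ff */
	return 0;
}

The same pattern is what prevents an unprivileged flag update from clearing bits the kernel manages internally.
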
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include "gc.h" +#include + +static struct kmem_cache *winode_slab; + +static int gc_thread_func(void *data) +{ + struct f2fs_sb_info *sbi = data; + struct f2fs_gc_kthread *gc_th = sbi->gc_thread; + wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; + long wait_ms; + + wait_ms = gc_th->min_sleep_time; + + do { + if (try_to_freeze()) + continue; + else + wait_event_interruptible_timeout(*wq, + kthread_should_stop(), + msecs_to_jiffies(wait_ms)); + if (kthread_should_stop()) + break; + + /* + * [GC triggering condition] + * 0. GC is not conducted currently. + * 1. There are enough dirty segments. + * 2. IO subsystem is idle by checking the # of writeback pages. + * 3. IO subsystem is idle by checking the # of requests in + * bdev's request list. + * + * Note) We have to avoid triggering GCs too much frequently. + * Because it is possible that some segments can be + * invalidated soon after by user update or deletion. + * So, I'd like to wait some time to collect dirty segments. + */ + if (!mutex_trylock(&sbi->gc_mutex)) + continue; + + if (!is_idle(sbi)) { + wait_ms = increase_sleep_time(gc_th, wait_ms); + mutex_unlock(&sbi->gc_mutex); + continue; + } + + if (has_enough_invalid_blocks(sbi)) + wait_ms = decrease_sleep_time(gc_th, wait_ms); + else + wait_ms = increase_sleep_time(gc_th, wait_ms); + + stat_inc_bggc_count(sbi); + + /* if return value is not zero, no victim was selected */ + if (f2fs_gc(sbi)) + wait_ms = gc_th->no_gc_sleep_time; + + /* balancing f2fs's metadata periodically */ + f2fs_balance_fs_bg(sbi); + + } while (!kthread_should_stop()); + return 0; +} + +int start_gc_thread(struct f2fs_sb_info *sbi) +{ + struct f2fs_gc_kthread *gc_th; + dev_t dev = sbi->sb->s_bdev->bd_dev; + int err = 0; + + if (!test_opt(sbi, BG_GC)) + goto out; + gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); + if (!gc_th) { + err = -ENOMEM; + goto out; + } + + gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME; + gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME; + gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME; + + gc_th->gc_idle = 0; + + sbi->gc_thread = gc_th; + init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); + sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, + "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); + if (IS_ERR(gc_th->f2fs_gc_task)) { + err = PTR_ERR(gc_th->f2fs_gc_task); + kfree(gc_th); + sbi->gc_thread = NULL; + } +out: + return err; +} + +void stop_gc_thread(struct f2fs_sb_info *sbi) +{ + struct f2fs_gc_kthread *gc_th = sbi->gc_thread; + if (!gc_th) + return; + kthread_stop(gc_th->f2fs_gc_task); + kfree(gc_th); + sbi->gc_thread = NULL; +} + +static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type) +{ + int gc_mode = (gc_type == BG_GC) ? 
GC_CB : GC_GREEDY; + + if (gc_th && gc_th->gc_idle) { + if (gc_th->gc_idle == 1) + gc_mode = GC_CB; + else if (gc_th->gc_idle == 2) + gc_mode = GC_GREEDY; + } + return gc_mode; +} + +static void select_policy(struct f2fs_sb_info *sbi, int gc_type, + int type, struct victim_sel_policy *p) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + if (p->alloc_mode == SSR) { + p->gc_mode = GC_GREEDY; + p->dirty_segmap = dirty_i->dirty_segmap[type]; + p->max_search = dirty_i->nr_dirty[type]; + p->ofs_unit = 1; + } else { + p->gc_mode = select_gc_type(sbi->gc_thread, gc_type); + p->dirty_segmap = dirty_i->dirty_segmap[DIRTY]; + p->max_search = dirty_i->nr_dirty[DIRTY]; + p->ofs_unit = sbi->segs_per_sec; + } + + if (p->max_search > sbi->max_victim_search) + p->max_search = sbi->max_victim_search; + + p->offset = sbi->last_victim[p->gc_mode]; +} + +static unsigned int get_max_cost(struct f2fs_sb_info *sbi, + struct victim_sel_policy *p) +{ + /* SSR allocates in a segment unit */ + if (p->alloc_mode == SSR) + return 1 << sbi->log_blocks_per_seg; + if (p->gc_mode == GC_GREEDY) + return (1 << sbi->log_blocks_per_seg) * p->ofs_unit; + else if (p->gc_mode == GC_CB) + return UINT_MAX; + else /* No other gc_mode */ + return 0; +} + +static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned int hint = 0; + unsigned int secno; + + /* + * If the gc_type is FG_GC, we can select victim segments + * selected by background GC before. + * Those segments guarantee they have small valid blocks. + */ +next: + secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++); + if (secno < TOTAL_SECS(sbi)) { + if (sec_usage_check(sbi, secno)) + goto next; + clear_bit(secno, dirty_i->victim_secmap); + return secno * sbi->segs_per_sec; + } + return NULL_SEGNO; +} + +static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int secno = GET_SECNO(sbi, segno); + unsigned int start = secno * sbi->segs_per_sec; + unsigned long long mtime = 0; + unsigned int vblocks; + unsigned char age = 0; + unsigned char u; + unsigned int i; + + for (i = 0; i < sbi->segs_per_sec; i++) + mtime += get_seg_entry(sbi, start + i)->mtime; + vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec); + + mtime = div_u64(mtime, sbi->segs_per_sec); + vblocks = div_u64(vblocks, sbi->segs_per_sec); + + u = (vblocks * 100) >> sbi->log_blocks_per_seg; + + /* Handle if the system time is changed by user */ + if (mtime < sit_i->min_mtime) + sit_i->min_mtime = mtime; + if (mtime > sit_i->max_mtime) + sit_i->max_mtime = mtime; + if (sit_i->max_mtime != sit_i->min_mtime) + age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime), + sit_i->max_mtime - sit_i->min_mtime); + + return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); +} + +static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, + unsigned int segno, struct victim_sel_policy *p) +{ + if (p->alloc_mode == SSR) + return get_seg_entry(sbi, segno)->ckpt_valid_blocks; + + /* alloc_mode == LFS */ + if (p->gc_mode == GC_GREEDY) + return get_valid_blocks(sbi, segno, sbi->segs_per_sec); + else + return get_cb_cost(sbi, segno); +} + +/* + * This function is called from two paths. + * One is garbage collection and the other is SSR segment selection. + * When it is called during GC, it just gets a victim segment + * and it does not remove it from dirty seglist. 
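
get_cb_cost() above is the cost-benefit victim policy: u is the candidate's valid-block utilisation in percent, age says how old its data is relative to the oldest and newest data in the filesystem, and the result is inverted (UINT_MAX minus the benefit) so the caller can simply keep the minimum. A standalone rework of the arithmetic; the 512 blocks per segment, one segment per section, and the sample mtimes are assumptions made up for the example:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int blocks_per_seg = 512;   /* i.e. log_blocks_per_seg == 9 */
	unsigned int vblocks = 128;          /* valid blocks left in the candidate */
	unsigned long long mtime = 1000, min_mtime = 0, max_mtime = 4000;

	/* utilisation: (vblocks * 100) >> log_blocks_per_seg in the patch */
	unsigned int u = (vblocks * 100) / blocks_per_seg;                 /* 25 */

	/* age: 100 for the oldest data in the fs, 0 for the newest */
	unsigned int age = 100 - (unsigned int)(100 * (mtime - min_mtime) /
						(max_mtime - min_mtime));  /* 75 */

	/* smaller cost wins: mostly-invalid and old sections are preferred */
	unsigned int benefit = (100 * (100 - u) * age) / (100 + u);        /* 4500 */
	unsigned int cost = UINT_MAX - benefit;

	printf("u=%u%% age=%u benefit=%u cost=UINT_MAX-%u\n", u, age, benefit, benefit);
	return 0;
}

Greedy mode (GC_GREEDY) skips this and uses the raw valid-block count, which is cheaper to compute but ignores data age.
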
+ * When it is called from SSR segment selection, it finds a segment + * which has minimum valid blocks and removes it from dirty seglist. + */ +static int get_victim_by_default(struct f2fs_sb_info *sbi, + unsigned int *result, int gc_type, int type, char alloc_mode) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + struct victim_sel_policy p; + unsigned int secno, max_cost; + int nsearched = 0; + + p.alloc_mode = alloc_mode; + select_policy(sbi, gc_type, type, &p); + + p.min_segno = NULL_SEGNO; + p.min_cost = max_cost = get_max_cost(sbi, &p); + + mutex_lock(&dirty_i->seglist_lock); + + if (p.alloc_mode == LFS && gc_type == FG_GC) { + p.min_segno = check_bg_victims(sbi); + if (p.min_segno != NULL_SEGNO) + goto got_it; + } + + while (1) { + unsigned long cost; + unsigned int segno; + + segno = find_next_bit(p.dirty_segmap, + TOTAL_SEGS(sbi), p.offset); + if (segno >= TOTAL_SEGS(sbi)) { + if (sbi->last_victim[p.gc_mode]) { + sbi->last_victim[p.gc_mode] = 0; + p.offset = 0; + continue; + } + break; + } + + p.offset = segno + p.ofs_unit; + if (p.ofs_unit > 1) + p.offset -= segno % p.ofs_unit; + + secno = GET_SECNO(sbi, segno); + + if (sec_usage_check(sbi, secno)) + continue; + if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) + continue; + + cost = get_gc_cost(sbi, segno, &p); + + if (p.min_cost > cost) { + p.min_segno = segno; + p.min_cost = cost; + } else if (unlikely(cost == max_cost)) { + continue; + } + + if (nsearched++ >= p.max_search) { + sbi->last_victim[p.gc_mode] = segno; + break; + } + } + if (p.min_segno != NULL_SEGNO) { +got_it: + if (p.alloc_mode == LFS) { + secno = GET_SECNO(sbi, p.min_segno); + if (gc_type == FG_GC) + sbi->cur_victim_sec = secno; + else + set_bit(secno, dirty_i->victim_secmap); + } + *result = (p.min_segno / p.ofs_unit) * p.ofs_unit; + + trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, + sbi->cur_victim_sec, + prefree_segments(sbi), free_segments(sbi)); + } + mutex_unlock(&dirty_i->seglist_lock); + + return (p.min_segno == NULL_SEGNO) ? 0 : 1; +} + +static const struct victim_selection default_v_ops = { + .get_victim = get_victim_by_default, +}; + +static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist) +{ + struct inode_entry *ie; + + list_for_each_entry(ie, ilist, list) + if (ie->inode->i_ino == ino) + return ie->inode; + return NULL; +} + +static void add_gc_inode(struct inode *inode, struct list_head *ilist) +{ + struct inode_entry *new_ie; + + if (inode == find_gc_inode(inode->i_ino, ilist)) { + iput(inode); + return; + } + + new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS); + new_ie->inode = inode; + list_add_tail(&new_ie->list, ilist); +} + +static void put_gc_inode(struct list_head *ilist) +{ + struct inode_entry *ie, *next_ie; + list_for_each_entry_safe(ie, next_ie, ilist, list) { + iput(ie->inode); + list_del(&ie->list); + kmem_cache_free(winode_slab, ie); + } +} + +static int check_valid_map(struct f2fs_sb_info *sbi, + unsigned int segno, int offset) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct seg_entry *sentry; + int ret; + + mutex_lock(&sit_i->sentry_lock); + sentry = get_seg_entry(sbi, segno); + ret = f2fs_test_bit(offset, sentry->cur_valid_map); + mutex_unlock(&sit_i->sentry_lock); + return ret; +} + +/* + * This function compares node address got in summary with that in NAT. + * On validity, copy that node with cold status, otherwise (invalid node) + * ignore that. 
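
One detail of the search loop in get_victim_by_default() above: after testing a segment, the cursor is advanced to segno + p.ofs_unit and then pulled back by segno % p.ofs_unit, which lands it exactly on the first segment of the next section when victims are chosen per section (LFS mode). A standalone check of that alignment, assuming 4 segments per section purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int segs_per_sec = 4;      /* assumed p.ofs_unit */
	unsigned int segno = 9;             /* just examined segment 9 (section 2) */

	unsigned int next = segno + segs_per_sec;     /* 13 */
	if (segs_per_sec > 1)
		next -= segno % segs_per_sec;         /* minus 1 -> 12 */

	/* 12 is the first segment of section 3, so no section is scanned twice */
	printf("resume search at segment %u\n", next);
	return 0;
}
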
+ */ +static void gc_node_segment(struct f2fs_sb_info *sbi, + struct f2fs_summary *sum, unsigned int segno, int gc_type) +{ + bool initial = true; + struct f2fs_summary *entry; + int off; + +next_step: + entry = sum; + + for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { + nid_t nid = le32_to_cpu(entry->nid); + struct page *node_page; + + /* stop BG_GC if there is not enough free sections. */ + if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) + return; + + if (check_valid_map(sbi, segno, off) == 0) + continue; + + if (initial) { + ra_node_page(sbi, nid); + continue; + } + node_page = get_node_page(sbi, nid); + if (IS_ERR(node_page)) + continue; + + /* set page dirty and write it */ + if (gc_type == FG_GC) { + f2fs_wait_on_page_writeback(node_page, NODE); + set_page_dirty(node_page); + } else { + if (!PageWriteback(node_page)) + set_page_dirty(node_page); + } + f2fs_put_page(node_page, 1); + stat_inc_node_blk_count(sbi, 1); + } + + if (initial) { + initial = false; + goto next_step; + } + + if (gc_type == FG_GC) { + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .for_reclaim = 0, + }; + sync_node_pages(sbi, 0, &wbc); + + /* + * In the case of FG_GC, it'd be better to reclaim this victim + * completely. + */ + if (get_valid_blocks(sbi, segno, 1) != 0) + goto next_step; + } +} + +/* + * Calculate start block index indicating the given node offset. + * Be careful, caller should give this node offset only indicating direct node + * blocks. If any node offsets, which point the other types of node blocks such + * as indirect or double indirect node blocks, are given, it must be a caller's + * bug. + */ +block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi) +{ + unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4; + unsigned int bidx; + + if (node_ofs == 0) + return 0; + + if (node_ofs <= 2) { + bidx = node_ofs - 1; + } else if (node_ofs <= indirect_blks) { + int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); + bidx = node_ofs - 2 - dec; + } else { + int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); + bidx = node_ofs - 5 - dec; + } + return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi); +} + +static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, + struct node_info *dni, block_t blkaddr, unsigned int *nofs) +{ + struct page *node_page; + nid_t nid; + unsigned int ofs_in_node; + block_t source_blkaddr; + + nid = le32_to_cpu(sum->nid); + ofs_in_node = le16_to_cpu(sum->ofs_in_node); + + node_page = get_node_page(sbi, nid); + if (IS_ERR(node_page)) + return 0; + + get_node_info(sbi, nid, dni); + + if (sum->version != dni->version) { + f2fs_put_page(node_page, 1); + return 0; + } + + *nofs = ofs_of_node(node_page); + source_blkaddr = datablock_addr(node_page, ofs_in_node); + f2fs_put_page(node_page, 1); + + if (source_blkaddr != blkaddr) + return 0; + return 1; +} + +static void move_data_page(struct inode *inode, struct page *page, int gc_type) +{ + struct f2fs_io_info fio = { + .type = DATA, + .rw = WRITE_SYNC, + }; + + if (gc_type == BG_GC) { + if (PageWriteback(page)) + goto out; + set_page_dirty(page); + set_cold_data(page); + } else { + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + + f2fs_wait_on_page_writeback(page, DATA); + + if (clear_page_dirty_for_io(page) && + S_ISDIR(inode->i_mode)) { + dec_page_count(sbi, F2FS_DIRTY_DENTS); + inode_dec_dirty_dents(inode); + } + set_cold_data(page); + do_write_data_page(page, &fio); + clear_cold_data(page); + } +out: + 
f2fs_put_page(page, 1); +} + +/* + * This function tries to get parent node of victim data block, and identifies + * data block validity. If the block is valid, copy that with cold status and + * modify parent node. + * If the parent node is not valid or the data block address is different, + * the victim data block is ignored. + */ +static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, + struct list_head *ilist, unsigned int segno, int gc_type) +{ + struct super_block *sb = sbi->sb; + struct f2fs_summary *entry; + block_t start_addr; + int off; + int phase = 0; + + start_addr = START_BLOCK(sbi, segno); + +next_step: + entry = sum; + + for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { + struct page *data_page; + struct inode *inode; + struct node_info dni; /* dnode info for the data */ + unsigned int ofs_in_node, nofs; + block_t start_bidx; + + /* stop BG_GC if there is not enough free sections. */ + if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) + return; + + if (check_valid_map(sbi, segno, off) == 0) + continue; + + if (phase == 0) { + ra_node_page(sbi, le32_to_cpu(entry->nid)); + continue; + } + + /* Get an inode by ino with checking validity */ + if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0) + continue; + + if (phase == 1) { + ra_node_page(sbi, dni.ino); + continue; + } + + ofs_in_node = le16_to_cpu(entry->ofs_in_node); + + if (phase == 2) { + inode = f2fs_iget(sb, dni.ino); + if (IS_ERR(inode)) + continue; + + start_bidx = start_bidx_of_node(nofs, F2FS_I(inode)); + + data_page = find_data_page(inode, + start_bidx + ofs_in_node, false); + if (IS_ERR(data_page)) + goto next_iput; + + f2fs_put_page(data_page, 0); + add_gc_inode(inode, ilist); + } else { + inode = find_gc_inode(dni.ino, ilist); + if (inode) { + start_bidx = start_bidx_of_node(nofs, + F2FS_I(inode)); + data_page = get_lock_data_page(inode, + start_bidx + ofs_in_node); + if (IS_ERR(data_page)) + continue; + move_data_page(inode, data_page, gc_type); + stat_inc_data_blk_count(sbi, 1); + } + } + continue; +next_iput: + iput(inode); + } + + if (++phase < 4) + goto next_step; + + if (gc_type == FG_GC) { + f2fs_submit_merged_bio(sbi, DATA, WRITE); + + /* + * In the case of FG_GC, it'd be better to reclaim this victim + * completely. 
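
start_bidx_of_node(), used by gc_data_segment() above, converts a direct node's offset inside an inode's node tree into the file block index of the first data address that node holds; the subtractions skip the inode block and the indirect node blocks that sit between direct nodes. The mirror below is standalone, and the three constants are typical f2fs values quoted as assumptions (the real ADDRS_PER_INODE also depends on inline xattrs):

#include <stdio.h>

#define NIDS_PER_BLOCK   1018   /* assumed node ids per indirect node block */
#define ADDRS_PER_BLOCK  1018   /* assumed data addresses per direct node block */
#define ADDRS_PER_INODE   923   /* assumed data addresses stored in the inode itself */

/* Mirror of start_bidx_of_node(); node_ofs must name a direct node block. */
static unsigned long long start_bidx(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx, dec;

	if (node_ofs == 0)
		return 0;                                      /* the inode block itself */
	if (node_ofs <= 2) {
		bidx = node_ofs - 1;                           /* the two plain direct nodes */
	} else if (node_ofs <= indirect_blks) {
		dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);   /* indirect nodes passed so far */
		bidx = node_ofs - 2 - dec;
	} else {
		dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return (unsigned long long)bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

int main(void)
{
	printf("node_ofs 1 -> block %llu\n", start_bidx(1));  /* 923: right after the inode */
	printf("node_ofs 2 -> block %llu\n", start_bidx(2));  /* 923 + 1018 */
	printf("node_ofs 4 -> block %llu\n", start_bidx(4));  /* 923 + 2*1018: first node under an indirect */
	return 0;
}
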
+ */ + if (get_valid_blocks(sbi, segno, 1) != 0) { + phase = 2; + goto next_step; + } + } +} + +static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, + int gc_type, int type) +{ + struct sit_info *sit_i = SIT_I(sbi); + int ret; + mutex_lock(&sit_i->sentry_lock); + ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS); + mutex_unlock(&sit_i->sentry_lock); + return ret; +} + +static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, + struct list_head *ilist, int gc_type) +{ + struct page *sum_page; + struct f2fs_summary_block *sum; + struct blk_plug plug; + + /* read segment summary of victim */ + sum_page = get_sum_page(sbi, segno); + + blk_start_plug(&plug); + + sum = page_address(sum_page); + + switch (GET_SUM_TYPE((&sum->footer))) { + case SUM_TYPE_NODE: + gc_node_segment(sbi, sum->entries, segno, gc_type); + break; + case SUM_TYPE_DATA: + gc_data_segment(sbi, sum->entries, ilist, segno, gc_type); + break; + } + blk_finish_plug(&plug); + + stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer))); + stat_inc_call_count(sbi->stat_info); + + f2fs_put_page(sum_page, 1); +} + +int f2fs_gc(struct f2fs_sb_info *sbi) +{ + struct list_head ilist; + unsigned int segno, i; + int gc_type = BG_GC; + int nfree = 0; + int ret = -1; + + INIT_LIST_HEAD(&ilist); +gc_more: + if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) + goto stop; + + if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) { + gc_type = FG_GC; + write_checkpoint(sbi, false); + } + + if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) + goto stop; + ret = 0; + + for (i = 0; i < sbi->segs_per_sec; i++) + do_garbage_collect(sbi, segno + i, &ilist, gc_type); + + if (gc_type == FG_GC) { + sbi->cur_victim_sec = NULL_SEGNO; + nfree++; + WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec)); + } + + if (has_not_enough_free_secs(sbi, nfree)) + goto gc_more; + + if (gc_type == FG_GC) + write_checkpoint(sbi, false); +stop: + mutex_unlock(&sbi->gc_mutex); + + put_gc_inode(&ilist); + return ret; +} + +void build_gc_manager(struct f2fs_sb_info *sbi) +{ + DIRTY_I(sbi)->v_ops = &default_v_ops; +} + +int __init create_gc_caches(void) +{ + winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", + sizeof(struct inode_entry), NULL); + if (!winode_slab) + return -ENOMEM; + return 0; +} + +void destroy_gc_caches(void) +{ + kmem_cache_destroy(winode_slab); +} diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h new file mode 100644 index 0000000000000..e423aef91143c --- /dev/null +++ b/fs/f2fs/gc.h @@ -0,0 +1,110 @@ +/* + * fs/f2fs/gc.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define GC_THREAD_MIN_WB_PAGES 1 /* + * a threshold to determine + * whether IO subsystem is idle + * or not + */ +#define DEF_GC_THREAD_MIN_SLEEP_TIME 30000 /* milliseconds */ +#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000 +#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */ +#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */ +#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */ + +/* Search max. 
number of dirty segments to select a victim segment */ +#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */ + +struct f2fs_gc_kthread { + struct task_struct *f2fs_gc_task; + wait_queue_head_t gc_wait_queue_head; + + /* for gc sleep time */ + unsigned int min_sleep_time; + unsigned int max_sleep_time; + unsigned int no_gc_sleep_time; + + /* for changing gc mode */ + unsigned int gc_idle; +}; + +struct inode_entry { + struct list_head list; + struct inode *inode; +}; + +/* + * inline functions + */ +static inline block_t free_user_blocks(struct f2fs_sb_info *sbi) +{ + if (free_segments(sbi) < overprovision_segments(sbi)) + return 0; + else + return (free_segments(sbi) - overprovision_segments(sbi)) + << sbi->log_blocks_per_seg; +} + +static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi) +{ + return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100; +} + +static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi) +{ + block_t reclaimable_user_blocks = sbi->user_block_count - + written_block_count(sbi); + return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100; +} + +static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait) +{ + if (wait == gc_th->no_gc_sleep_time) + return wait; + + wait += gc_th->min_sleep_time; + if (wait > gc_th->max_sleep_time) + wait = gc_th->max_sleep_time; + return wait; +} + +static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait) +{ + if (wait == gc_th->no_gc_sleep_time) + wait = gc_th->max_sleep_time; + + wait -= gc_th->min_sleep_time; + if (wait <= gc_th->min_sleep_time) + wait = gc_th->min_sleep_time; + return wait; +} + +static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) +{ + block_t invalid_user_blocks = sbi->user_block_count - + written_block_count(sbi); + /* + * Background GC is triggered with the following condition. + * 1. There are a number of invalid blocks. + * 2. There is not enough free space. + */ + if (invalid_user_blocks > limit_invalid_user_blocks(sbi) && + free_user_blocks(sbi) < limit_free_user_blocks(sbi)) + return true; + return false; +} + +static inline int is_idle(struct f2fs_sb_info *sbi) +{ + struct block_device *bdev = sbi->sb->s_bdev; + struct request_queue *q = bdev_get_queue(bdev); + struct request_list *rl = &q->rq; + return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]); +} diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c new file mode 100644 index 0000000000000..6eb8d269b53b6 --- /dev/null +++ b/fs/f2fs/hash.c @@ -0,0 +1,101 @@ +/* + * fs/f2fs/hash.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Portions of this code from linux/fs/ext3/hash.c + * + * Copyright (C) 2002 by Theodore Ts'o + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
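
has_enough_invalid_blocks() above is what gates background GC: it only runs when more than 40% of the user blocks are invalid (dirty but reclaimable) and the free segments hold less than 40% of that reclaimable space. A standalone walk through the two formulas with made-up volume numbers:

#include <stdio.h>
#include <stdbool.h>

#define LIMIT_INVALID_BLOCK 40   /* same percentages as gc.h above */
#define LIMIT_FREE_BLOCK    40

int main(void)
{
	unsigned long long user_blocks    = 1000000;  /* blocks available for data */
	unsigned long long written_blocks =  500000;  /* blocks still holding valid data */
	unsigned long long free_blocks    =   50000;  /* blocks in fully free segments */

	unsigned long long invalid       = user_blocks - written_blocks;              /* 500000 */
	unsigned long long limit_invalid = user_blocks * LIMIT_INVALID_BLOCK / 100;   /* 400000 */
	unsigned long long limit_free    = invalid * LIMIT_FREE_BLOCK / 100;          /* 200000 */

	bool trigger = invalid > limit_invalid && free_blocks < limit_free;

	printf("invalid=%llu (>%llu?) free=%llu (<%llu?) -> background gc %s\n",
	       invalid, limit_invalid, free_blocks, limit_free,
	       trigger ? "runs" : "waits");
	return 0;
}

With these numbers half the space is invalid and free segments are scarce, so the background thread shortens its sleep and starts collecting; if either condition fails it keeps sleeping and leaves cleaning to foreground GC when space actually runs out.
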
+ */ +#include +#include +#include +#include +#include + +#include "f2fs.h" + +/* + * Hashing code copied from ext3 + */ +#define DELTA 0x9E3779B9 + +static void TEA_transform(unsigned int buf[4], unsigned int const in[]) +{ + __u32 sum = 0; + __u32 b0 = buf[0], b1 = buf[1]; + __u32 a = in[0], b = in[1], c = in[2], d = in[3]; + int n = 16; + + do { + sum += DELTA; + b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); + b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); + } while (--n); + + buf[0] += b0; + buf[1] += b1; +} + +static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num) +{ + unsigned pad, val; + int i; + + pad = (__u32)len | ((__u32)len << 8); + pad |= pad << 16; + + val = pad; + if (len > num * 4) + len = num * 4; + for (i = 0; i < len; i++) { + if ((i % 4) == 0) + val = pad; + val = msg[i] + (val << 8); + if ((i % 4) == 3) { + *buf++ = val; + val = pad; + num--; + } + } + if (--num >= 0) + *buf++ = val; + while (--num >= 0) + *buf++ = pad; +} + +f2fs_hash_t f2fs_dentry_hash(const char *name, size_t len) +{ + __u32 hash; + f2fs_hash_t f2fs_hash; + const char *p; + __u32 in[8], buf[4]; + + if ((len <= 2) && (name[0] == '.') && + (name[1] == '.' || name[1] == '\0')) + return 0; + + /* Initialize the default seed for the hash checksum functions */ + buf[0] = 0x67452301; + buf[1] = 0xefcdab89; + buf[2] = 0x98badcfe; + buf[3] = 0x10325476; + + p = name; + while (1) { + str2hashbuf(p, len, in, 4); + TEA_transform(buf, in); + p += 16; + if (len <= 16) + break; + len -= 16; + } + hash = buf[0]; + f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); + return f2fs_hash; +} diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c new file mode 100644 index 0000000000000..31ee5b164ff9f --- /dev/null +++ b/fs/f2fs/inline.c @@ -0,0 +1,222 @@ +/* + * fs/f2fs/inline.c + * Copyright (c) 2013, Intel Corporation + * Authors: Huajun Li + * Haicheng Li + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#include "f2fs.h" + +bool f2fs_may_inline(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + block_t nr_blocks; + loff_t i_size; + + if (!test_opt(sbi, INLINE_DATA)) + return false; + + nr_blocks = F2FS_I(inode)->i_xattr_nid ? 
3 : 2; + if (inode->i_blocks > nr_blocks) + return false; + + i_size = i_size_read(inode); + if (i_size > MAX_INLINE_DATA) + return false; + + return true; +} + +int f2fs_read_inline_data(struct inode *inode, struct page *page) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct page *ipage; + void *src_addr, *dst_addr; + + if (page->index) { + zero_user_segment(page, 0, PAGE_CACHE_SIZE); + goto out; + } + + ipage = get_node_page(sbi, inode->i_ino); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); + + zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); + + /* Copy the whole inline data block */ + src_addr = inline_data_addr(ipage); + dst_addr = kmap(page); + memcpy(dst_addr, src_addr, MAX_INLINE_DATA); + kunmap(page); + f2fs_put_page(ipage, 1); + +out: + SetPageUptodate(page); + unlock_page(page); + + return 0; +} + +static int __f2fs_convert_inline_data(struct inode *inode, struct page *page) +{ + int err; + struct page *ipage; + struct dnode_of_data dn; + void *src_addr, *dst_addr; + block_t new_blk_addr; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct f2fs_io_info fio = { + .type = DATA, + .rw = WRITE_SYNC | REQ_PRIO, + }; + + f2fs_lock_op(sbi); + ipage = get_node_page(sbi, inode->i_ino); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); + + /* + * i_addr[0] is not used for inline data, + * so reserving new block will not destroy inline data + */ + set_new_dnode(&dn, inode, ipage, NULL, 0); + err = f2fs_reserve_block(&dn, 0); + if (err) { + f2fs_unlock_op(sbi); + return err; + } + + zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); + + /* Copy the whole inline data block */ + src_addr = inline_data_addr(ipage); + dst_addr = kmap(page); + memcpy(dst_addr, src_addr, MAX_INLINE_DATA); + kunmap(page); + SetPageUptodate(page); + + /* write data page to try to make data consistent */ + set_page_writeback(page); + write_data_page(page, &dn, &new_blk_addr, &fio); + update_extent_cache(new_blk_addr, &dn); + f2fs_wait_on_page_writeback(page, DATA); + + /* clear inline data and flag after data writeback */ + zero_user_segment(ipage, INLINE_DATA_OFFSET, + INLINE_DATA_OFFSET + MAX_INLINE_DATA); + clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); + stat_dec_inline_inode(inode); + + sync_inode_page(&dn); + f2fs_put_dnode(&dn); + f2fs_unlock_op(sbi); + return err; +} + +int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size) +{ + struct page *page; + int err; + + if (!f2fs_has_inline_data(inode)) + return 0; + else if (to_size <= MAX_INLINE_DATA) + return 0; + + page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS); + if (!page) + return -ENOMEM; + + err = __f2fs_convert_inline_data(inode, page); + f2fs_put_page(page, 1); + return err; +} + +int f2fs_write_inline_data(struct inode *inode, + struct page *page, unsigned size) +{ + void *src_addr, *dst_addr; + struct page *ipage; + struct dnode_of_data dn; + int err; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + err = get_dnode_of_data(&dn, 0, LOOKUP_NODE); + if (err) + return err; + ipage = dn.inode_page; + + zero_user_segment(ipage, INLINE_DATA_OFFSET, + INLINE_DATA_OFFSET + MAX_INLINE_DATA); + src_addr = kmap(page); + dst_addr = inline_data_addr(ipage); + memcpy(dst_addr, src_addr, size); + kunmap(page); + + /* Release the first data block if it is allocated */ + if (!f2fs_has_inline_data(inode)) { + truncate_data_blocks_range(&dn, 1); + set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); + stat_inc_inline_inode(inode); + } + + sync_inode_page(&dn); + f2fs_put_dnode(&dn); + + return 0; +} + +int 
recover_inline_data(struct inode *inode, struct page *npage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct f2fs_inode *ri = NULL; + void *src_addr, *dst_addr; + struct page *ipage; + + /* + * The inline_data recovery policy is as follows. + * [prev.] [next] of inline_data flag + * o o -> recover inline_data + * o x -> remove inline_data, and then recover data blocks + * x o -> remove inline_data, and then recover inline_data + * x x -> recover data blocks + */ + if (IS_INODE(npage)) + ri = F2FS_INODE(npage); + + if (f2fs_has_inline_data(inode) && + ri && ri->i_inline & F2FS_INLINE_DATA) { +process_inline: + ipage = get_node_page(sbi, inode->i_ino); + f2fs_bug_on(IS_ERR(ipage)); + + src_addr = inline_data_addr(npage); + dst_addr = inline_data_addr(ipage); + memcpy(dst_addr, src_addr, MAX_INLINE_DATA); + update_inode(inode, ipage); + f2fs_put_page(ipage, 1); + return -1; + } + + if (f2fs_has_inline_data(inode)) { + ipage = get_node_page(sbi, inode->i_ino); + f2fs_bug_on(IS_ERR(ipage)); + zero_user_segment(ipage, INLINE_DATA_OFFSET, + INLINE_DATA_OFFSET + MAX_INLINE_DATA); + clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); + update_inode(inode, ipage); + f2fs_put_page(ipage, 1); + } else if (ri && ri->i_inline & F2FS_INLINE_DATA) { + truncate_blocks(inode, 0); + set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); + goto process_inline; + } + return 0; +} diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c new file mode 100644 index 0000000000000..4ee8980b350fe --- /dev/null +++ b/fs/f2fs/inode.c @@ -0,0 +1,288 @@ +/* + * fs/f2fs/inode.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
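
The recovery table in recover_inline_data() above has four cases depending on whether the inode currently carries inline data ("prev") and whether the node page being replayed does ("next"). A standalone restatement of that table; the action strings paraphrase what the function does rather than quoting it:

#include <stdio.h>
#include <stdbool.h>

static const char *inline_recovery_action(bool prev_inline, bool next_inline)
{
	if (prev_inline && next_inline)
		return "copy the inline area from the replayed node page";
	if (prev_inline && !next_inline)
		return "zero the inline area, clear the flag, then recover data blocks";
	if (!prev_inline && next_inline)
		return "truncate existing blocks, set the flag, then copy the inline area";
	return "recover data blocks normally";
}

int main(void)
{
	int prev, next;

	for (prev = 1; prev >= 0; prev--)
		for (next = 1; next >= 0; next--)
			printf("prev=%c next=%c -> %s\n",
			       prev ? 'o' : 'x', next ? 'o' : 'x',
			       inline_recovery_action(prev, next));
	return 0;
}
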
+ */ +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" + +#include + +void f2fs_set_inode_flags(struct inode *inode) +{ + unsigned int flags = F2FS_I(inode)->i_flags; + + inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | + S_NOATIME | S_DIRSYNC); + + if (flags & FS_SYNC_FL) + inode->i_flags |= S_SYNC; + if (flags & FS_APPEND_FL) + inode->i_flags |= S_APPEND; + if (flags & FS_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + if (flags & FS_NOATIME_FL) + inode->i_flags |= S_NOATIME; + if (flags & FS_DIRSYNC_FL) + inode->i_flags |= S_DIRSYNC; +} + +static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) +{ + if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { + if (ri->i_addr[0]) + inode->i_rdev = + old_decode_dev(le32_to_cpu(ri->i_addr[0])); + else + inode->i_rdev = + new_decode_dev(le32_to_cpu(ri->i_addr[1])); + } +} + +static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri) +{ + if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { + if (old_valid_dev(inode->i_rdev)) { + ri->i_addr[0] = + cpu_to_le32(old_encode_dev(inode->i_rdev)); + ri->i_addr[1] = 0; + } else { + ri->i_addr[0] = 0; + ri->i_addr[1] = + cpu_to_le32(new_encode_dev(inode->i_rdev)); + ri->i_addr[2] = 0; + } + } +} + +static int do_read_inode(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct f2fs_inode_info *fi = F2FS_I(inode); + struct page *node_page; + struct f2fs_inode *ri; + + /* Check if ino is within scope */ + if (check_nid_range(sbi, inode->i_ino)) { + f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu", + (unsigned long) inode->i_ino); + return -EINVAL; + } + + node_page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(node_page)) + return PTR_ERR(node_page); + + ri = F2FS_INODE(node_page); + + inode->i_mode = le16_to_cpu(ri->i_mode); + inode->i_uid = le32_to_cpu(ri->i_uid); + inode->i_gid = le32_to_cpu(ri->i_gid); + set_nlink(inode, le32_to_cpu(ri->i_links)); + inode->i_size = le64_to_cpu(ri->i_size); + inode->i_blocks = le64_to_cpu(ri->i_blocks); + + inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime); + inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime); + inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime); + inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec); + inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec); + inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec); + inode->i_generation = le32_to_cpu(ri->i_generation); + + fi->i_current_depth = le32_to_cpu(ri->i_current_depth); + fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid); + fi->i_flags = le32_to_cpu(ri->i_flags); + fi->flags = 0; + fi->i_advise = ri->i_advise; + fi->i_pino = le32_to_cpu(ri->i_pino); + + get_extent_info(&fi->ext, ri->i_ext); + get_inline_info(fi, ri); + + /* get rdev by using inline_info */ + __get_inode_rdev(inode, ri); + + f2fs_put_page(node_page, 1); + return 0; +} + +struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode; + int ret = 0; + + inode = iget_locked(sb, ino); + if (!inode) + return ERR_PTR(-ENOMEM); + + if (!(inode->i_state & I_NEW)) { + trace_f2fs_iget(inode); + return inode; + } + if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi)) + goto make_now; + + ret = do_read_inode(inode); + if (ret) + goto bad_inode; +make_now: + if (ino == F2FS_NODE_INO(sbi)) { + inode->i_mapping->a_ops = &f2fs_node_aops; + mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); + } else if (ino 
== F2FS_META_INO(sbi)) { + inode->i_mapping->a_ops = &f2fs_meta_aops; + mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); + } else if (S_ISREG(inode->i_mode)) { + inode->i_op = &f2fs_file_inode_operations; + inode->i_fop = &f2fs_file_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &f2fs_dir_inode_operations; + inode->i_fop = &f2fs_dir_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; + mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); + } else if (S_ISLNK(inode->i_mode)) { + inode->i_op = &f2fs_symlink_inode_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { + inode->i_op = &f2fs_special_inode_operations; + init_special_inode(inode, inode->i_mode, inode->i_rdev); + } else { + ret = -EIO; + goto bad_inode; + } + unlock_new_inode(inode); + trace_f2fs_iget(inode); + return inode; + +bad_inode: + iget_failed(inode); + trace_f2fs_iget_exit(inode, ret); + return ERR_PTR(ret); +} + +void update_inode(struct inode *inode, struct page *node_page) +{ + struct f2fs_inode *ri; + + f2fs_wait_on_page_writeback(node_page, NODE); + + ri = F2FS_INODE(node_page); + + ri->i_mode = cpu_to_le16(inode->i_mode); + ri->i_advise = F2FS_I(inode)->i_advise; + ri->i_uid = cpu_to_le32(inode->i_uid); + ri->i_gid = cpu_to_le32(inode->i_gid); + ri->i_links = cpu_to_le32(inode->i_nlink); + ri->i_size = cpu_to_le64(i_size_read(inode)); + ri->i_blocks = cpu_to_le64(inode->i_blocks); + set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext); + set_raw_inline(F2FS_I(inode), ri); + + ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec); + ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); + ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); + ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec); + ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); + ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth); + ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid); + ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags); + ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino); + ri->i_generation = cpu_to_le32(inode->i_generation); + + __set_inode_rdev(inode, ri); + set_cold_node(inode, node_page); + set_page_dirty(node_page); + + clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); +} + +int update_inode_page(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct page *node_page; + + node_page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(node_page)) + return PTR_ERR(node_page); + + update_inode(inode, node_page); + f2fs_put_page(node_page, 1); + return 0; +} + +int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + int ret; + + if (inode->i_ino == F2FS_NODE_INO(sbi) || + inode->i_ino == F2FS_META_INO(sbi)) + return 0; + + if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE)) + return 0; + + /* + * We need to lock here to prevent from producing dirty node pages + * during the urgent cleaning time when runing out of free sections. 
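
__get_inode_rdev() and __set_inode_rdev() above reuse the first data-address slots of a device/FIFO/socket inode, which never has data blocks, to store the device number: slot 0 holds the legacy 16-bit encoding when major and minor both fit in a byte, otherwise slot 0 stays zero and slot 1 holds the 32-bit encoding. The layouts below follow old_encode_dev()/new_encode_dev() from include/linux/kdev_t.h; the sample numbers are arbitrary:

#include <stdio.h>

static unsigned int old_encode(unsigned int major, unsigned int minor)
{
	return (major << 8) | minor;                    /* 8-bit major, 8-bit minor */
}

static unsigned int new_encode(unsigned int major, unsigned int minor)
{
	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

int main(void)
{
	unsigned int major = 8, minor = 1;              /* fits the old format */

	if (major < 256 && minor < 256)                 /* the old_valid_dev() test */
		printf("i_addr[0] = 0x%04x, i_addr[1] = 0\n", old_encode(major, minor));
	else
		printf("i_addr[0] = 0, i_addr[1] = 0x%08x\n", new_encode(major, minor));

	/* a large minor forces the new format into slot 1 */
	printf("major 259, minor 300 -> i_addr[1] = 0x%08x\n", new_encode(259, 300));
	return 0;
}

The read side in do_read_inode() only has to test whether slot 0 is non-zero to know which decoder to use.
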
+ */ + f2fs_lock_op(sbi); + ret = update_inode_page(inode); + f2fs_unlock_op(sbi); + + if (wbc) + f2fs_balance_fs(sbi); + + return ret; +} + +/* + * Called at the last iput() if i_nlink is zero + */ +void f2fs_evict_inode(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + + trace_f2fs_evict_inode(inode); + truncate_inode_pages(&inode->i_data, 0); + + if (inode->i_ino == F2FS_NODE_INO(sbi) || + inode->i_ino == F2FS_META_INO(sbi)) + goto no_delete; + + f2fs_bug_on(atomic_read(&F2FS_I(inode)->dirty_dents)); + remove_dirty_dir_inode(inode); + + if (inode->i_nlink || is_bad_inode(inode)) + goto no_delete; + + set_inode_flag(F2FS_I(inode), FI_NO_ALLOC); + i_size_write(inode, 0); + + if (F2FS_HAS_BLOCKS(inode)) + f2fs_truncate(inode); + + f2fs_lock_op(sbi); + remove_inode_page(inode); + stat_dec_inline_inode(inode); + f2fs_unlock_op(sbi); + +no_delete: + end_writeback(inode); +} diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c new file mode 100644 index 0000000000000..791d45b0c2743 --- /dev/null +++ b/fs/f2fs/namei.c @@ -0,0 +1,544 @@ +/* + * fs/f2fs/namei.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "xattr.h" +#include "acl.h" +#include + +static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) +{ + struct super_block *sb = dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + nid_t ino; + struct inode *inode; + bool nid_free = false; + int err; + + inode = new_inode(sb); + if (!inode) + return ERR_PTR(-ENOMEM); + + f2fs_lock_op(sbi); + if (!alloc_nid(sbi, &ino)) { + f2fs_unlock_op(sbi); + err = -ENOSPC; + goto fail; + } + f2fs_unlock_op(sbi); + + if (IS_ANDROID_EMU(sbi, F2FS_I(dir), F2FS_I(dir))) + f2fs_android_emu(sbi, inode, &inode->i_uid, + &inode->i_gid, &mode); + else { + inode->i_uid = current_fsuid(); + + if (dir->i_mode & S_ISGID) { + inode->i_gid = dir->i_gid; + if (S_ISDIR(mode)) + mode |= S_ISGID; + } else { + inode->i_gid = current_fsgid(); + } + } + + inode->i_ino = ino; + inode->i_mode = mode; + inode->i_blocks = 0; + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode->i_generation = sbi->s_next_generation++; + + err = insert_inode_locked(inode); + if (err) { + err = -EINVAL; + nid_free = true; + goto out; + } + trace_f2fs_new_inode(inode, 0); + mark_inode_dirty(inode); + return inode; + +out: + clear_nlink(inode); + unlock_new_inode(inode); +fail: + trace_f2fs_new_inode(inode, err); + make_bad_inode(inode); + iput(inode); + if (nid_free) + alloc_nid_failed(sbi, ino); + return ERR_PTR(err); +} + +static int is_multimedia_file(const unsigned char *s, const char *sub) +{ + size_t slen = strlen(s); + size_t sublen = strlen(sub); + + if (sublen > slen) + return 0; + + return !strncasecmp(s + slen - sublen, sub, sublen); +} + +/* + * Set multimedia files as cold files for hot/cold data separation + */ +static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, + const unsigned char *name) +{ + int i; + __u8 (*extlist)[8] = sbi->raw_super->extension_list; + + int count = le32_to_cpu(sbi->raw_super->extension_count); + for (i = 0; i < count; i++) { + if (is_multimedia_file(name, extlist[i])) { + file_set_cold(inode); + break; + } + } +} + +static int f2fs_create(struct 
inode *dir, struct dentry *dentry, umode_t mode, + struct nameidata *nd) +{ + struct super_block *sb = dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode; + nid_t ino = 0; + int err; + + f2fs_balance_fs(sbi); + + inode = f2fs_new_inode(dir, mode); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + if (!test_opt(sbi, DISABLE_EXT_IDENTIFY)) + set_cold_files(sbi, inode, dentry->d_name.name); + + inode->i_op = &f2fs_file_inode_operations; + inode->i_fop = &f2fs_file_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; + ino = inode->i_ino; + + f2fs_lock_op(sbi); + err = f2fs_add_link(dentry, inode); + f2fs_unlock_op(sbi); + if (err) + goto out; + + alloc_nid_done(sbi, ino); + + d_instantiate(dentry, inode); + unlock_new_inode(inode); + return 0; +out: + clear_nlink(inode); + unlock_new_inode(inode); + make_bad_inode(inode); + iput(inode); + alloc_nid_failed(sbi, ino); + return err; +} + +static int f2fs_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode = old_dentry->d_inode; + struct super_block *sb = dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + int err; + + f2fs_balance_fs(sbi); + + inode->i_ctime = CURRENT_TIME; + ihold(inode); + + set_inode_flag(F2FS_I(inode), FI_INC_LINK); + f2fs_lock_op(sbi); + err = f2fs_add_link(dentry, inode); + f2fs_unlock_op(sbi); + if (err) + goto out; + + d_instantiate(dentry, inode); + return 0; +out: + clear_inode_flag(F2FS_I(inode), FI_INC_LINK); + iput(inode); + return err; +} + +struct dentry *f2fs_get_parent(struct dentry *child) +{ + struct qstr dotdot = {.name = "..", .len = 2}; + unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot); + if (!ino) + return ERR_PTR(-ENOENT); + return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino)); +} + +static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +{ + struct inode *inode = NULL; + struct f2fs_dir_entry *de; + struct page *page; + + if (dentry->d_name.len > F2FS_NAME_LEN) + return ERR_PTR(-ENAMETOOLONG); + + de = f2fs_find_entry(dir, &dentry->d_name, &page); + if (de) { + nid_t ino = le32_to_cpu(de->ino); + kunmap(page); + f2fs_put_page(page, 0); + + inode = f2fs_iget(dir->i_sb, ino); + if (IS_ERR(inode)) + return ERR_CAST(inode); + } + + return d_splice_alias(inode, dentry); +} + +static int f2fs_unlink(struct inode *dir, struct dentry *dentry) +{ + struct super_block *sb = dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode = dentry->d_inode; + struct f2fs_dir_entry *de; + struct page *page; + int err = -ENOENT; + + trace_f2fs_unlink_enter(dir, dentry); + f2fs_balance_fs(sbi); + + de = f2fs_find_entry(dir, &dentry->d_name, &page); + if (!de) + goto fail; + + f2fs_lock_op(sbi); + err = acquire_orphan_inode(sbi); + if (err) { + f2fs_unlock_op(sbi); + kunmap(page); + f2fs_put_page(page, 0); + goto fail; + } + f2fs_delete_entry(de, page, inode); + f2fs_unlock_op(sbi); + + /* In order to evict this inode, we set it dirty */ + mark_inode_dirty(inode); +fail: + trace_f2fs_unlink_exit(inode, err); + return err; +} + +static int f2fs_symlink(struct inode *dir, struct dentry *dentry, + const char *symname) +{ + struct super_block *sb = dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode; + size_t symlen = strlen(symname) + 1; + int err; + + f2fs_balance_fs(sbi); + + inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + inode->i_op = &f2fs_symlink_inode_operations; + 
inode->i_mapping->a_ops = &f2fs_dblock_aops; + + f2fs_lock_op(sbi); + err = f2fs_add_link(dentry, inode); + f2fs_unlock_op(sbi); + if (err) + goto out; + + err = page_symlink(inode, symname, symlen); + alloc_nid_done(sbi, inode->i_ino); + + d_instantiate(dentry, inode); + unlock_new_inode(inode); + return err; +out: + clear_nlink(inode); + unlock_new_inode(inode); + make_bad_inode(inode); + iput(inode); + alloc_nid_failed(sbi, inode->i_ino); + return err; +} + +static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); + struct inode *inode; + int err; + + f2fs_balance_fs(sbi); + + inode = f2fs_new_inode(dir, S_IFDIR | mode); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + inode->i_op = &f2fs_dir_inode_operations; + inode->i_fop = &f2fs_dir_operations; + inode->i_mapping->a_ops = &f2fs_dblock_aops; + mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); + + set_inode_flag(F2FS_I(inode), FI_INC_LINK); + f2fs_lock_op(sbi); + err = f2fs_add_link(dentry, inode); + f2fs_unlock_op(sbi); + if (err) + goto out_fail; + + alloc_nid_done(sbi, inode->i_ino); + + d_instantiate(dentry, inode); + unlock_new_inode(inode); + + return 0; + +out_fail: + clear_inode_flag(F2FS_I(inode), FI_INC_LINK); + clear_nlink(inode); + unlock_new_inode(inode); + make_bad_inode(inode); + iput(inode); + alloc_nid_failed(sbi, inode->i_ino); + return err; +} + +static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + if (f2fs_empty_dir(inode)) + return f2fs_unlink(dir, dentry); + return -ENOTEMPTY; +} + +static int f2fs_mknod(struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t rdev) +{ + struct super_block *sb = dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode; + int err = 0; + + if (!new_valid_dev(rdev)) + return -EINVAL; + + f2fs_balance_fs(sbi); + + inode = f2fs_new_inode(dir, mode); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + init_special_inode(inode, inode->i_mode, rdev); + inode->i_op = &f2fs_special_inode_operations; + + f2fs_lock_op(sbi); + err = f2fs_add_link(dentry, inode); + f2fs_unlock_op(sbi); + if (err) + goto out; + + alloc_nid_done(sbi, inode->i_ino); + d_instantiate(dentry, inode); + unlock_new_inode(inode); + return 0; +out: + clear_nlink(inode); + unlock_new_inode(inode); + make_bad_inode(inode); + iput(inode); + alloc_nid_failed(sbi, inode->i_ino); + return err; +} + +static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + struct super_block *sb = old_dir->i_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *old_inode = old_dentry->d_inode; + struct inode *new_inode = new_dentry->d_inode; + struct page *old_dir_page; + struct page *old_page, *new_page; + struct f2fs_dir_entry *old_dir_entry = NULL; + struct f2fs_dir_entry *old_entry; + struct f2fs_dir_entry *new_entry; + int err = -ENOENT; + + f2fs_balance_fs(sbi); + + old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); + if (!old_entry) + goto out; + + if (S_ISDIR(old_inode->i_mode)) { + err = -EIO; + old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); + if (!old_dir_entry) + goto out_old; + } + + f2fs_lock_op(sbi); + + if (new_inode) { + + err = -ENOTEMPTY; + if (old_dir_entry && !f2fs_empty_dir(new_inode)) + goto out_dir; + + err = -ENOENT; + new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, + &new_page); + if (!new_entry) + goto out_dir; + + err = 
acquire_orphan_inode(sbi); + if (err) + goto put_out_dir; + + if (update_dent_inode(old_inode, &new_dentry->d_name)) { + release_orphan_inode(sbi); + goto put_out_dir; + } + + f2fs_set_link(new_dir, new_entry, new_page, old_inode); + F2FS_I(old_inode)->i_pino = new_dir->i_ino; + + new_inode->i_ctime = CURRENT_TIME; + if (old_dir_entry) + drop_nlink(new_inode); + drop_nlink(new_inode); + mark_inode_dirty(new_inode); + + if (!new_inode->i_nlink) + add_orphan_inode(sbi, new_inode->i_ino); + else + release_orphan_inode(sbi); + + update_inode_page(old_inode); + update_inode_page(new_inode); + } else { + err = f2fs_add_link(new_dentry, old_inode); + if (err) + goto out_dir; + + if (old_dir_entry) { + inc_nlink(new_dir); + update_inode_page(new_dir); + } + } + + old_inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(old_inode); + + f2fs_delete_entry(old_entry, old_page, NULL); + + if (old_dir_entry) { + if (old_dir != new_dir) { + f2fs_set_link(old_inode, old_dir_entry, + old_dir_page, new_dir); + F2FS_I(old_inode)->i_pino = new_dir->i_ino; + update_inode_page(old_inode); + } else { + kunmap(old_dir_page); + f2fs_put_page(old_dir_page, 0); + } + drop_nlink(old_dir); + mark_inode_dirty(old_dir); + update_inode_page(old_dir); + } + + f2fs_unlock_op(sbi); + return 0; + +put_out_dir: + if (PageLocked(new_page)) + f2fs_put_page(new_page, 1); + else + f2fs_put_page(new_page, 0); +out_dir: + if (old_dir_entry) { + kunmap(old_dir_page); + f2fs_put_page(old_dir_page, 0); + } + f2fs_unlock_op(sbi); +out_old: + kunmap(old_page); + f2fs_put_page(old_page, 0); +out: + return err; +} + +const struct inode_operations f2fs_dir_inode_operations = { + .create = f2fs_create, + .lookup = f2fs_lookup, + .link = f2fs_link, + .unlink = f2fs_unlink, + .symlink = f2fs_symlink, + .mkdir = f2fs_mkdir, + .rmdir = f2fs_rmdir, + .mknod = f2fs_mknod, + .rename = f2fs_rename, + .getattr = f2fs_getattr, + .setattr = f2fs_setattr, + .get_acl = f2fs_get_acl, +#ifdef CONFIG_F2FS_FS_XATTR + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = f2fs_listxattr, + .removexattr = generic_removexattr, +#endif +}; + +const struct inode_operations f2fs_symlink_inode_operations = { + .readlink = generic_readlink, + .follow_link = page_follow_link_light, + .put_link = page_put_link, + .getattr = f2fs_getattr, + .setattr = f2fs_setattr, +#ifdef CONFIG_F2FS_FS_XATTR + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = f2fs_listxattr, + .removexattr = generic_removexattr, +#endif +}; + +const struct inode_operations f2fs_special_inode_operations = { + .getattr = f2fs_getattr, + .setattr = f2fs_setattr, + .get_acl = f2fs_get_acl, +#ifdef CONFIG_F2FS_FS_XATTR + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = f2fs_listxattr, + .removexattr = generic_removexattr, +#endif +}; diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c new file mode 100644 index 0000000000000..2740c942bc46a --- /dev/null +++ b/fs/f2fs/node.c @@ -0,0 +1,1939 @@ +/* + * fs/f2fs/node.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include + +static struct kmem_cache *nat_entry_slab; +static struct kmem_cache *free_nid_slab; + +static void clear_node_page_dirty(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); + unsigned int long flags; + + if (PageDirty(page)) { + spin_lock_irqsave(&mapping->tree_lock, flags); + radix_tree_tag_clear(&mapping->page_tree, + page_index(page), + PAGECACHE_TAG_DIRTY); + spin_unlock_irqrestore(&mapping->tree_lock, flags); + + clear_page_dirty_for_io(page); + dec_page_count(sbi, F2FS_DIRTY_NODES); + } + ClearPageUptodate(page); +} + +static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) +{ + pgoff_t index = current_nat_addr(sbi, nid); + return get_meta_page(sbi, index); +} + +static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) +{ + struct page *src_page; + struct page *dst_page; + pgoff_t src_off; + pgoff_t dst_off; + void *src_addr; + void *dst_addr; + struct f2fs_nm_info *nm_i = NM_I(sbi); + + src_off = current_nat_addr(sbi, nid); + dst_off = next_nat_addr(sbi, src_off); + + /* get current nat block page with lock */ + src_page = get_meta_page(sbi, src_off); + + /* Dirty src_page means that it is already the new target NAT page. */ + if (PageDirty(src_page)) + return src_page; + + dst_page = grab_meta_page(sbi, dst_off); + + src_addr = page_address(src_page); + dst_addr = page_address(dst_page); + memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + set_page_dirty(dst_page); + f2fs_put_page(src_page, 1); + + set_to_next_nat(nm_i, nid); + + return dst_page; +} + +/* + * Readahead NAT pages + */ +static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid) +{ + struct address_space *mapping = META_MAPPING(sbi); + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct page *page; + pgoff_t index; + int i; + struct f2fs_io_info fio = { + .type = META, + .rw = READ_SYNC | REQ_META | REQ_PRIO + }; + + + for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) { + if (unlikely(nid >= nm_i->max_nid)) + nid = 0; + index = current_nat_addr(sbi, nid); + + page = grab_cache_page(mapping, index); + if (!page) + continue; + if (PageUptodate(page)) { + mark_page_accessed(page); + f2fs_put_page(page, 1); + continue; + } + f2fs_submit_page_mbio(sbi, page, index, &fio); + mark_page_accessed(page); + f2fs_put_page(page, 0); + } + f2fs_submit_merged_bio(sbi, META, READ); +} + +static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) +{ + return radix_tree_lookup(&nm_i->nat_root, n); +} + +static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, + nid_t start, unsigned int nr, struct nat_entry **ep) +{ + return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); +} + +static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) +{ + list_del(&e->list); + radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); + nm_i->nat_cnt--; + kmem_cache_free(nat_entry_slab, e); +} + +int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct nat_entry *e; + int is_cp = 1; + + read_lock(&nm_i->nat_tree_lock); + e = __lookup_nat_cache(nm_i, nid); + if (e && !e->checkpointed) + is_cp = 0; + read_unlock(&nm_i->nat_tree_lock); + return is_cp; +} + +static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid) +{ + struct nat_entry 
*new; + + new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC); + if (!new) + return NULL; + if (radix_tree_insert(&nm_i->nat_root, nid, new)) { + kmem_cache_free(nat_entry_slab, new); + return NULL; + } + memset(new, 0, sizeof(struct nat_entry)); + nat_set_nid(new, nid); + list_add_tail(&new->list, &nm_i->nat_entries); + nm_i->nat_cnt++; + return new; +} + +static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, + struct f2fs_nat_entry *ne) +{ + struct nat_entry *e; +retry: + write_lock(&nm_i->nat_tree_lock); + e = __lookup_nat_cache(nm_i, nid); + if (!e) { + e = grab_nat_entry(nm_i, nid); + if (!e) { + write_unlock(&nm_i->nat_tree_lock); + goto retry; + } + nat_set_blkaddr(e, le32_to_cpu(ne->block_addr)); + nat_set_ino(e, le32_to_cpu(ne->ino)); + nat_set_version(e, ne->version); + e->checkpointed = true; + } + write_unlock(&nm_i->nat_tree_lock); +} + +static int set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, + block_t new_blkaddr) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct nat_entry *e; +retry: + write_lock(&nm_i->nat_tree_lock); + e = __lookup_nat_cache(nm_i, ni->nid); + if (!e) { + e = grab_nat_entry(nm_i, ni->nid); + if (!e) { + write_unlock(&nm_i->nat_tree_lock); + goto retry; + } + e->ni = *ni; + e->checkpointed = true; + f2fs_bug_on(ni->blk_addr == NEW_ADDR); + } else if (new_blkaddr == NEW_ADDR) { + /* + * when nid is reallocated, + * previous nat entry can be remained in nat cache. + * So, reinitialize it with new information. + */ + e->ni = *ni; + if (ni->blk_addr != NULL_ADDR) { + f2fs_msg(sbi->sb, KERN_ERR, "node block address is " + "already set: %u", ni->blk_addr); + f2fs_handle_error(sbi); + /* just give up on this node */ + write_unlock(&nm_i->nat_tree_lock); + return -EIO; + } + } + + if (new_blkaddr == NEW_ADDR) + e->checkpointed = false; + + /* sanity check */ + f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr); + f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR && + new_blkaddr == NULL_ADDR); + f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR && + new_blkaddr == NEW_ADDR); + f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR && + nat_get_blkaddr(e) != NULL_ADDR && + new_blkaddr == NEW_ADDR); + + /* increament version no as node is removed */ + if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { + unsigned char version = nat_get_version(e); + nat_set_version(e, inc_node_version(version)); + } + + /* change address */ + nat_set_blkaddr(e, new_blkaddr); + __set_nat_cache_dirty(nm_i, e); + write_unlock(&nm_i->nat_tree_lock); + return 0; +} + +int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + + if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD) + return 0; + + write_lock(&nm_i->nat_tree_lock); + while (nr_shrink && !list_empty(&nm_i->nat_entries)) { + struct nat_entry *ne; + ne = list_first_entry(&nm_i->nat_entries, + struct nat_entry, list); + __del_from_nat_cache(nm_i, ne); + nr_shrink--; + } + write_unlock(&nm_i->nat_tree_lock); + return nr_shrink; +} + +/* + * This function returns always success + */ +void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + nid_t start_nid = START_NID(nid); + struct f2fs_nat_block *nat_blk; + struct page *page = NULL; + struct f2fs_nat_entry ne; + struct nat_entry *e; + int i; + + memset(&ne, 0, sizeof(struct f2fs_nat_entry)); + ni->nid = nid; + + /* Check nat cache */ + 
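+	/*
+	 * Lookup order: the in-memory nat cache first, then the NAT journal in
+	 * the hot-data current segment summary, and finally the on-disk NAT
+	 * block; the result of the slower paths is cached for later lookups.
+	 */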
read_lock(&nm_i->nat_tree_lock); + e = __lookup_nat_cache(nm_i, nid); + if (e) { + ni->ino = nat_get_ino(e); + ni->blk_addr = nat_get_blkaddr(e); + ni->version = nat_get_version(e); + } + read_unlock(&nm_i->nat_tree_lock); + if (e) + return; + + /* Check current segment summary */ + mutex_lock(&curseg->curseg_mutex); + i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0); + if (i >= 0) { + ne = nat_in_journal(sum, i); + node_info_from_raw_nat(ni, &ne); + } + mutex_unlock(&curseg->curseg_mutex); + if (i >= 0) + goto cache; + + /* Fill node_info from nat page */ + page = get_current_nat_page(sbi, start_nid); + nat_blk = (struct f2fs_nat_block *)page_address(page); + ne = nat_blk->entries[nid - start_nid]; + node_info_from_raw_nat(ni, &ne); + f2fs_put_page(page, 1); +cache: + /* cache nat entry */ + cache_nat_entry(NM_I(sbi), nid, &ne); +} + +/* + * The maximum depth is four. + * Offset[0] will have raw inode offset. + */ +static int get_node_path(struct f2fs_inode_info *fi, long block, + int offset[4], unsigned int noffset[4]) +{ + const long direct_index = ADDRS_PER_INODE(fi); + const long direct_blks = ADDRS_PER_BLOCK; + const long dptrs_per_blk = NIDS_PER_BLOCK; + const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; + const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK; + int n = 0; + int level = 0; + + noffset[0] = 0; + + if (block < direct_index) { + offset[n] = block; + goto got; + } + block -= direct_index; + if (block < direct_blks) { + offset[n++] = NODE_DIR1_BLOCK; + noffset[n] = 1; + offset[n] = block; + level = 1; + goto got; + } + block -= direct_blks; + if (block < direct_blks) { + offset[n++] = NODE_DIR2_BLOCK; + noffset[n] = 2; + offset[n] = block; + level = 1; + goto got; + } + block -= direct_blks; + if (block < indirect_blks) { + offset[n++] = NODE_IND1_BLOCK; + noffset[n] = 3; + offset[n++] = block / direct_blks; + noffset[n] = 4 + offset[n - 1]; + offset[n] = block % direct_blks; + level = 2; + goto got; + } + block -= indirect_blks; + if (block < indirect_blks) { + offset[n++] = NODE_IND2_BLOCK; + noffset[n] = 4 + dptrs_per_blk; + offset[n++] = block / direct_blks; + noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; + offset[n] = block % direct_blks; + level = 2; + goto got; + } + block -= indirect_blks; + if (block < dindirect_blks) { + offset[n++] = NODE_DIND_BLOCK; + noffset[n] = 5 + (dptrs_per_blk * 2); + offset[n++] = block / indirect_blks; + noffset[n] = 6 + (dptrs_per_blk * 2) + + offset[n - 1] * (dptrs_per_blk + 1); + offset[n++] = (block / direct_blks) % dptrs_per_blk; + noffset[n] = 7 + (dptrs_per_blk * 2) + + offset[n - 2] * (dptrs_per_blk + 1) + + offset[n - 1]; + offset[n] = block % direct_blks; + level = 3; + goto got; + } else { + BUG(); + } +got: + return level; +} + +/* + * Caller should call f2fs_put_dnode(dn). + * Also, it should grab and release a rwsem by calling f2fs_lock_op() and + * f2fs_unlock_op() only if ro is not set RDONLY_NODE. + * In the case of RDONLY_NODE, we don't need to care about mutex. 
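+ *
+ * A rough worked example for get_node_path() above (assuming the usual
+ * 4KB-block constants ADDRS_PER_INODE = 923 and ADDRS_PER_BLOCK =
+ * NIDS_PER_BLOCK = 1018): block 500 is addressed directly from the inode
+ * (level 0), while block 1000 becomes offset 77 in the first direct node
+ * NODE_DIR1_BLOCK (level 1), since 1000 - 923 = 77 < 1018.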
+ */ +int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct page *npage[4]; + struct page *parent; + int offset[4]; + unsigned int noffset[4]; + nid_t nids[4]; + int level, i; + int err = 0; + + level = get_node_path(F2FS_I(dn->inode), index, offset, noffset); + + nids[0] = dn->inode->i_ino; + npage[0] = dn->inode_page; + + if (!npage[0]) { + npage[0] = get_node_page(sbi, nids[0]); + if (IS_ERR(npage[0])) + return PTR_ERR(npage[0]); + } + parent = npage[0]; + if (level != 0) + nids[1] = get_nid(parent, offset[0], true); + dn->inode_page = npage[0]; + dn->inode_page_locked = true; + + /* get indirect or direct nodes */ + for (i = 1; i <= level; i++) { + bool done = false; + + if (!nids[i] && mode == ALLOC_NODE) { + /* alloc new node */ + if (!alloc_nid(sbi, &(nids[i]))) { + err = -ENOSPC; + goto release_pages; + } + + dn->nid = nids[i]; + npage[i] = new_node_page(dn, noffset[i], NULL); + if (IS_ERR(npage[i])) { + alloc_nid_failed(sbi, nids[i]); + err = PTR_ERR(npage[i]); + goto release_pages; + } + + set_nid(parent, offset[i - 1], nids[i], i == 1); + alloc_nid_done(sbi, nids[i]); + done = true; + } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) { + npage[i] = get_node_page_ra(parent, offset[i - 1]); + if (IS_ERR(npage[i])) { + err = PTR_ERR(npage[i]); + goto release_pages; + } + done = true; + } + if (i == 1) { + dn->inode_page_locked = false; + unlock_page(parent); + } else { + f2fs_put_page(parent, 1); + } + + if (!done) { + npage[i] = get_node_page(sbi, nids[i]); + if (IS_ERR(npage[i])) { + err = PTR_ERR(npage[i]); + f2fs_put_page(npage[0], 0); + goto release_out; + } + } + if (i < level) { + parent = npage[i]; + nids[i + 1] = get_nid(parent, offset[i], false); + } + } + dn->nid = nids[level]; + dn->ofs_in_node = offset[level]; + dn->node_page = npage[level]; + dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); + return 0; + +release_pages: + f2fs_put_page(parent, 1); + if (i > 1) + f2fs_put_page(npage[0], 0); +release_out: + dn->inode_page = NULL; + dn->node_page = NULL; + return err; +} + +static void truncate_node(struct dnode_of_data *dn) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct node_info ni; + + get_node_info(sbi, dn->nid, &ni); + if (dn->inode->i_blocks == 0) { + if (ni.blk_addr != NULL_ADDR) { + f2fs_msg(sbi->sb, KERN_ERR, + "empty node still has block address %u ", + ni.blk_addr); + f2fs_handle_error(sbi); + } + goto invalidate; + } + f2fs_bug_on(ni.blk_addr == NULL_ADDR); + + /* Deallocate node address */ + invalidate_blocks(sbi, ni.blk_addr); + dec_valid_node_count(sbi, dn->inode); + set_node_addr(sbi, &ni, NULL_ADDR); + + if (dn->nid == dn->inode->i_ino) { + remove_orphan_inode(sbi, dn->nid); + dec_valid_inode_count(sbi); + } else { + sync_inode_page(dn); + } +invalidate: + clear_node_page_dirty(dn->node_page); + F2FS_SET_SB_DIRT(sbi); + + f2fs_put_page(dn->node_page, 1); + + invalidate_mapping_pages(NODE_MAPPING(sbi), + dn->node_page->index, dn->node_page->index); + + dn->node_page = NULL; + trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); +} + +static int truncate_dnode(struct dnode_of_data *dn) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct page *page; + + if (dn->nid == 0) + return 1; + + /* get direct node */ + page = get_node_page(sbi, dn->nid); + if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) + return 1; + else if (IS_ERR(page)) + return PTR_ERR(page); + + /* Make dnode_of_data for parameter */ + 
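+	/*
+	 * Data blocks referenced by this direct node are invalidated first
+	 * (truncate_data_blocks), then the node block itself is released
+	 * (truncate_node); returning 1 reports one freed node to the caller.
+	 */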
dn->node_page = page; + dn->ofs_in_node = 0; + truncate_data_blocks(dn); + truncate_node(dn); + return 1; +} + +static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs, + int ofs, int depth) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct dnode_of_data rdn = *dn; + struct page *page; + struct f2fs_node *rn; + nid_t child_nid; + unsigned int child_nofs; + int freed = 0; + int i, ret; + + if (dn->nid == 0) + return NIDS_PER_BLOCK + 1; + + trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); + + page = get_node_page(sbi, dn->nid); + if (IS_ERR(page)) { + trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); + return PTR_ERR(page); + } + + rn = F2FS_NODE(page); + if (depth < 3) { + for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { + child_nid = le32_to_cpu(rn->in.nid[i]); + if (child_nid == 0) + continue; + rdn.nid = child_nid; + ret = truncate_dnode(&rdn); + if (ret < 0) + goto out_err; + set_nid(page, i, 0, false); + } + } else { + child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1; + for (i = ofs; i < NIDS_PER_BLOCK; i++) { + child_nid = le32_to_cpu(rn->in.nid[i]); + if (child_nid == 0) { + child_nofs += NIDS_PER_BLOCK + 1; + continue; + } + rdn.nid = child_nid; + ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); + if (ret == (NIDS_PER_BLOCK + 1)) { + set_nid(page, i, 0, false); + child_nofs += ret; + } else if (ret < 0 && ret != -ENOENT) { + goto out_err; + } + } + freed = child_nofs; + } + + if (!ofs) { + /* remove current indirect node */ + dn->node_page = page; + truncate_node(dn); + freed++; + } else { + f2fs_put_page(page, 1); + } + trace_f2fs_truncate_nodes_exit(dn->inode, freed); + return freed; + +out_err: + f2fs_put_page(page, 1); + trace_f2fs_truncate_nodes_exit(dn->inode, ret); + return ret; +} + +static int truncate_partial_nodes(struct dnode_of_data *dn, + struct f2fs_inode *ri, int *offset, int depth) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct page *pages[2]; + nid_t nid[3]; + nid_t child_nid; + int err = 0; + int i; + int idx = depth - 2; + + nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); + if (!nid[0]) + return 0; + + /* get indirect nodes in the path */ + for (i = 0; i < idx + 1; i++) { + /* refernece count'll be increased */ + pages[i] = get_node_page(sbi, nid[i]); + if (IS_ERR(pages[i])) { + err = PTR_ERR(pages[i]); + idx = i - 1; + goto fail; + } + nid[i + 1] = get_nid(pages[i], offset[i + 1], false); + } + + /* free direct nodes linked to a partial indirect node */ + for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { + child_nid = get_nid(pages[idx], i, false); + if (!child_nid) + continue; + dn->nid = child_nid; + err = truncate_dnode(dn); + if (err < 0) + goto fail; + set_nid(pages[idx], i, 0, false); + } + + if (offset[idx + 1] == 0) { + dn->node_page = pages[idx]; + dn->nid = nid[idx]; + truncate_node(dn); + } else { + f2fs_put_page(pages[idx], 1); + } + offset[idx]++; + offset[idx + 1] = 0; + idx--; +fail: + for (i = idx; i >= 0; i--) + f2fs_put_page(pages[i], 1); + + trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); + + return err; +} + +/* + * All the block addresses of data and nodes should be nullified. 
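+ *
+ * The walk starts at the path computed for 'from': any partially covered
+ * indirect level is trimmed with truncate_partial_nodes(), then the
+ * remaining direct, indirect and double-indirect pointers (NODE_DIR1_BLOCK
+ * through NODE_DIND_BLOCK) are dropped via truncate_dnode() and
+ * truncate_nodes().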
+ */ +int truncate_inode_blocks(struct inode *inode, pgoff_t from) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + int err = 0, cont = 1; + int level, offset[4], noffset[4]; + unsigned int nofs = 0; + struct f2fs_inode *ri; + struct dnode_of_data dn; + struct page *page; + + noffset[1] = 0; + + trace_f2fs_truncate_inode_blocks_enter(inode, from); + + level = get_node_path(F2FS_I(inode), from, offset, noffset); +restart: + page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(page)) { + trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); + return PTR_ERR(page); + } + + set_new_dnode(&dn, inode, page, NULL, 0); + unlock_page(page); + + ri = F2FS_INODE(page); + switch (level) { + case 0: + case 1: + nofs = noffset[1]; + break; + case 2: + nofs = noffset[1]; + if (!offset[level - 1]) + goto skip_partial; + err = truncate_partial_nodes(&dn, ri, offset, level); + if (err < 0 && err != -ENOENT) + goto fail; + nofs += 1 + NIDS_PER_BLOCK; + break; + case 3: + nofs = 5 + 2 * NIDS_PER_BLOCK; + if (!offset[level - 1]) + goto skip_partial; + err = truncate_partial_nodes(&dn, ri, offset, level); + if (err < 0 && err != -ENOENT) + goto fail; + break; + default: + BUG(); + } + +skip_partial: + while (cont) { + dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); + switch (offset[0]) { + case NODE_DIR1_BLOCK: + case NODE_DIR2_BLOCK: + err = truncate_dnode(&dn); + break; + + case NODE_IND1_BLOCK: + case NODE_IND2_BLOCK: + err = truncate_nodes(&dn, nofs, offset[1], 2); + break; + + case NODE_DIND_BLOCK: + err = truncate_nodes(&dn, nofs, offset[1], 3); + cont = 0; + break; + + default: + BUG(); + } + if (err < 0 && err != -ENOENT) + goto fail; + if (offset[1] == 0 && + ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { + lock_page(page); + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { + f2fs_put_page(page, 1); + goto restart; + } + wait_on_page_writeback(page); + ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; + set_page_dirty(page); + unlock_page(page); + } + offset[1] = 0; + offset[0]++; + nofs += err; + } +fail: + f2fs_put_page(page, 0); + trace_f2fs_truncate_inode_blocks_exit(inode, err); + return err > 0 ? 0 : err; +} + +int truncate_xattr_node(struct inode *inode, struct page *page) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + nid_t nid = F2FS_I(inode)->i_xattr_nid; + struct dnode_of_data dn; + struct page *npage; + + if (!nid) + return 0; + + npage = get_node_page(sbi, nid); + if (IS_ERR(npage)) + return PTR_ERR(npage); + + F2FS_I(inode)->i_xattr_nid = 0; + + /* need to do checkpoint during fsync */ + F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi)); + + set_new_dnode(&dn, inode, page, npage, nid); + + if (page) + dn.inode_page_locked = true; + truncate_node(&dn); + return 0; +} + +/* + * Caller should grab and release a rwsem by calling f2fs_lock_op() and + * f2fs_unlock_op(). 
+ */ +void remove_inode_page(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct page *page; + nid_t ino = inode->i_ino; + struct dnode_of_data dn; + + page = get_node_page(sbi, ino); + if (IS_ERR(page)) + return; + + if (truncate_xattr_node(inode, page)) { + f2fs_put_page(page, 1); + return; + } + /* 0 is possible, after f2fs_new_inode() is failed */ + if (inode->i_blocks != 0 && inode->i_blocks != 1) { + f2fs_msg(sbi->sb, KERN_ERR, "inode %u still has %llu blocks", + ino, inode->i_blocks); + f2fs_handle_error(sbi); + } + set_new_dnode(&dn, inode, page, page, ino); + truncate_node(&dn); +} + +struct page *new_inode_page(struct inode *inode, const struct qstr *name) +{ + struct dnode_of_data dn; + + /* allocate inode page for new inode */ + set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); + + /* caller should f2fs_put_page(page, 1); */ + return new_node_page(&dn, 0, NULL); +} + +struct page *new_node_page(struct dnode_of_data *dn, + unsigned int ofs, struct page *ipage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct node_info old_ni, new_ni; + struct page *page; + int err; + + if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) + return ERR_PTR(-EPERM); + + page = grab_cache_page(NODE_MAPPING(sbi), dn->nid); + if (!page) + return ERR_PTR(-ENOMEM); + + if (unlikely(!inc_valid_node_count(sbi, dn->inode))) { + err = -ENOSPC; + goto fail; + } + + get_node_info(sbi, dn->nid, &old_ni); + + /* Reinitialize old_ni with new node page */ + f2fs_bug_on(old_ni.blk_addr != NULL_ADDR); + new_ni = old_ni; + new_ni.ino = dn->inode->i_ino; + set_node_addr(sbi, &new_ni, NEW_ADDR); + + fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); + set_cold_node(dn->inode, page); + SetPageUptodate(page); + set_page_dirty(page); + + if (ofs == XATTR_NODE_OFFSET) + F2FS_I(dn->inode)->i_xattr_nid = dn->nid; + + dn->node_page = page; + if (ipage) + update_inode(dn->inode, ipage); + else + sync_inode_page(dn); + if (ofs == 0) + inc_valid_inode_count(sbi); + + return page; + +fail: + clear_node_page_dirty(page); + f2fs_put_page(page, 1); + return ERR_PTR(err); +} + +/* + * Caller should do after getting the following values. 
+ * 0: f2fs_put_page(page, 0) + * LOCKED_PAGE: f2fs_put_page(page, 1) + * error: nothing + */ +static int read_node_page(struct page *page, int rw) +{ + struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); + struct node_info ni; + + get_node_info(sbi, page->index, &ni); + + if (unlikely(ni.blk_addr == NULL_ADDR)) { + f2fs_put_page(page, 1); + return -ENOENT; + } + + if (PageUptodate(page)) + return LOCKED_PAGE; + + return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw); +} + +/* + * Readahead a node page + */ +void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) +{ + struct page *apage; + int err; + + apage = find_get_page(NODE_MAPPING(sbi), nid); + if (apage && PageUptodate(apage)) { + f2fs_put_page(apage, 0); + return; + } + f2fs_put_page(apage, 0); + + apage = grab_cache_page(NODE_MAPPING(sbi), nid); + if (!apage) + return; + + err = read_node_page(apage, READA); + if (err == 0) + f2fs_put_page(apage, 0); + else if (err == LOCKED_PAGE) + f2fs_put_page(apage, 1); +} + +struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) +{ + struct page *page; + int err; +repeat: + page = grab_cache_page(NODE_MAPPING(sbi), nid); + if (!page) + return ERR_PTR(-ENOMEM); + + err = read_node_page(page, READ_SYNC); + if (err < 0) + return ERR_PTR(err); + else if (err == LOCKED_PAGE) + goto got_it; + + lock_page(page); + if (unlikely(!PageUptodate(page))) { + f2fs_put_page(page, 1); + return ERR_PTR(-EIO); + } + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { + f2fs_put_page(page, 1); + goto repeat; + } +got_it: + if (nid != nid_of_node(page)) { + f2fs_msg(sbi->sb, KERN_ERR, "page node id does not match " + "request: %lu", nid); + f2fs_handle_error(sbi); + f2fs_put_page(page, 1); + return ERR_PTR(-EIO); + } + mark_page_accessed(page); + return page; +} + +/* + * Return a locked page for the desired node page. + * And, readahead MAX_RA_NODE number of node pages. + */ +struct page *get_node_page_ra(struct page *parent, int start) +{ + struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb); + struct blk_plug plug; + struct page *page; + int err, i, end; + nid_t nid; + + /* First, try getting the desired direct node. 
*/ + nid = get_nid(parent, start, false); + if (!nid) + return ERR_PTR(-ENOENT); +repeat: + page = grab_cache_page(NODE_MAPPING(sbi), nid); + if (!page) + return ERR_PTR(-ENOMEM); + + err = read_node_page(page, READ_SYNC); + if (err < 0) + return ERR_PTR(err); + else if (err == LOCKED_PAGE) + goto page_hit; + + blk_start_plug(&plug); + + /* Then, try readahead for siblings of the desired node */ + end = start + MAX_RA_NODE; + end = min(end, NIDS_PER_BLOCK); + for (i = start + 1; i < end; i++) { + nid = get_nid(parent, i, false); + if (!nid) + continue; + ra_node_page(sbi, nid); + } + + blk_finish_plug(&plug); + + lock_page(page); + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { + f2fs_put_page(page, 1); + goto repeat; + } +page_hit: + if (unlikely(!PageUptodate(page))) { + f2fs_put_page(page, 1); + return ERR_PTR(-EIO); + } + mark_page_accessed(page); + return page; +} + +void sync_inode_page(struct dnode_of_data *dn) +{ + if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) { + update_inode(dn->inode, dn->node_page); + } else if (dn->inode_page) { + if (!dn->inode_page_locked) + lock_page(dn->inode_page); + update_inode(dn->inode, dn->inode_page); + if (!dn->inode_page_locked) + unlock_page(dn->inode_page); + } else { + update_inode_page(dn->inode); + } +} + +int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, + struct writeback_control *wbc) +{ + pgoff_t index, end; + struct pagevec pvec; + int step = ino ? 2 : 0; + int nwritten = 0, wrote = 0; + + pagevec_init(&pvec, 0); + +next_step: + index = 0; + end = LONG_MAX; + + while (index <= end) { + int i, nr_pages; + nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (nr_pages == 0) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + /* + * flushing sequence with step: + * 0. indirect nodes + * 1. dentry dnodes + * 2. file dnodes + */ + if (step == 0 && IS_DNODE(page)) + continue; + if (step == 1 && (!IS_DNODE(page) || + is_cold_node(page))) + continue; + if (step == 2 && (!IS_DNODE(page) || + !is_cold_node(page))) + continue; + + /* + * If an fsync mode, + * we should not skip writing node pages. 
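+ * (When a specific ino is given, its pages are waited for with lock_page()
+ * rather than skipped on a failed trylock, and the dnodes get the
+ * fsync/dentry marks consumed by roll-forward recovery; see the
+ * "called by fsync()" block below.)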
+ */ + if (ino && ino_of_node(page) == ino) + lock_page(page); + else if (!trylock_page(page)) + continue; + + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { +continue_unlock: + unlock_page(page); + continue; + } + if (ino && ino_of_node(page) != ino) + goto continue_unlock; + + if (!PageDirty(page)) { + /* someone wrote it for us */ + goto continue_unlock; + } + + if (!clear_page_dirty_for_io(page)) + goto continue_unlock; + + /* called by fsync() */ + if (ino && IS_DNODE(page)) { + int mark = !is_checkpointed_node(sbi, ino); + set_fsync_mark(page, 1); + if (IS_INODE(page)) + set_dentry_mark(page, mark); + nwritten++; + } else { + set_fsync_mark(page, 0); + set_dentry_mark(page, 0); + } + NODE_MAPPING(sbi)->a_ops->writepage(page, wbc); + wrote++; + + if (--wbc->nr_to_write == 0) + break; + } + pagevec_release(&pvec); + cond_resched(); + + if (wbc->nr_to_write == 0) { + step = 2; + break; + } + } + + if (step < 2) { + step++; + goto next_step; + } + + if (wrote) + f2fs_submit_merged_bio(sbi, NODE, WRITE); + return nwritten; +} + +int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) +{ + pgoff_t index = 0, end = LONG_MAX; + struct pagevec pvec; + int ret2 = 0, ret = 0; + + pagevec_init(&pvec, 0); + + while (index <= end) { + int i, nr_pages; + nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, + PAGECACHE_TAG_WRITEBACK, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (nr_pages == 0) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + /* until radix tree lookup accepts end_index */ + if (unlikely(page->index > end)) + continue; + + if (ino && ino_of_node(page) == ino) { + wait_on_page_writeback(page); + if (TestClearPageError(page)) + ret = -EIO; + } + } + pagevec_release(&pvec); + cond_resched(); + } + + if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags))) + ret2 = -ENOSPC; + if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags))) + ret2 = -EIO; + if (!ret) + ret = ret2; + return ret; +} + +static int f2fs_write_node_page(struct page *page, + struct writeback_control *wbc) +{ + struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); + nid_t nid; + block_t new_addr; + struct node_info ni; + struct f2fs_io_info fio = { + .type = NODE, + .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, + }; + + if (unlikely(sbi->por_doing)) + goto redirty_out; + + wait_on_page_writeback(page); + + /* get old block addr of this node page */ + nid = nid_of_node(page); + f2fs_bug_on(page->index != nid); + + get_node_info(sbi, nid, &ni); + + /* This page is already truncated */ + if (unlikely(ni.blk_addr == NULL_ADDR)) { + dec_page_count(sbi, F2FS_DIRTY_NODES); + unlock_page(page); + return 0; + } + + if (wbc->for_reclaim) + goto redirty_out; + + mutex_lock(&sbi->node_write); + set_page_writeback(page); + write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr); + set_node_addr(sbi, &ni, new_addr); + dec_page_count(sbi, F2FS_DIRTY_NODES); + mutex_unlock(&sbi->node_write); + unlock_page(page); + return 0; + +redirty_out: + dec_page_count(sbi, F2FS_DIRTY_NODES); + wbc->pages_skipped++; + set_page_dirty(page); + return AOP_WRITEPAGE_ACTIVATE; +} + +/* + * It is very important to gather dirty pages and write at once, so that we can + * submit a big bio without interfering other data writes. + * Be default, 512 pages (2MB) * 3 node types, is more reasonable. 
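+ * (With 4KB pages: 512 pages per node type is 2MB, and three node types
+ * give the 1536-page / 6MB threshold defined as COLLECT_DIRTY_NODES.)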
+ */ +#define COLLECT_DIRTY_NODES 1536 +static int f2fs_write_node_pages(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); + long nr_to_write = wbc->nr_to_write; + + /* balancing f2fs's metadata in background */ + f2fs_balance_fs_bg(sbi); + + /* collect a number of dirty node pages and write together */ + if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES) + return 0; + + /* if mounting is failed, skip writing node pages */ + wbc->nr_to_write = 3 * max_hw_blocks(sbi); + wbc->sync_mode = WB_SYNC_NONE; + sync_node_pages(sbi, 0, wbc); + wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) - + wbc->nr_to_write); + return 0; +} + +static int f2fs_set_node_page_dirty(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); + + trace_f2fs_set_page_dirty(page, NODE); + + SetPageUptodate(page); + if (!PageDirty(page)) { + __set_page_dirty_nobuffers(page); + inc_page_count(sbi, F2FS_DIRTY_NODES); + SetPagePrivate(page); + return 1; + } + return 0; +} + +static void f2fs_invalidate_node_page(struct page *page, unsigned long offset) +{ + struct inode *inode = page->mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + if (PageDirty(page)) + dec_page_count(sbi, F2FS_DIRTY_NODES); + ClearPagePrivate(page); +} + +static int f2fs_release_node_page(struct page *page, gfp_t wait) +{ + ClearPagePrivate(page); + return 1; +} + +/* + * Structure of the f2fs node operations + */ +const struct address_space_operations f2fs_node_aops = { + .writepage = f2fs_write_node_page, + .writepages = f2fs_write_node_pages, + .set_page_dirty = f2fs_set_node_page_dirty, + .invalidatepage = f2fs_invalidate_node_page, + .releasepage = f2fs_release_node_page, +}; + +static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head) +{ + struct list_head *this; + struct free_nid *i; + list_for_each(this, head) { + i = list_entry(this, struct free_nid, list); + if (i->nid == n) + return i; + } + return NULL; +} + +static void __del_from_free_nid_list(struct free_nid *i) +{ + list_del(&i->list); + kmem_cache_free(free_nid_slab, i); +} + +static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build) +{ + struct free_nid *i; + struct nat_entry *ne; + bool allocated = false; + + if (nm_i->fcnt > 2 * MAX_FREE_NIDS) + return -1; + + /* 0 nid should not be used */ + if (unlikely(nid == 0)) + return 0; + + if (build) { + /* do not add allocated nids */ + read_lock(&nm_i->nat_tree_lock); + ne = __lookup_nat_cache(nm_i, nid); + if (ne && nat_get_blkaddr(ne) != NULL_ADDR) + allocated = true; + read_unlock(&nm_i->nat_tree_lock); + if (allocated) + return 0; + } + + i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); + i->nid = nid; + i->state = NID_NEW; + + spin_lock(&nm_i->free_nid_list_lock); + if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) { + spin_unlock(&nm_i->free_nid_list_lock); + kmem_cache_free(free_nid_slab, i); + return 0; + } + list_add_tail(&i->list, &nm_i->free_nid_list); + nm_i->fcnt++; + spin_unlock(&nm_i->free_nid_list_lock); + return 1; +} + +static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid) +{ + struct free_nid *i; + spin_lock(&nm_i->free_nid_list_lock); + i = __lookup_free_nid_list(nid, &nm_i->free_nid_list); + if (i && i->state == NID_NEW) { + __del_from_free_nid_list(i); + nm_i->fcnt--; + } + spin_unlock(&nm_i->free_nid_list_lock); +} + +static void scan_nat_page(struct f2fs_nm_info *nm_i, + struct 
page *nat_page, nid_t start_nid) +{ + struct f2fs_nat_block *nat_blk = page_address(nat_page); + block_t blk_addr; + int i; + + i = start_nid % NAT_ENTRY_PER_BLOCK; + + for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { + + if (unlikely(start_nid >= nm_i->max_nid)) + break; + + blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); + f2fs_bug_on(blk_addr == NEW_ADDR); + if (blk_addr == NULL_ADDR) { + if (add_free_nid(nm_i, start_nid, true) < 0) + break; + } + } +} + +static void build_free_nids(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + int i = 0; + nid_t nid = nm_i->next_scan_nid; + + /* Enough entries */ + if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK) + return; + + /* readahead nat pages to be scanned */ + ra_nat_pages(sbi, nid); + + while (1) { + struct page *page = get_current_nat_page(sbi, nid); + + scan_nat_page(nm_i, page, nid); + f2fs_put_page(page, 1); + + nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); + if (unlikely(nid >= nm_i->max_nid)) + nid = 0; + + if (i++ == FREE_NID_PAGES) + break; + } + + /* go to the next free nat pages to find free nids abundantly */ + nm_i->next_scan_nid = nid; + + /* find free nids from current sum_pages */ + mutex_lock(&curseg->curseg_mutex); + for (i = 0; i < nats_in_cursum(sum); i++) { + block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr); + nid = le32_to_cpu(nid_in_journal(sum, i)); + if (addr == NULL_ADDR) + add_free_nid(nm_i, nid, true); + else + remove_free_nid(nm_i, nid); + } + mutex_unlock(&curseg->curseg_mutex); +} + +/* + * If this function returns success, caller can obtain a new nid + * from second parameter of this function. + * The returned nid could be used ino as well as nid when inode is created. + */ +bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct free_nid *i = NULL; + struct list_head *this; +retry: + if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid)) + return false; + + spin_lock(&nm_i->free_nid_list_lock); + + /* We should not use stale free nids created by build_free_nids */ + if (nm_i->fcnt && !sbi->on_build_free_nids) { + f2fs_bug_on(list_empty(&nm_i->free_nid_list)); + list_for_each(this, &nm_i->free_nid_list) { + i = list_entry(this, struct free_nid, list); + if (i->state == NID_NEW) + break; + } + + f2fs_bug_on(i->state != NID_NEW); + *nid = i->nid; + i->state = NID_ALLOC; + nm_i->fcnt--; + spin_unlock(&nm_i->free_nid_list_lock); + return true; + } + spin_unlock(&nm_i->free_nid_list_lock); + + /* Let's scan nat pages and its caches to get free nids */ + mutex_lock(&nm_i->build_lock); + sbi->on_build_free_nids = true; + build_free_nids(sbi); + sbi->on_build_free_nids = false; + mutex_unlock(&nm_i->build_lock); + goto retry; +} + +/* + * alloc_nid() should be called prior to this function. + */ +void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct free_nid *i; + + spin_lock(&nm_i->free_nid_list_lock); + i = __lookup_free_nid_list(nid, &nm_i->free_nid_list); + f2fs_bug_on(!i || i->state != NID_ALLOC); + __del_from_free_nid_list(i); + spin_unlock(&nm_i->free_nid_list_lock); +} + +/* + * alloc_nid() should be called prior to this function. 
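+ * The usual pattern (see f2fs_new_inode()/f2fs_create() in namei.c) is:
+ * alloc_nid() to reserve a nid, then alloc_nid_done() once the new inode
+ * is fully linked, or alloc_nid_failed() on an error path so the nid is
+ * returned to the free list.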
+ */ +void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct free_nid *i; + + if (!nid) + return; + + spin_lock(&nm_i->free_nid_list_lock); + i = __lookup_free_nid_list(nid, &nm_i->free_nid_list); + f2fs_bug_on(!i || i->state != NID_ALLOC); + if (nm_i->fcnt > 2 * MAX_FREE_NIDS) { + __del_from_free_nid_list(i); + } else { + i->state = NID_NEW; + nm_i->fcnt++; + } + spin_unlock(&nm_i->free_nid_list_lock); +} + +void recover_node_page(struct f2fs_sb_info *sbi, struct page *page, + struct f2fs_summary *sum, struct node_info *ni, + block_t new_blkaddr) +{ + rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr); + set_node_addr(sbi, ni, new_blkaddr); + clear_node_page_dirty(page); +} + +int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) +{ + struct f2fs_inode *src, *dst; + nid_t ino = ino_of_node(page); + struct node_info old_ni, new_ni; + struct page *ipage; + int err; + + ipage = grab_cache_page(NODE_MAPPING(sbi), ino); + if (!ipage) + return -ENOMEM; + + /* Should not use this inode from free nid list */ + remove_free_nid(NM_I(sbi), ino); + + get_node_info(sbi, ino, &old_ni); + SetPageUptodate(ipage); + fill_node_footer(ipage, ino, ino, 0, true); + + src = F2FS_INODE(page); + dst = F2FS_INODE(ipage); + + memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); + dst->i_size = 0; + dst->i_blocks = cpu_to_le64(1); + dst->i_links = cpu_to_le32(1); + dst->i_xattr_nid = 0; + + new_ni = old_ni; + new_ni.ino = ino; + + err = set_node_addr(sbi, &new_ni, NEW_ADDR); + if (!err) + if (unlikely(!inc_valid_node_count(sbi, NULL))) + err = -ENOSPC; + if (!err) + inc_valid_inode_count(sbi); + f2fs_put_page(ipage, 1); + return err; +} + +/* + * ra_sum_pages() merge contiguous pages into one bio and submit. + * these pre-readed pages are linked in pages list. 
+ */ +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages, + int start, int nrpages) +{ + struct page *page; + int page_idx = start; + struct f2fs_io_info fio = { + .type = META, + .rw = READ_SYNC | REQ_META | REQ_PRIO + }; + + for (; page_idx < start + nrpages; page_idx++) { + /* alloc temporal page for read node summary info*/ + page = alloc_page(GFP_F2FS_ZERO); + if (!page) { + struct page *tmp; + list_for_each_entry_safe(page, tmp, pages, lru) { + list_del(&page->lru); + unlock_page(page); + __free_pages(page, 0); + } + return -ENOMEM; + } + + lock_page(page); + page->index = page_idx; + list_add_tail(&page->lru, pages); + } + + list_for_each_entry(page, pages, lru) + f2fs_submit_page_mbio(sbi, page, page->index, &fio); + + f2fs_submit_merged_bio(sbi, META, READ); + return 0; +} + +int restore_node_summary(struct f2fs_sb_info *sbi, + unsigned int segno, struct f2fs_summary_block *sum) +{ + struct f2fs_node *rn; + struct f2fs_summary *sum_entry; + struct page *page, *tmp; + block_t addr; + int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); + int i, last_offset, nrpages, err = 0; + LIST_HEAD(page_list); + + /* scan the node segment */ + last_offset = sbi->blocks_per_seg; + addr = START_BLOCK(sbi, segno); + sum_entry = &sum->entries[0]; + + for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { + nrpages = min(last_offset - i, bio_blocks); + + /* read ahead node pages */ + err = ra_sum_pages(sbi, &page_list, addr, nrpages); + if (err) + return err; + + list_for_each_entry_safe(page, tmp, &page_list, lru) { + + lock_page(page); + if (unlikely(!PageUptodate(page))) { + err = -EIO; + } else { + rn = F2FS_NODE(page); + sum_entry->nid = rn->footer.nid; + sum_entry->version = 0; + sum_entry->ofs_in_node = 0; + sum_entry++; + } + + list_del(&page->lru); + unlock_page(page); + __free_pages(page, 0); + } + } + return err; +} + +static bool flush_nats_in_journal(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + int i; + + mutex_lock(&curseg->curseg_mutex); + + if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) { + mutex_unlock(&curseg->curseg_mutex); + return false; + } + + for (i = 0; i < nats_in_cursum(sum); i++) { + struct nat_entry *ne; + struct f2fs_nat_entry raw_ne; + nid_t nid = le32_to_cpu(nid_in_journal(sum, i)); + + raw_ne = nat_in_journal(sum, i); +retry: + write_lock(&nm_i->nat_tree_lock); + ne = __lookup_nat_cache(nm_i, nid); + if (ne) { + __set_nat_cache_dirty(nm_i, ne); + write_unlock(&nm_i->nat_tree_lock); + continue; + } + ne = grab_nat_entry(nm_i, nid); + if (!ne) { + write_unlock(&nm_i->nat_tree_lock); + goto retry; + } + nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr)); + nat_set_ino(ne, le32_to_cpu(raw_ne.ino)); + nat_set_version(ne, raw_ne.version); + __set_nat_cache_dirty(nm_i, ne); + write_unlock(&nm_i->nat_tree_lock); + } + update_nats_in_cursum(sum, -i); + mutex_unlock(&curseg->curseg_mutex); + return true; +} + +/* + * This function is called during the checkpointing process. 
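+ * Dirty nat entries are written either into the NAT journal slots of the
+ * current hot-data summary block (when room remains and the journal was
+ * not just flushed) or into the alternate copy of the on-disk NAT block
+ * returned by get_next_nat_page(); the cache is shrunk afterwards.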
+ */ +void flush_nat_entries(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + struct list_head *cur, *n; + struct page *page = NULL; + struct f2fs_nat_block *nat_blk = NULL; + nid_t start_nid = 0, end_nid = 0; + bool flushed; + + flushed = flush_nats_in_journal(sbi); + + if (!flushed) + mutex_lock(&curseg->curseg_mutex); + + /* 1) flush dirty nat caches */ + list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) { + struct nat_entry *ne; + nid_t nid; + struct f2fs_nat_entry raw_ne; + int offset = -1; + block_t new_blkaddr; + + ne = list_entry(cur, struct nat_entry, list); + nid = nat_get_nid(ne); + + if (nat_get_blkaddr(ne) == NEW_ADDR) + continue; + if (flushed) + goto to_nat_page; + + /* if there is room for nat enries in curseg->sumpage */ + offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1); + if (offset >= 0) { + raw_ne = nat_in_journal(sum, offset); + goto flush_now; + } +to_nat_page: + if (!page || (start_nid > nid || nid > end_nid)) { + if (page) { + f2fs_put_page(page, 1); + page = NULL; + } + start_nid = START_NID(nid); + end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1; + + /* + * get nat block with dirty flag, increased reference + * count, mapped and lock + */ + page = get_next_nat_page(sbi, start_nid); + nat_blk = page_address(page); + } + + f2fs_bug_on(!nat_blk); + raw_ne = nat_blk->entries[nid - start_nid]; +flush_now: + new_blkaddr = nat_get_blkaddr(ne); + + raw_ne.ino = cpu_to_le32(nat_get_ino(ne)); + raw_ne.block_addr = cpu_to_le32(new_blkaddr); + raw_ne.version = nat_get_version(ne); + + if (offset < 0) { + nat_blk->entries[nid - start_nid] = raw_ne; + } else { + nat_in_journal(sum, offset) = raw_ne; + nid_in_journal(sum, offset) = cpu_to_le32(nid); + } + + if (nat_get_blkaddr(ne) == NULL_ADDR && + add_free_nid(NM_I(sbi), nid, false) <= 0) { + write_lock(&nm_i->nat_tree_lock); + __del_from_nat_cache(nm_i, ne); + write_unlock(&nm_i->nat_tree_lock); + } else { + write_lock(&nm_i->nat_tree_lock); + __clear_nat_cache_dirty(nm_i, ne); + ne->checkpointed = true; + write_unlock(&nm_i->nat_tree_lock); + } + } + if (!flushed) + mutex_unlock(&curseg->curseg_mutex); + f2fs_put_page(page, 1); + + /* 2) shrink nat caches if necessary */ + try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD); +} + +static int init_node_manager(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); + struct f2fs_nm_info *nm_i = NM_I(sbi); + unsigned char *version_bitmap; + unsigned int nat_segs, nat_blocks; + + nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); + + /* segment_count_nat includes pair segment so divide to 2. 
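+ * An illustrative configuration (not a requirement): with
+ * segment_count_nat = 8 and 512 blocks per segment, nat_segs = 4,
+ * nat_blocks = 2048 and, at 455 nat entries per 4KB block,
+ * max_nid comes to roughly 930k node ids.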
*/ + nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; + nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); + nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; + nm_i->fcnt = 0; + nm_i->nat_cnt = 0; + + INIT_LIST_HEAD(&nm_i->free_nid_list); + INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC); + INIT_LIST_HEAD(&nm_i->nat_entries); + INIT_LIST_HEAD(&nm_i->dirty_nat_entries); + + mutex_init(&nm_i->build_lock); + spin_lock_init(&nm_i->free_nid_list_lock); + rwlock_init(&nm_i->nat_tree_lock); + + nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); + nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); + version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); + if (!version_bitmap) + return -EFAULT; + + nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, + GFP_KERNEL); + if (!nm_i->nat_bitmap) + return -ENOMEM; + return 0; +} + +int build_node_manager(struct f2fs_sb_info *sbi) +{ + int err; + + sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL); + if (!sbi->nm_info) + return -ENOMEM; + + err = init_node_manager(sbi); + if (err) + return err; + + build_free_nids(sbi); + return 0; +} + +void destroy_node_manager(struct f2fs_sb_info *sbi) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct free_nid *i, *next_i; + struct nat_entry *natvec[NATVEC_SIZE]; + nid_t nid = 0; + unsigned int found; + + if (!nm_i) + return; + + /* destroy free nid list */ + spin_lock(&nm_i->free_nid_list_lock); + list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { + f2fs_bug_on(i->state == NID_ALLOC); + __del_from_free_nid_list(i); + nm_i->fcnt--; + } + f2fs_bug_on(nm_i->fcnt); + spin_unlock(&nm_i->free_nid_list_lock); + + /* destroy nat cache */ + write_lock(&nm_i->nat_tree_lock); + while ((found = __gang_lookup_nat_cache(nm_i, + nid, NATVEC_SIZE, natvec))) { + unsigned idx; + for (idx = 0; idx < found; idx++) { + struct nat_entry *e = natvec[idx]; + nid = nat_get_nid(e) + 1; + __del_from_nat_cache(nm_i, e); + } + } + f2fs_bug_on(nm_i->nat_cnt); + write_unlock(&nm_i->nat_tree_lock); + + kfree(nm_i->nat_bitmap); + sbi->nm_info = NULL; + kfree(nm_i); +} + +int __init create_node_manager_caches(void) +{ + nat_entry_slab = f2fs_kmem_cache_create("nat_entry", + sizeof(struct nat_entry), NULL); + if (!nat_entry_slab) + return -ENOMEM; + + free_nid_slab = f2fs_kmem_cache_create("free_nid", + sizeof(struct free_nid), NULL); + if (!free_nid_slab) { + kmem_cache_destroy(nat_entry_slab); + return -ENOMEM; + } + return 0; +} + +void destroy_node_manager_caches(void) +{ + kmem_cache_destroy(free_nid_slab); + kmem_cache_destroy(nat_entry_slab); +} diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h new file mode 100644 index 0000000000000..c4c79885c9934 --- /dev/null +++ b/fs/f2fs/node.h @@ -0,0 +1,351 @@ +/* + * fs/f2fs/node.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +/* start node id of a node block dedicated to the given node id */ +#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK) + +/* node block offset on the NAT area dedicated to the given start node id */ +#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK) + +/* # of pages to perform readahead before building free nids */ +#define FREE_NID_PAGES 4 + +/* maximum # of free node ids to produce during build_free_nids */ +#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES) + +/* maximum readahead size for node during getting data blocks */ +#define MAX_RA_NODE 128 + +/* maximum cached nat entries to manage memory footprint */ +#define NM_WOUT_THRESHOLD (64 * NAT_ENTRY_PER_BLOCK) + +/* vector size for gang look-up from nat cache that consists of radix tree */ +#define NATVEC_SIZE 64 + +/* return value for read_node_page */ +#define LOCKED_PAGE 1 + +/* + * For node information + */ +struct node_info { + nid_t nid; /* node id */ + nid_t ino; /* inode number of the node's owner */ + block_t blk_addr; /* block address of the node */ + unsigned char version; /* version of the node */ +}; + +struct nat_entry { + struct list_head list; /* for clean or dirty nat list */ + bool checkpointed; /* whether it is checkpointed or not */ + struct node_info ni; /* in-memory node information */ +}; + +#define nat_get_nid(nat) (nat->ni.nid) +#define nat_set_nid(nat, n) (nat->ni.nid = n) +#define nat_get_blkaddr(nat) (nat->ni.blk_addr) +#define nat_set_blkaddr(nat, b) (nat->ni.blk_addr = b) +#define nat_get_ino(nat) (nat->ni.ino) +#define nat_set_ino(nat, i) (nat->ni.ino = i) +#define nat_get_version(nat) (nat->ni.version) +#define nat_set_version(nat, v) (nat->ni.version = v) + +#define __set_nat_cache_dirty(nm_i, ne) \ + list_move_tail(&ne->list, &nm_i->dirty_nat_entries); +#define __clear_nat_cache_dirty(nm_i, ne) \ + list_move_tail(&ne->list, &nm_i->nat_entries); +#define inc_node_version(version) (++version) + +static inline void node_info_from_raw_nat(struct node_info *ni, + struct f2fs_nat_entry *raw_ne) +{ + ni->ino = le32_to_cpu(raw_ne->ino); + ni->blk_addr = le32_to_cpu(raw_ne->block_addr); + ni->version = raw_ne->version; +} + +/* + * For free nid mangement + */ +enum nid_state { + NID_NEW, /* newly added to free nid list */ + NID_ALLOC /* it is allocated */ +}; + +struct free_nid { + struct list_head list; /* for free node id list */ + nid_t nid; /* node id */ + int state; /* in use or not: NID_NEW or NID_ALLOC */ +}; + +static inline int next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + struct free_nid *fnid; + + if (nm_i->fcnt <= 0) + return -1; + spin_lock(&nm_i->free_nid_list_lock); + fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list); + *nid = fnid->nid; + spin_unlock(&nm_i->free_nid_list_lock); + return 0; +} + +/* + * inline functions + */ +static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size); +} + +static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + pgoff_t block_off; + pgoff_t block_addr; + int seg_off; + + block_off = NAT_BLOCK_OFFSET(start); + seg_off = block_off >> sbi->log_blocks_per_seg; + + block_addr = (pgoff_t)(nm_i->nat_blkaddr + + (seg_off << sbi->log_blocks_per_seg << 1) + + (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); + + if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) + 
block_addr += sbi->blocks_per_seg; + + return block_addr; +} + +static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi, + pgoff_t block_addr) +{ + struct f2fs_nm_info *nm_i = NM_I(sbi); + + block_addr -= nm_i->nat_blkaddr; + if ((block_addr >> sbi->log_blocks_per_seg) % 2) + block_addr -= sbi->blocks_per_seg; + else + block_addr += sbi->blocks_per_seg; + + return block_addr + nm_i->nat_blkaddr; +} + +static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid) +{ + unsigned int block_off = NAT_BLOCK_OFFSET(start_nid); + + if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) + f2fs_clear_bit(block_off, nm_i->nat_bitmap); + else + f2fs_set_bit(block_off, nm_i->nat_bitmap); +} + +static inline void fill_node_footer(struct page *page, nid_t nid, + nid_t ino, unsigned int ofs, bool reset) +{ + struct f2fs_node *rn = F2FS_NODE(page); + if (reset) + memset(rn, 0, sizeof(*rn)); + rn->footer.nid = cpu_to_le32(nid); + rn->footer.ino = cpu_to_le32(ino); + rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT); +} + +static inline void copy_node_footer(struct page *dst, struct page *src) +{ + struct f2fs_node *src_rn = F2FS_NODE(src); + struct f2fs_node *dst_rn = F2FS_NODE(dst); + memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer)); +} + +static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) +{ + struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct f2fs_node *rn = F2FS_NODE(page); + + rn->footer.cp_ver = ckpt->checkpoint_ver; + rn->footer.next_blkaddr = cpu_to_le32(blkaddr); +} + +static inline nid_t ino_of_node(struct page *node_page) +{ + struct f2fs_node *rn = F2FS_NODE(node_page); + return le32_to_cpu(rn->footer.ino); +} + +static inline nid_t nid_of_node(struct page *node_page) +{ + struct f2fs_node *rn = F2FS_NODE(node_page); + return le32_to_cpu(rn->footer.nid); +} + +static inline unsigned int ofs_of_node(struct page *node_page) +{ + struct f2fs_node *rn = F2FS_NODE(node_page); + unsigned flag = le32_to_cpu(rn->footer.flag); + return flag >> OFFSET_BIT_SHIFT; +} + +static inline unsigned long long cpver_of_node(struct page *node_page) +{ + struct f2fs_node *rn = F2FS_NODE(node_page); + return le64_to_cpu(rn->footer.cp_ver); +} + +static inline block_t next_blkaddr_of_node(struct page *node_page) +{ + struct f2fs_node *rn = F2FS_NODE(node_page); + return le32_to_cpu(rn->footer.next_blkaddr); +} + +/* + * f2fs assigns the following node offsets described as (num). + * N = NIDS_PER_BLOCK + * + * Inode block (0) + * |- direct node (1) + * |- direct node (2) + * |- indirect node (3) + * | `- direct node (4 => 4 + N - 1) + * |- indirect node (4 + N) + * | `- direct node (5 + N => 5 + 2N - 1) + * `- double indirect node (5 + 2N) + * `- indirect node (6 + 2N) + * `- direct node + * ...... + * `- indirect node ((6 + 2N) + x(N + 1)) + * `- direct node + * ...... 
+ * `- indirect node ((6 + 2N) + (N - 1)(N + 1)) + * `- direct node + */ +static inline bool IS_DNODE(struct page *node_page) +{ + unsigned int ofs = ofs_of_node(node_page); + + if (ofs == XATTR_NODE_OFFSET) + return false; + + if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK || + ofs == 5 + 2 * NIDS_PER_BLOCK) + return false; + if (ofs >= 6 + 2 * NIDS_PER_BLOCK) { + ofs -= 6 + 2 * NIDS_PER_BLOCK; + if (!((long int)ofs % (NIDS_PER_BLOCK + 1))) + return false; + } + return true; +} + +static inline void set_nid(struct page *p, int off, nid_t nid, bool i) +{ + struct f2fs_node *rn = F2FS_NODE(p); + + wait_on_page_writeback(p); + + if (i) + rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid); + else + rn->in.nid[off] = cpu_to_le32(nid); + set_page_dirty(p); +} + +static inline nid_t get_nid(struct page *p, int off, bool i) +{ + struct f2fs_node *rn = F2FS_NODE(p); + + if (i) + return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]); + return le32_to_cpu(rn->in.nid[off]); +} + +/* + * Coldness identification: + * - Mark cold files in f2fs_inode_info + * - Mark cold node blocks in their node footer + * - Mark cold data pages in page cache + */ +static inline int is_file(struct inode *inode, int type) +{ + return F2FS_I(inode)->i_advise & type; +} + +static inline void set_file(struct inode *inode, int type) +{ + F2FS_I(inode)->i_advise |= type; +} + +static inline void clear_file(struct inode *inode, int type) +{ + F2FS_I(inode)->i_advise &= ~type; +} + +#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) +#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) +#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) +#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) +#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) +#define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) + +static inline int is_cold_data(struct page *page) +{ + return PageChecked(page); +} + +static inline void set_cold_data(struct page *page) +{ + SetPageChecked(page); +} + +static inline void clear_cold_data(struct page *page) +{ + ClearPageChecked(page); +} + +static inline int is_node(struct page *page, int type) +{ + struct f2fs_node *rn = F2FS_NODE(page); + return le32_to_cpu(rn->footer.flag) & (1 << type); +} + +#define is_cold_node(page) is_node(page, COLD_BIT_SHIFT) +#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT) +#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT) + +static inline void set_cold_node(struct inode *inode, struct page *page) +{ + struct f2fs_node *rn = F2FS_NODE(page); + unsigned int flag = le32_to_cpu(rn->footer.flag); + + if (S_ISDIR(inode->i_mode)) + flag &= ~(0x1 << COLD_BIT_SHIFT); + else + flag |= (0x1 << COLD_BIT_SHIFT); + rn->footer.flag = cpu_to_le32(flag); +} + +static inline void set_mark(struct page *page, int mark, int type) +{ + struct f2fs_node *rn = F2FS_NODE(page); + unsigned int flag = le32_to_cpu(rn->footer.flag); + if (mark) + flag |= (0x1 << type); + else + flag &= ~(0x1 << type); + rn->footer.flag = cpu_to_le32(flag); +} +#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT) +#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c new file mode 100644 index 0000000000000..953854f355a6b --- /dev/null +++ b/fs/f2fs/recovery.c @@ -0,0 +1,519 @@ +/* + * fs/f2fs/recovery.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include "f2fs.h" +#include "node.h" +#include "segment.h" + +static struct kmem_cache *fsync_entry_slab; + +bool space_for_roll_forward(struct f2fs_sb_info *sbi) +{ + if (sbi->last_valid_block_count + sbi->alloc_valid_block_count + > sbi->user_block_count) + return false; + return true; +} + +static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, + nid_t ino) +{ + struct list_head *this; + struct fsync_inode_entry *entry; + + list_for_each(this, head) { + entry = list_entry(this, struct fsync_inode_entry, list); + if (entry->inode->i_ino == ino) + return entry; + } + return NULL; +} + +static int recover_dentry(struct page *ipage, struct inode *inode) +{ + struct f2fs_inode *raw_inode = F2FS_INODE(ipage); + nid_t pino = le32_to_cpu(raw_inode->i_pino); + struct f2fs_dir_entry *de; + struct qstr name; + struct page *page; + struct inode *dir, *einode; + int err = 0; + + dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino); + if (!dir) { + dir = f2fs_iget(inode->i_sb, pino); + if (IS_ERR(dir)) { + f2fs_msg(inode->i_sb, KERN_INFO, + "%s: f2fs_iget failed: %ld", + __func__, PTR_ERR(dir)); + err = PTR_ERR(dir); + goto out; + } + set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT); + add_dirty_dir_inode(dir); + } + + name.len = le32_to_cpu(raw_inode->i_namelen); + name.name = raw_inode->i_name; + + if (unlikely(name.len > F2FS_NAME_LEN)) { + WARN_ON(1); + err = -ENAMETOOLONG; + goto out; + } +retry: + de = f2fs_find_entry(dir, &name, &page); + if (de && inode->i_ino == le32_to_cpu(de->ino)) + goto out_unmap_put; + if (de) { + einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); + if (IS_ERR(einode)) { + WARN_ON(1); + if (PTR_ERR(einode) == -ENOENT) + err = -EEXIST; + goto out_unmap_put; + } + err = acquire_orphan_inode(F2FS_SB(inode->i_sb)); + if (err) { + iput(einode); + goto out_unmap_put; + } + f2fs_delete_entry(de, page, einode); + iput(einode); + goto retry; + } + err = __f2fs_add_link(dir, &name, inode); + goto out; + +out_unmap_put: + kunmap(page); + f2fs_put_page(page, 0); +out: + f2fs_msg(inode->i_sb, KERN_DEBUG, + "%s: ino = %x, name = %s, dir = %lx, err = %d", + __func__, ino_of_node(ipage), raw_inode->i_name, + IS_ERR(dir) ? 
0 : dir->i_ino, err); + return err; +} + +static int recover_inode(struct inode *inode, struct page *node_page) +{ + struct f2fs_inode *raw_inode = F2FS_INODE(node_page); + + if (!IS_INODE(node_page)) + return 0; + + inode->i_mode = le16_to_cpu(raw_inode->i_mode); + i_size_write(inode, le64_to_cpu(raw_inode->i_size)); + inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); + inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime); + inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime); + inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); + inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); + inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); + + if (is_dent_dnode(node_page)) + return recover_dentry(node_page, inode); + + f2fs_msg(inode->i_sb, KERN_DEBUG, "recover_inode: ino = %x, name = %s", + ino_of_node(node_page), raw_inode->i_name); + return 0; +} + +static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) +{ + unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); + struct curseg_info *curseg; + struct page *page; + block_t blkaddr; + int err = 0; + + /* get node pages in the current segment */ + curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); + blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff; + + /* read node page */ + page = alloc_page(GFP_F2FS_ZERO); + if (!page) + return -ENOMEM; + lock_page(page); + + while (1) { + struct fsync_inode_entry *entry; + + err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC); + if (err) + return err; + + lock_page(page); + + if (cp_ver != cpver_of_node(page)) + break; + + if (!is_fsync_dnode(page)) + goto next; + + entry = get_fsync_inode(head, ino_of_node(page)); + if (entry) { + if (IS_INODE(page) && is_dent_dnode(page)) + set_inode_flag(F2FS_I(entry->inode), + FI_INC_LINK); + } else { + if (IS_INODE(page) && is_dent_dnode(page)) { + err = recover_inode_page(sbi, page); + if (err) { + f2fs_msg(sbi->sb, KERN_INFO, + "%s: recover_inode_page failed: %d", + __func__, err); + break; + } + } + + /* add this fsync inode to the list */ + entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS); + if (!entry) { + err = -ENOMEM; + break; + } + + entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); + if (IS_ERR(entry->inode)) { + err = PTR_ERR(entry->inode); + f2fs_msg(sbi->sb, KERN_INFO, + "%s: f2fs_iget failed: %d", + __func__, err); + kmem_cache_free(fsync_entry_slab, entry); + break; + } + list_add_tail(&entry->list, head); + } + entry->blkaddr = blkaddr; + + err = recover_inode(entry->inode, page); + if (err && err != -ENOENT) { + f2fs_msg(sbi->sb, KERN_INFO, + "%s: recover_inode failed: %d", + __func__, err); + break; + } +next: + /* check next segment */ + blkaddr = next_blkaddr_of_node(page); + } + + unlock_page(page); + __free_pages(page, 0); + + return err; +} + +static void destroy_fsync_dnodes(struct list_head *head) +{ + struct fsync_inode_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, head, list) { + iput(entry->inode); + list_del(&entry->list); + kmem_cache_free(fsync_entry_slab, entry); + } +} + +static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, + block_t blkaddr, struct dnode_of_data *dn) +{ + struct seg_entry *sentry; + unsigned int segno = GET_SEGNO(sbi, blkaddr); + unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & + (sbi->blocks_per_seg - 1); + struct f2fs_summary sum; + nid_t ino, nid; + void *kaddr; + struct inode *inode; + struct page *node_page; + unsigned int offset; + block_t bidx; + int i; + + if (segno >= 
TOTAL_SEGS(sbi)) { + f2fs_msg(sbi->sb, KERN_ERR, "invalid segment number %u", segno); + if (f2fs_handle_error(sbi)) + return -EIO; + } + + sentry = get_seg_entry(sbi, segno); + if (!f2fs_test_bit(blkoff, sentry->cur_valid_map)) + return 0; + + /* Get the previous summary */ + for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) { + struct curseg_info *curseg = CURSEG_I(sbi, i); + if (curseg->segno == segno) { + sum = curseg->sum_blk->entries[blkoff]; + break; + } + } + if (i > CURSEG_COLD_DATA) { + struct page *sum_page = get_sum_page(sbi, segno); + struct f2fs_summary_block *sum_node; + kaddr = page_address(sum_page); + sum_node = (struct f2fs_summary_block *)kaddr; + sum = sum_node->entries[blkoff]; + f2fs_put_page(sum_page, 1); + } + + /* Use the locked dnode page and inode */ + nid = le32_to_cpu(sum.nid); + if (dn->inode->i_ino == nid) { + struct dnode_of_data tdn = *dn; + tdn.nid = nid; + tdn.node_page = dn->inode_page; + tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + truncate_data_blocks_range(&tdn, 1); + return 0; + } else if (dn->nid == nid) { + struct dnode_of_data tdn = *dn; + tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + truncate_data_blocks_range(&tdn, 1); + return 0; + } + + /* Get the node page */ + node_page = get_node_page(sbi, nid); + if (IS_ERR(node_page)) + return PTR_ERR(node_page); + + offset = ofs_of_node(node_page); + ino = ino_of_node(node_page); + f2fs_put_page(node_page, 1); + + /* Skip nodes with circular references */ + if (ino == dn->inode->i_ino) { + f2fs_msg(sbi->sb, KERN_ERR, "%s: node %x has circular inode %x", + __func__, ino, nid); + f2fs_handle_error(sbi); + return -EDEADLK; + } + + /* Deallocate previous index in the node page */ + inode = f2fs_iget(sbi->sb, ino); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + bidx = start_bidx_of_node(offset, F2FS_I(inode)) + + le16_to_cpu(sum.ofs_in_node); + + truncate_hole(inode, bidx, bidx + 1); + iput(inode); + return 0; +} + +static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, + struct page *page, block_t blkaddr) +{ + struct f2fs_inode_info *fi = F2FS_I(inode); + unsigned int start, end; + struct dnode_of_data dn; + struct f2fs_summary sum; + struct node_info ni; + int err = 0, recovered = 0; + + if (recover_inline_data(inode, page)) + goto out; + + start = start_bidx_of_node(ofs_of_node(page), fi); + if (IS_INODE(page)) + end = start + ADDRS_PER_INODE(fi); + else + end = start + ADDRS_PER_BLOCK; + + f2fs_lock_op(sbi); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + + err = get_dnode_of_data(&dn, start, ALLOC_NODE); + if (err) { + f2fs_unlock_op(sbi); + f2fs_msg(sbi->sb, KERN_INFO, + "%s: get_dnode_of_data failed: %d", __func__, err); + goto out; + } + + wait_on_page_writeback(dn.node_page); + + get_node_info(sbi, dn.nid, &ni); + f2fs_bug_on(ni.ino != ino_of_node(page)); + f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page)); + + for (; start < end; start++) { + block_t src, dest; + + src = datablock_addr(dn.node_page, dn.ofs_in_node); + dest = datablock_addr(page, dn.ofs_in_node); + + if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) { + if (src == NULL_ADDR) { + err = reserve_new_block(&dn); + /* We should not get -ENOSPC */ + f2fs_bug_on(err); + if (err) + f2fs_msg(sbi->sb, KERN_INFO, + "%s: reserve_new_block failed: %d", + __func__, err); + } + + /* Check the previous node page having this index */ + err = check_index_in_prev_nodes(sbi, dest, &dn); + if (err) + goto err; + + set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version); + + /* write dummy data page */ 
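+ /* recover_data_page() only replays the address change in the summary and
+ * SIT state; no data is written here, hence the "dummy" page above. */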
+ recover_data_page(sbi, NULL, &sum, src, dest); + update_extent_cache(dest, &dn); + recovered++; + } + dn.ofs_in_node++; + } + + /* write node page in place */ + set_summary(&sum, dn.nid, 0, 0); + if (IS_INODE(dn.node_page)) + sync_inode_page(&dn); + + copy_node_footer(dn.node_page, page); + fill_node_footer(dn.node_page, dn.nid, ni.ino, + ofs_of_node(page), false); + set_page_dirty(dn.node_page); + + recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr); +err: + f2fs_put_dnode(&dn); + f2fs_unlock_op(sbi); +out: + f2fs_msg(sbi->sb, KERN_DEBUG, + "recover_data: ino = %lx, recovered = %d blocks, err = %d", + inode->i_ino, recovered, err); + return err; +} + +static int recover_data(struct f2fs_sb_info *sbi, + struct list_head *head, int type) +{ + unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); + struct curseg_info *curseg; + struct page *page; + int err = 0; + block_t blkaddr; + + /* get node pages in the current segment */ + curseg = CURSEG_I(sbi, type); + blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); + + /* read node page */ + page = alloc_page(GFP_F2FS_ZERO); + if (!page) + return -ENOMEM; + + lock_page(page); + + while (1) { + struct fsync_inode_entry *entry; + + err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC); + if (err) { + f2fs_msg(sbi->sb, KERN_INFO, + "%s: f2fs_readpage failed: %d", + __func__, err); + return err; + } + + lock_page(page); + + if (cp_ver != cpver_of_node(page)) + break; + + entry = get_fsync_inode(head, ino_of_node(page)); + if (!entry) + goto next; + + err = do_recover_data(sbi, entry->inode, page, blkaddr); + if (err) { + f2fs_msg(sbi->sb, KERN_INFO, + "%s: do_recover_data failed: %d", + __func__, err); + break; + } + + if (entry->blkaddr == blkaddr) { + iput(entry->inode); + list_del(&entry->list); + kmem_cache_free(fsync_entry_slab, entry); + } +next: + /* check next segment */ + blkaddr = next_blkaddr_of_node(page); + } + + unlock_page(page); + __free_pages(page, 0); + + if (!err) + allocate_new_segments(sbi); + return err; +} + +int recover_fsync_data(struct f2fs_sb_info *sbi) +{ + struct list_head inode_list; + int err; + bool need_writecp = false; + + fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry", + sizeof(struct fsync_inode_entry), NULL); + if (!fsync_entry_slab) + return -ENOMEM; + + INIT_LIST_HEAD(&inode_list); + + /* step #1: find fsynced inode numbers */ + sbi->por_doing = true; + err = find_fsync_dnodes(sbi, &inode_list); + if (err) { + f2fs_msg(sbi->sb, KERN_INFO, + "%s: find_fsync_dnodes failed: %d", __func__, err); + goto out; + } + + if (list_empty(&inode_list)) + goto out; + + need_writecp = true; + + /* step #2: recover data */ + err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE); + if (!list_empty(&inode_list)) { + f2fs_handle_error(sbi); + err = -EIO; + } +out: + destroy_fsync_dnodes(&inode_list); + kmem_cache_destroy(fsync_entry_slab); + sbi->por_doing = false; + if (!err && need_writecp) { + f2fs_msg(sbi->sb, KERN_INFO, "recovery complete"); + write_checkpoint(sbi, false); + } else + f2fs_msg(sbi->sb, KERN_ERR, "recovery did not fully complete"); + + return err; +} diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c new file mode 100644 index 0000000000000..868832a848f4d --- /dev/null +++ b/fs/f2fs/segment.c @@ -0,0 +1,1972 @@ +/* + * fs/f2fs/segment.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "segment.h" +#include "node.h" +#include + +#define __reverse_ffz(x) __reverse_ffs(~(x)) + +static struct kmem_cache *discard_entry_slab; + +/* + * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since + * MSB and LSB are reversed in a byte by f2fs_set_bit. + */ +static inline unsigned long __reverse_ffs(unsigned long word) +{ + int num = 0; + +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf0) == 0) + num += 4; + else + word >>= 4; + if ((word & 0xc) == 0) + num += 2; + else + word >>= 2; + if ((word & 0x2) == 0) + num += 1; + return num; +} + +/* + * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c becasue + * f2fs_set_bit makes MSB and LSB reversed in a byte. + * Example: + * LSB <--> MSB + * f2fs_set_bit(0, bitmap) => 0000 0001 + * f2fs_set_bit(7, bitmap) => 1000 0000 + */ +static unsigned long __find_rev_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset) +{ + const unsigned long *p = addr + BIT_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + unsigned long mask, submask; + unsigned long quot, rest; + + if (offset >= size) + return size; + + size -= result; + offset %= BITS_PER_LONG; + if (!offset) + goto aligned; + + tmp = *(p++); + quot = (offset >> 3) << 3; + rest = offset & 0x7; + mask = ~0UL << quot; + submask = (unsigned char)(0xff << rest) >> rest; + submask <<= quot; + mask &= submask; + tmp &= mask; + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + + size -= BITS_PER_LONG; + result += BITS_PER_LONG; +aligned: + while (size & ~(BITS_PER_LONG-1)) { + tmp = *(p++); + if (tmp) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp &= (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __reverse_ffs(tmp); +} + +static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset) +{ + const unsigned long *p = addr + BIT_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + unsigned long mask, submask; + unsigned long quot, rest; + + if (offset >= size) + return size; + + size -= result; + offset %= BITS_PER_LONG; + if (!offset) + goto aligned; + + tmp = *(p++); + quot = (offset >> 3) << 3; + rest = offset & 0x7; + mask = ~(~0UL << quot); + submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest); + submask <<= quot; + mask += submask; + tmp |= mask; + if (size < BITS_PER_LONG) + goto found_first; + if (~tmp) + goto found_middle; + + size -= BITS_PER_LONG; + result += BITS_PER_LONG; +aligned: + while (size & ~(BITS_PER_LONG - 1)) { + tmp = *(p++); + if (~tmp) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? 
*/ + return result + size; /* Nope. */ +found_middle: + return result + __reverse_ffz(tmp); +} + +/* + * This function balances dirty node and dentry pages. + * In addition, it controls garbage collection. + */ +void f2fs_balance_fs(struct f2fs_sb_info *sbi) +{ + /* + * We should do GC or end up with checkpoint, if there are so many dirty + * dir/node pages without enough free segments. + */ + if (has_not_enough_free_secs(sbi, 0)) { + mutex_lock(&sbi->gc_mutex); + f2fs_gc(sbi); + } +} + +void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) +{ + /* check the # of cached NAT entries and prefree segments */ + if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) || + excess_prefree_segs(sbi)) + f2fs_sync_fs(sbi->sb, true); +} + +static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, + enum dirty_type dirty_type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + /* need not be added */ + if (IS_CURSEG(sbi, segno)) + return; + + if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) + dirty_i->nr_dirty[dirty_type]++; + + if (dirty_type == DIRTY) { + struct seg_entry *sentry = get_seg_entry(sbi, segno); + enum dirty_type t = sentry->type; + + if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) + dirty_i->nr_dirty[t]++; + } +} + +static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, + enum dirty_type dirty_type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) + dirty_i->nr_dirty[dirty_type]--; + + if (dirty_type == DIRTY) { + struct seg_entry *sentry = get_seg_entry(sbi, segno); + enum dirty_type t = sentry->type; + + if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) + dirty_i->nr_dirty[t]--; + + if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0) + clear_bit(GET_SECNO(sbi, segno), + dirty_i->victim_secmap); + } +} + +/* + * Should not occur error such as -ENOMEM. + * Adding dirty entry into seglist is not critical operation. + * If a given segment is one of current working segments, it won't be added. 
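+ * A segment with no valid blocks left is classified as PRE (prefree), a
+ * partially valid one as DIRTY, and a fully valid one is dropped from the
+ * dirty lists again; see the valid_blocks checks below.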
+ */ +static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned short valid_blocks; + + if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) + return; + + mutex_lock(&dirty_i->seglist_lock); + + valid_blocks = get_valid_blocks(sbi, segno, 0); + + if (valid_blocks == 0) { + __locate_dirty_segment(sbi, segno, PRE); + __remove_dirty_segment(sbi, segno, DIRTY); + } else if (valid_blocks < sbi->blocks_per_seg) { + __locate_dirty_segment(sbi, segno, DIRTY); + } else { + /* Recovery routine with SSR needs this */ + __remove_dirty_segment(sbi, segno, DIRTY); + } + + mutex_unlock(&dirty_i->seglist_lock); +} + +static void f2fs_issue_discard(struct f2fs_sb_info *sbi, + block_t blkstart, block_t blklen) +{ + sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart); + sector_t len = SECTOR_FROM_BLOCK(sbi, blklen); + blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); + trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); +} + +static void add_discard_addrs(struct f2fs_sb_info *sbi, + unsigned int segno, struct seg_entry *se) +{ + struct list_head *head = &SM_I(sbi)->discard_list; + struct discard_entry *new; + int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); + int max_blocks = sbi->blocks_per_seg; + unsigned long *cur_map = (unsigned long *)se->cur_valid_map; + unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; + unsigned long dmap[entries]; + unsigned int start = 0, end = -1; + int i; + + if (!test_opt(sbi, DISCARD)) + return; + + /* zero block will be discarded through the prefree list */ + if (!se->valid_blocks || se->valid_blocks == max_blocks) + return; + + /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ + for (i = 0; i < entries; i++) + dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; + + while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { + start = __find_rev_next_bit(dmap, max_blocks, end + 1); + if (start >= max_blocks) + break; + + end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); + + new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS); + INIT_LIST_HEAD(&new->list); + new->blkaddr = START_BLOCK(sbi, segno) + start; + new->len = end - start; + + list_add_tail(&new->list, head); + SM_I(sbi)->nr_discards += end - start; + } +} + +/* + * Should call clear_prefree_segments after checkpoint is done. 
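+ * Prefree segments still back the data of the previous checkpoint, so they
+ * may be reused and discarded only after the new checkpoint is on disk;
+ * clear_prefree_segments() then clears the PRE bits and issues the queued
+ * discard requests.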
+ */ +static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned int segno = -1; + unsigned int total_segs = TOTAL_SEGS(sbi); + + mutex_lock(&dirty_i->seglist_lock); + while (1) { + segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, + segno + 1); + if (segno >= total_segs) + break; + __set_test_and_free(sbi, segno); + } + mutex_unlock(&dirty_i->seglist_lock); +} + +void clear_prefree_segments(struct f2fs_sb_info *sbi) +{ + struct list_head *head = &(SM_I(sbi)->discard_list); + struct list_head *this, *next; + struct discard_entry *entry; + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; + unsigned int total_segs = TOTAL_SEGS(sbi); + unsigned int start = 0, end = -1; + + mutex_lock(&dirty_i->seglist_lock); + + while (1) { + int i; + start = find_next_bit(prefree_map, total_segs, end + 1); + if (start >= total_segs) + break; + end = find_next_zero_bit(prefree_map, total_segs, start + 1); + + for (i = start; i < end; i++) + clear_bit(i, prefree_map); + + dirty_i->nr_dirty[PRE] -= end - start; + + if (!test_opt(sbi, DISCARD)) + continue; + + f2fs_issue_discard(sbi, START_BLOCK(sbi, start), + (end - start) << sbi->log_blocks_per_seg); + } + mutex_unlock(&dirty_i->seglist_lock); + + /* send small discards */ + list_for_each_safe(this, next, head) { + entry = list_entry(this, struct discard_entry, list); + f2fs_issue_discard(sbi, entry->blkaddr, entry->len); + list_del(&entry->list); + SM_I(sbi)->nr_discards -= entry->len; + kmem_cache_free(discard_entry_slab, entry); + } +} + +static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) + sit_i->dirty_sentries++; +} + +static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, + unsigned int segno, int modified) +{ + struct seg_entry *se = get_seg_entry(sbi, segno); + se->type = type; + if (modified) + __mark_sit_entry_dirty(sbi, segno); +} + +static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) +{ + struct seg_entry *se; + unsigned int segno, offset; + long int new_vblocks; + bool check_map = false; + + segno = GET_SEGNO(sbi, blkaddr); + + se = get_seg_entry(sbi, segno); + new_vblocks = se->valid_blocks + del; + offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1); + + if (new_vblocks < 0 || new_vblocks > sbi->blocks_per_seg || + (new_vblocks >> (sizeof(unsigned short) << 3))) + if (f2fs_handle_error(sbi)) + check_map = true; + + se->mtime = get_mtime(sbi); + SIT_I(sbi)->max_mtime = se->mtime; + + /* Update valid block bitmap */ + if (del > 0) { + if (f2fs_set_bit(offset, se->cur_valid_map)) + if (f2fs_handle_error(sbi)) + check_map = true; + } else { + if (!f2fs_clear_bit(offset, se->cur_valid_map)) + if (f2fs_handle_error(sbi)) + check_map = true; + } + + if (unlikely(check_map)) { + int i; + long int vblocks = 0; + + f2fs_msg(sbi->sb, KERN_ERR, + "cannot %svalidate block %u in segment %u with %hu valid blocks", + (del < 0) ? 
"in" : "", + offset, segno, se->valid_blocks); + + /* assume the count was stale to start */ + del = 0; + for (i = 0; i < sbi->blocks_per_seg; i++) + if (f2fs_test_bit(i, se->cur_valid_map)) + vblocks++; + if (vblocks != se->valid_blocks) { + f2fs_msg(sbi->sb, KERN_INFO, "correcting valid block " + "counts %d -> %ld", se->valid_blocks, vblocks); + /* make accounting corrections */ + del = vblocks - se->valid_blocks; + } + } + se->valid_blocks += del; + + if (!f2fs_test_bit(offset, se->ckpt_valid_map)) + se->ckpt_valid_blocks += del; + + __mark_sit_entry_dirty(sbi, segno); + + /* update total number of valid blocks to be written in ckpt area */ + SIT_I(sbi)->written_valid_blocks += del; + + if (sbi->segs_per_sec > 1) + get_sec_entry(sbi, segno)->valid_blocks += del; +} + +static void refresh_sit_entry(struct f2fs_sb_info *sbi, + block_t old_blkaddr, block_t new_blkaddr) +{ + update_sit_entry(sbi, new_blkaddr, 1); + if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) + update_sit_entry(sbi, old_blkaddr, -1); +} + +void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) +{ + unsigned int segno = GET_SEGNO(sbi, addr); + struct sit_info *sit_i = SIT_I(sbi); + + f2fs_bug_on(addr == NULL_ADDR); + if (addr == NEW_ADDR) + return; + + if (segno >= TOTAL_SEGS(sbi)) { + f2fs_msg(sbi->sb, KERN_ERR, "invalid segment number %u", segno); + if (f2fs_handle_error(sbi)) + return; + } + + /* add it into sit main buffer */ + mutex_lock(&sit_i->sentry_lock); + + update_sit_entry(sbi, addr, -1); + + /* add it into dirty seglist */ + locate_dirty_segment(sbi, segno); + + mutex_unlock(&sit_i->sentry_lock); +} + +/* + * This function should be resided under the curseg_mutex lock + */ +static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, + struct f2fs_summary *sum) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + void *addr = curseg->sum_blk; + addr += curseg->next_blkoff * sizeof(struct f2fs_summary); + memcpy(addr, sum, sizeof(struct f2fs_summary)); +} + +/* + * Calculate the number of current summary pages for writing + */ +int npages_for_summary_flush(struct f2fs_sb_info *sbi) +{ + int valid_sum_count = 0; + int i, sum_in_page; + + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + if (sbi->ckpt->alloc_type[i] == SSR) + valid_sum_count += sbi->blocks_per_seg; + else + valid_sum_count += curseg_blkoff(sbi, i); + } + + sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - + SUM_FOOTER_SIZE) / SUMMARY_SIZE; + if (valid_sum_count <= sum_in_page) + return 1; + else if ((valid_sum_count - sum_in_page) <= + (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) + return 2; + return 3; +} + +/* + * Caller should put this summary page + */ +struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) +{ + return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); +} + +static void write_sum_page(struct f2fs_sb_info *sbi, + struct f2fs_summary_block *sum_blk, block_t blk_addr) +{ + struct page *page = grab_meta_page(sbi, blk_addr); + void *kaddr = page_address(page); + memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE); + set_page_dirty(page); + f2fs_put_page(page, 1); +} + +static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int segno = curseg->segno + 1; + struct free_segmap_info *free_i = FREE_I(sbi); + + if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec) + return !test_bit(segno, free_i->free_segmap); + return 0; +} + +/* + * Find a new segment from the free segments bitmap to right order + * This function 
should be returned with success, otherwise BUG + */ +static void get_new_segment(struct f2fs_sb_info *sbi, + unsigned int *newseg, bool new_sec, int dir) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int segno, secno, zoneno; + unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone; + unsigned int hint = *newseg / sbi->segs_per_sec; + unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); + unsigned int left_start = hint; + bool init = true; + int go_left = 0; + int i; + + write_lock(&free_i->segmap_lock); + + if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { + segno = find_next_zero_bit(free_i->free_segmap, + TOTAL_SEGS(sbi), *newseg + 1); + if (segno - *newseg < sbi->segs_per_sec - + (*newseg % sbi->segs_per_sec)) + goto got_it; + } +find_other_zone: + secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint); + if (secno >= TOTAL_SECS(sbi)) { + if (dir == ALLOC_RIGHT) { + secno = find_next_zero_bit(free_i->free_secmap, + TOTAL_SECS(sbi), 0); + f2fs_bug_on(secno >= TOTAL_SECS(sbi)); + } else { + go_left = 1; + left_start = hint - 1; + } + } + if (go_left == 0) + goto skip_left; + + while (test_bit(left_start, free_i->free_secmap)) { + if (left_start > 0) { + left_start--; + continue; + } + left_start = find_next_zero_bit(free_i->free_secmap, + TOTAL_SECS(sbi), 0); + f2fs_bug_on(left_start >= TOTAL_SECS(sbi)); + break; + } + secno = left_start; +skip_left: + hint = secno; + segno = secno * sbi->segs_per_sec; + zoneno = secno / sbi->secs_per_zone; + + /* give up on finding another zone */ + if (!init) + goto got_it; + if (sbi->secs_per_zone == 1) + goto got_it; + if (zoneno == old_zoneno) + goto got_it; + if (dir == ALLOC_LEFT) { + if (!go_left && zoneno + 1 >= total_zones) + goto got_it; + if (go_left && zoneno == 0) + goto got_it; + } + for (i = 0; i < NR_CURSEG_TYPE; i++) + if (CURSEG_I(sbi, i)->zone == zoneno) + break; + + if (i < NR_CURSEG_TYPE) { + /* zone is in user, try another */ + if (go_left) + hint = zoneno * sbi->secs_per_zone - 1; + else if (zoneno + 1 >= total_zones) + hint = 0; + else + hint = (zoneno + 1) * sbi->secs_per_zone; + init = false; + goto find_other_zone; + } +got_it: + /* set it as dirty segment in free segmap */ + f2fs_bug_on(test_bit(segno, free_i->free_segmap)); + __set_inuse(sbi, segno); + *newseg = segno; + write_unlock(&free_i->segmap_lock); +} + +static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + struct summary_footer *sum_footer; + + curseg->segno = curseg->next_segno; + curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno); + curseg->next_blkoff = 0; + curseg->next_segno = NULL_SEGNO; + + sum_footer = &(curseg->sum_blk->footer); + memset(sum_footer, 0, sizeof(struct summary_footer)); + if (IS_DATASEG(type)) + SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); + if (IS_NODESEG(type)) + SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); + __set_sit_entry_type(sbi, type, curseg->segno, modified); +} + +/* + * Allocate a current working segment. + * This function always allocates a free segment in LFS manner. 
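+ * In LFS manner writes simply append into a brand-new free segment; the SSR
+ * path (change_curseg() below) instead reuses a dirty segment and fills the
+ * holes left by invalid blocks, which __next_free_blkoff() scans for.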
+ */ +static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int segno = curseg->segno; + int dir = ALLOC_LEFT; + + write_sum_page(sbi, curseg->sum_blk, + GET_SUM_BLOCK(sbi, segno)); + if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) + dir = ALLOC_RIGHT; + + if (test_opt(sbi, NOHEAP)) + dir = ALLOC_RIGHT; + + get_new_segment(sbi, &segno, new_sec, dir); + curseg->next_segno = segno; + reset_curseg(sbi, type, 1); + curseg->alloc_type = LFS; +} + +static void __next_free_blkoff(struct f2fs_sb_info *sbi, + struct curseg_info *seg, block_t start) +{ + struct seg_entry *se = get_seg_entry(sbi, seg->segno); + int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); + unsigned long target_map[entries]; + unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; + unsigned long *cur_map = (unsigned long *)se->cur_valid_map; + int i, pos; + + for (i = 0; i < entries; i++) + target_map[i] = ckpt_map[i] | cur_map[i]; + + pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); + + seg->next_blkoff = pos; +} + +/* + * If a segment is written by LFS manner, next block offset is just obtained + * by increasing the current block offset. However, if a segment is written by + * SSR manner, next block offset obtained by calling __next_free_blkoff + */ +static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, + struct curseg_info *seg) +{ + if (seg->alloc_type == SSR) + __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); + else + seg->next_blkoff++; +} + +/* + * This function always allocates a used segment (from dirty seglist) by SSR + * manner, so it should recover the existing segment information of valid blocks + */ +static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int new_segno = curseg->next_segno; + struct f2fs_summary_block *sum_node; + struct page *sum_page; + + write_sum_page(sbi, curseg->sum_blk, + GET_SUM_BLOCK(sbi, curseg->segno)); + __set_test_and_inuse(sbi, new_segno); + + mutex_lock(&dirty_i->seglist_lock); + __remove_dirty_segment(sbi, new_segno, PRE); + __remove_dirty_segment(sbi, new_segno, DIRTY); + mutex_unlock(&dirty_i->seglist_lock); + + reset_curseg(sbi, type, 1); + curseg->alloc_type = SSR; + __next_free_blkoff(sbi, curseg, 0); + + if (reuse) { + sum_page = get_sum_page(sbi, new_segno); + sum_node = (struct f2fs_summary_block *)page_address(sum_page); + memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); + f2fs_put_page(sum_page, 1); + } +} + +static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops; + + if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0)) + return v_ops->get_victim(sbi, + &(curseg)->next_segno, BG_GC, type, SSR); + + /* For data segments, let's do SSR more intensively */ + for (; type >= CURSEG_HOT_DATA; type--) + if (v_ops->get_victim(sbi, &(curseg)->next_segno, + BG_GC, type, SSR)) + return 1; + return 0; +} + +/* + * flush out current segment and replace it with new segment + * This function should be returned with success, otherwise BUG + */ +static void allocate_segment_by_default(struct f2fs_sb_info *sbi, + int type, bool force) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + + if (force) + new_curseg(sbi, type, true); + else if (type == CURSEG_WARM_NODE) + 
new_curseg(sbi, type, false); + else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) + new_curseg(sbi, type, false); + else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) + change_curseg(sbi, type, true); + else + new_curseg(sbi, type, false); + + stat_inc_seg_type(sbi, curseg); +} + +void allocate_new_segments(struct f2fs_sb_info *sbi) +{ + struct curseg_info *curseg; + unsigned int old_curseg; + int i; + + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + curseg = CURSEG_I(sbi, i); + old_curseg = curseg->segno; + SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); + locate_dirty_segment(sbi, old_curseg); + } +} + +static const struct segment_allocation default_salloc_ops = { + .allocate_segment = allocate_segment_by_default, +}; + +static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + if (curseg->next_blkoff < sbi->blocks_per_seg) + return true; + return false; +} + +static int __get_segment_type_2(struct page *page, enum page_type p_type) +{ + if (p_type == DATA) + return CURSEG_HOT_DATA; + else + return CURSEG_HOT_NODE; +} + +static int __get_segment_type_4(struct page *page, enum page_type p_type) +{ + if (p_type == DATA) { + struct inode *inode = page->mapping->host; + + if (S_ISDIR(inode->i_mode)) + return CURSEG_HOT_DATA; + else + return CURSEG_COLD_DATA; + } else { + if (IS_DNODE(page) && !is_cold_node(page)) + return CURSEG_HOT_NODE; + else + return CURSEG_COLD_NODE; + } +} + +static int __get_segment_type_6(struct page *page, enum page_type p_type) +{ + if (p_type == DATA) { + struct inode *inode = page->mapping->host; + + if (S_ISDIR(inode->i_mode)) + return CURSEG_HOT_DATA; + else if (is_cold_data(page) || file_is_cold(inode)) + return CURSEG_COLD_DATA; + else + return CURSEG_WARM_DATA; + } else { + if (IS_DNODE(page)) + return is_cold_node(page) ? CURSEG_WARM_NODE : + CURSEG_HOT_NODE; + else + return CURSEG_COLD_NODE; + } +} + +static int __get_segment_type(struct page *page, enum page_type p_type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); + switch (sbi->active_logs) { + case 2: + return __get_segment_type_2(page, p_type); + case 4: + return __get_segment_type_4(page, p_type); + } + /* NR_CURSEG_TYPE(6) logs by default */ + f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE); + return __get_segment_type_6(page, p_type); +} + +void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, + block_t old_blkaddr, block_t *new_blkaddr, + struct f2fs_summary *sum, int type) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct curseg_info *curseg; + unsigned int old_cursegno; + + curseg = CURSEG_I(sbi, type); + + mutex_lock(&curseg->curseg_mutex); + + *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); + old_cursegno = curseg->segno; + + /* + * __add_sum_entry should be resided under the curseg_mutex + * because, this function updates a summary entry in the + * current summary block. + */ + __add_sum_entry(sbi, type, sum); + + mutex_lock(&sit_i->sentry_lock); + __refresh_next_blkoff(sbi, curseg); + + stat_inc_block_count(sbi, curseg); + + /* + * SIT information should be updated before segment allocation, + * since SSR needs latest valid block information. 
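+ * refresh_sit_entry() below validates the new block (and invalidates the
+ * old one) first, so that if this write fills the current segment and the
+ * allocator falls back to SSR, victim selection already sees up-to-date
+ * valid block counts.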
+ */ + refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); + + if (!__has_curseg_space(sbi, type)) + sit_i->s_ops->allocate_segment(sbi, type, false); + + locate_dirty_segment(sbi, old_cursegno); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + mutex_unlock(&sit_i->sentry_lock); + + if (page && IS_NODESEG(type)) + fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); + + mutex_unlock(&curseg->curseg_mutex); +} + +static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, + block_t old_blkaddr, block_t *new_blkaddr, + struct f2fs_summary *sum, struct f2fs_io_info *fio) +{ + int type = __get_segment_type(page, fio->type); + + allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type); + + /* writeout dirty page into bdev */ + f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio); +} + +void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) +{ + struct f2fs_io_info fio = { + .type = META, + .rw = WRITE_SYNC | REQ_META | REQ_PRIO + }; + + set_page_writeback(page); + f2fs_submit_page_mbio(sbi, page, page->index, &fio); +} + +void write_node_page(struct f2fs_sb_info *sbi, struct page *page, + struct f2fs_io_info *fio, + unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr) +{ + struct f2fs_summary sum; + set_summary(&sum, nid, 0, 0); + do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio); +} + +void write_data_page(struct page *page, struct dnode_of_data *dn, + block_t *new_blkaddr, struct f2fs_io_info *fio) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb); + struct f2fs_summary sum; + struct node_info ni; + + f2fs_bug_on(dn->data_blkaddr == NULL_ADDR); + get_node_info(sbi, dn->nid, &ni); + set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); + + do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio); +} + +void rewrite_data_page(struct page *page, block_t old_blkaddr, + struct f2fs_io_info *fio) +{ + struct inode *inode = page->mapping->host; + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + f2fs_submit_page_mbio(sbi, page, old_blkaddr, fio); +} + +void recover_data_page(struct f2fs_sb_info *sbi, + struct page *page, struct f2fs_summary *sum, + block_t old_blkaddr, block_t new_blkaddr) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct curseg_info *curseg; + unsigned int segno, old_cursegno; + struct seg_entry *se; + int type; + + segno = GET_SEGNO(sbi, new_blkaddr); + se = get_seg_entry(sbi, segno); + type = se->type; + + if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { + if (old_blkaddr == NULL_ADDR) + type = CURSEG_COLD_DATA; + else + type = CURSEG_WARM_DATA; + } + curseg = CURSEG_I(sbi, type); + + mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); + + old_cursegno = curseg->segno; + + /* change the current segment */ + if (segno != curseg->segno) { + curseg->next_segno = segno; + change_curseg(sbi, type, true); + } + + curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & + (sbi->blocks_per_seg - 1); + __add_sum_entry(sbi, type, sum); + + refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); + + locate_dirty_segment(sbi, old_cursegno); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + + mutex_unlock(&sit_i->sentry_lock); + mutex_unlock(&curseg->curseg_mutex); +} + +void rewrite_node_page(struct f2fs_sb_info *sbi, + struct page *page, struct f2fs_summary *sum, + block_t old_blkaddr, block_t new_blkaddr) +{ + struct sit_info *sit_i = SIT_I(sbi); + int type = CURSEG_WARM_NODE; + struct curseg_info *curseg; + unsigned int segno, old_cursegno; + block_t 
next_blkaddr = next_blkaddr_of_node(page); + unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr); + struct f2fs_io_info fio = { + .type = NODE, + .rw = WRITE_SYNC, + }; + + curseg = CURSEG_I(sbi, type); + + mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); + + segno = GET_SEGNO(sbi, new_blkaddr); + old_cursegno = curseg->segno; + + /* change the current segment */ + if (segno != curseg->segno) { + curseg->next_segno = segno; + change_curseg(sbi, type, true); + } + curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & + (sbi->blocks_per_seg - 1); + __add_sum_entry(sbi, type, sum); + + /* change the current log to the next block addr in advance */ + if (next_segno != segno) { + curseg->next_segno = next_segno; + change_curseg(sbi, type, true); + } + curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) & + (sbi->blocks_per_seg - 1); + + /* rewrite node page */ + set_page_writeback(page); + f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio); + f2fs_submit_merged_bio(sbi, NODE, WRITE); + refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); + + locate_dirty_segment(sbi, old_cursegno); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + + mutex_unlock(&sit_i->sentry_lock); + mutex_unlock(&curseg->curseg_mutex); +} + +void f2fs_wait_on_page_writeback(struct page *page, + enum page_type type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); + if (PageWriteback(page)) { + f2fs_submit_merged_bio(sbi, type, WRITE); + wait_on_page_writeback(page); + } +} + +static int read_compacted_summaries(struct f2fs_sb_info *sbi) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct curseg_info *seg_i; + unsigned char *kaddr; + struct page *page; + block_t start; + int i, j, offset; + + start = start_sum_block(sbi); + + page = get_meta_page(sbi, start++); + kaddr = (unsigned char *)page_address(page); + + /* Step 1: restore nat cache */ + seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); + memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE); + + /* Step 2: restore sit cache */ + seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); + memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, + SUM_JOURNAL_SIZE); + offset = 2 * SUM_JOURNAL_SIZE; + + /* Step 3: restore summary entries */ + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + unsigned short blk_off; + unsigned int segno; + + seg_i = CURSEG_I(sbi, i); + segno = le32_to_cpu(ckpt->cur_data_segno[i]); + blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); + seg_i->next_segno = segno; + reset_curseg(sbi, i, 0); + seg_i->alloc_type = ckpt->alloc_type[i]; + seg_i->next_blkoff = blk_off; + + if (seg_i->alloc_type == SSR) + blk_off = sbi->blocks_per_seg; + + for (j = 0; j < blk_off; j++) { + struct f2fs_summary *s; + s = (struct f2fs_summary *)(kaddr + offset); + seg_i->sum_blk->entries[j] = *s; + offset += SUMMARY_SIZE; + if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + SUM_FOOTER_SIZE) + continue; + + f2fs_put_page(page, 1); + page = NULL; + + page = get_meta_page(sbi, start++); + kaddr = (unsigned char *)page_address(page); + offset = 0; + } + } + f2fs_put_page(page, 1); + return 0; +} + +static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct f2fs_summary_block *sum; + struct curseg_info *curseg; + struct page *new; + unsigned short blk_off; + unsigned int segno = 0; + block_t blk_addr = 0; + + /* get segment number and block addr */ + if (IS_DATASEG(type)) { + segno = le32_to_cpu(ckpt->cur_data_segno[type]); + blk_off = 
le16_to_cpu(ckpt->cur_data_blkoff[type - + CURSEG_HOT_DATA]); + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) + blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); + else + blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); + } else { + segno = le32_to_cpu(ckpt->cur_node_segno[type - + CURSEG_HOT_NODE]); + blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - + CURSEG_HOT_NODE]); + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) + blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, + type - CURSEG_HOT_NODE); + else + blk_addr = GET_SUM_BLOCK(sbi, segno); + } + + new = get_meta_page(sbi, blk_addr); + sum = (struct f2fs_summary_block *)page_address(new); + + if (IS_NODESEG(type)) { + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) { + struct f2fs_summary *ns = &sum->entries[0]; + int i; + for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { + ns->version = 0; + ns->ofs_in_node = 0; + } + } else { + if (restore_node_summary(sbi, segno, sum)) { + f2fs_put_page(new, 1); + return -EINVAL; + } + } + } + + /* set uncompleted segment to curseg */ + curseg = CURSEG_I(sbi, type); + mutex_lock(&curseg->curseg_mutex); + memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE); + curseg->next_segno = segno; + reset_curseg(sbi, type, 0); + curseg->alloc_type = ckpt->alloc_type[type]; + curseg->next_blkoff = blk_off; + mutex_unlock(&curseg->curseg_mutex); + f2fs_put_page(new, 1); + return 0; +} + +static int restore_curseg_summaries(struct f2fs_sb_info *sbi) +{ + int type = CURSEG_HOT_DATA; + + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { + /* restore for compacted data summary */ + if (read_compacted_summaries(sbi)) + return -EINVAL; + type = CURSEG_HOT_NODE; + } + + for (; type <= CURSEG_COLD_NODE; type++) + if (read_normal_summaries(sbi, type)) + return -EINVAL; + return 0; +} + +static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) +{ + struct page *page; + unsigned char *kaddr; + struct f2fs_summary *summary; + struct curseg_info *seg_i; + int written_size = 0; + int i, j; + + page = grab_meta_page(sbi, blkaddr++); + kaddr = (unsigned char *)page_address(page); + + /* Step 1: write nat cache */ + seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); + memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE); + written_size += SUM_JOURNAL_SIZE; + + /* Step 2: write sit cache */ + seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); + memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, + SUM_JOURNAL_SIZE); + written_size += SUM_JOURNAL_SIZE; + + /* Step 3: write summary entries */ + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + unsigned short blkoff; + seg_i = CURSEG_I(sbi, i); + if (sbi->ckpt->alloc_type[i] == SSR) + blkoff = sbi->blocks_per_seg; + else + blkoff = curseg_blkoff(sbi, i); + + for (j = 0; j < blkoff; j++) { + if (!page) { + page = grab_meta_page(sbi, blkaddr++); + kaddr = (unsigned char *)page_address(page); + written_size = 0; + } + summary = (struct f2fs_summary *)(kaddr + written_size); + *summary = seg_i->sum_blk->entries[j]; + written_size += SUMMARY_SIZE; + + if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + SUM_FOOTER_SIZE) + continue; + + set_page_dirty(page); + f2fs_put_page(page, 1); + page = NULL; + } + } + if (page) { + set_page_dirty(page); + f2fs_put_page(page, 1); + } +} + +static void write_normal_summaries(struct f2fs_sb_info *sbi, + block_t blkaddr, int type) +{ + int i, end; + if (IS_DATASEG(type)) + end = type + NR_CURSEG_DATA_TYPE; + else + end = type + NR_CURSEG_NODE_TYPE; + + for (i = type; i < end; i++) { + struct curseg_info *sum = CURSEG_I(sbi, 
i); + mutex_lock(&sum->curseg_mutex); + write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type)); + mutex_unlock(&sum->curseg_mutex); + } +} + +void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) +{ + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) + write_compacted_summaries(sbi, start_blk); + else + write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); +} + +void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) +{ + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) + write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); +} + +int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, + unsigned int val, int alloc) +{ + int i; + + if (type == NAT_JOURNAL) { + for (i = 0; i < nats_in_cursum(sum); i++) { + if (le32_to_cpu(nid_in_journal(sum, i)) == val) + return i; + } + if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) + return update_nats_in_cursum(sum, 1); + } else if (type == SIT_JOURNAL) { + for (i = 0; i < sits_in_cursum(sum); i++) + if (le32_to_cpu(segno_in_journal(sum, i)) == val) + return i; + if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES) + return update_sits_in_cursum(sum, 1); + } + return -1; +} + +static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); + block_t blk_addr = sit_i->sit_base_addr + offset; + + check_seg_range(sbi, segno); + + /* calculate sit block address */ + if (f2fs_test_bit(offset, sit_i->sit_bitmap)) + blk_addr += sit_i->sit_blocks; + + return get_meta_page(sbi, blk_addr); +} + +static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, + unsigned int start) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct page *src_page, *dst_page; + pgoff_t src_off, dst_off; + void *src_addr, *dst_addr; + + src_off = current_sit_addr(sbi, start); + dst_off = next_sit_addr(sbi, src_off); + + /* get current sit block page without lock */ + src_page = get_meta_page(sbi, src_off); + dst_page = grab_meta_page(sbi, dst_off); + f2fs_bug_on(PageDirty(src_page)); + + src_addr = page_address(src_page); + dst_addr = page_address(dst_page); + memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + + set_page_dirty(dst_page); + f2fs_put_page(src_page, 1); + + set_to_next_sit(sit_i, start); + + return dst_page; +} + +static bool flush_sits_in_journal(struct f2fs_sb_info *sbi) +{ + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + int i; + + /* + * If the journal area in the current summary is full of sit entries, + * all the sit entries will be flushed. Otherwise the sit entries + * are not able to replace with newly hot sit entries. + */ + if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) { + for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { + unsigned int segno; + segno = le32_to_cpu(segno_in_journal(sum, i)); + __mark_sit_entry_dirty(sbi, segno); + } + update_sits_in_cursum(sum, -sits_in_cursum(sum)); + return true; + } + return false; +} + +/* + * CP calls this function, which flushes SIT entries including sit_journal, + * and moves prefree segs to free segs. 
+ */ +void flush_sit_entries(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned long *bitmap = sit_i->dirty_sentries_bitmap; + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + unsigned long nsegs = TOTAL_SEGS(sbi); + struct page *page = NULL; + struct f2fs_sit_block *raw_sit = NULL; + unsigned int start = 0, end = 0; + unsigned int segno = -1; + bool flushed; + + mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); + + /* + * "flushed" indicates whether sit entries in journal are flushed + * to the SIT area or not. + */ + flushed = flush_sits_in_journal(sbi); + + while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) { + struct seg_entry *se = get_seg_entry(sbi, segno); + int sit_offset, offset; + + sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); + + /* add discard candidates */ + if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) + add_discard_addrs(sbi, segno, se); + + if (flushed) + goto to_sit_page; + + offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1); + if (offset >= 0) { + segno_in_journal(sum, offset) = cpu_to_le32(segno); + seg_info_to_raw_sit(se, &sit_in_journal(sum, offset)); + goto flush_done; + } +to_sit_page: + if (!page || (start > segno) || (segno > end)) { + if (page) { + f2fs_put_page(page, 1); + page = NULL; + } + + start = START_SEGNO(sit_i, segno); + end = start + SIT_ENTRY_PER_BLOCK - 1; + + /* read sit block that will be updated */ + page = get_next_sit_page(sbi, start); + raw_sit = page_address(page); + } + + /* update entry in SIT block */ + seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]); +flush_done: + __clear_bit(segno, bitmap); + sit_i->dirty_sentries--; + } + mutex_unlock(&sit_i->sentry_lock); + mutex_unlock(&curseg->curseg_mutex); + + /* write out the last modified SIT block */ + f2fs_put_page(page, 1); + + set_prefree_as_free_segments(sbi); +} + +static int build_sit_info(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct sit_info *sit_i; + unsigned int sit_segs, start; + char *src_bitmap, *dst_bitmap; + unsigned int bitmap_size; + + /* allocate memory for SIT information */ + sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); + if (!sit_i) + return -ENOMEM; + + SM_I(sbi)->sit_info = sit_i; + + sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry)); + if (!sit_i->sentries) + return -ENOMEM; + + bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!sit_i->dirty_sentries_bitmap) + return -ENOMEM; + + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + sit_i->sentries[start].cur_valid_map + = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); + sit_i->sentries[start].ckpt_valid_map + = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); + if (!sit_i->sentries[start].cur_valid_map + || !sit_i->sentries[start].ckpt_valid_map) + return -ENOMEM; + } + + if (sbi->segs_per_sec > 1) { + sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) * + sizeof(struct sec_entry)); + if (!sit_i->sec_entries) + return -ENOMEM; + } + + /* get information related to SIT */ + sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; + + /* setup SIT bitmap from checkpoint pack */ + bitmap_size = __bitmap_size(sbi, SIT_BITMAP); + src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); + + dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); + if (!dst_bitmap) + return -ENOMEM; + + /* init SIT 
information */ + sit_i->s_ops = &default_salloc_ops; + + sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); + sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; + sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count); + sit_i->sit_bitmap = dst_bitmap; + sit_i->bitmap_size = bitmap_size; + sit_i->dirty_sentries = 0; + sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; + sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); + sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; + mutex_init(&sit_i->sentry_lock); + return 0; +} + +static int build_free_segmap(struct f2fs_sb_info *sbi) +{ + struct f2fs_sm_info *sm_info = SM_I(sbi); + struct free_segmap_info *free_i; + unsigned int bitmap_size, sec_bitmap_size; + + /* allocate memory for free segmap information */ + free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); + if (!free_i) + return -ENOMEM; + + SM_I(sbi)->free_info = free_i; + + bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL); + if (!free_i->free_segmap) + return -ENOMEM; + + sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi)); + free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); + if (!free_i->free_secmap) + return -ENOMEM; + + /* set all segments as dirty temporarily */ + memset(free_i->free_segmap, 0xff, bitmap_size); + memset(free_i->free_secmap, 0xff, sec_bitmap_size); + + /* init free segmap information */ + free_i->start_segno = + (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr); + free_i->free_segments = 0; + free_i->free_sections = 0; + rwlock_init(&free_i->segmap_lock); + return 0; +} + +static int build_curseg(struct f2fs_sb_info *sbi) +{ + struct curseg_info *array; + int i; + + array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL); + if (!array) + return -ENOMEM; + + SM_I(sbi)->curseg_array = array; + + for (i = 0; i < NR_CURSEG_TYPE; i++) { + mutex_init(&array[i].curseg_mutex); + array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + if (!array[i].sum_blk) + return -ENOMEM; + array[i].segno = NULL_SEGNO; + array[i].next_blkoff = 0; + } + return restore_curseg_summaries(sbi); +} + +static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages) +{ + struct address_space *mapping = META_MAPPING(sbi); + struct page *page; + block_t blk_addr, prev_blk_addr = 0; + int sit_blk_cnt = SIT_BLK_CNT(sbi); + int blkno = start; + struct f2fs_io_info fio = { + .type = META, + .rw = READ_SYNC | REQ_META | REQ_PRIO + }; + + for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) { + + blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK); + + if (blkno != start && prev_blk_addr + 1 != blk_addr) + break; + prev_blk_addr = blk_addr; +repeat: + page = grab_cache_page(mapping, blk_addr); + if (!page) { + cond_resched(); + goto repeat; + } + if (PageUptodate(page)) { + mark_page_accessed(page); + f2fs_put_page(page, 1); + continue; + } + + f2fs_submit_page_mbio(sbi, page, blk_addr, &fio); + + mark_page_accessed(page); + f2fs_put_page(page, 0); + } + + f2fs_submit_merged_bio(sbi, META, READ); + return blkno - start; +} + +static void build_sit_entries(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + int sit_blk_cnt = SIT_BLK_CNT(sbi); + unsigned int i, start, end; + unsigned int readed, start_blk = 0; + int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); + + do { + readed = ra_sit_pages(sbi, start_blk, nrpages); 
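+		/* ra_sit_pages() returned the number of consecutive SIT blocks submitted for readahead; walk the SIT entries covered by those blocks */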
+ + start = start_blk * sit_i->sents_per_block; + end = (start_blk + readed) * sit_i->sents_per_block; + + for (; start < end && start < TOTAL_SEGS(sbi); start++) { + struct seg_entry *se = &sit_i->sentries[start]; + struct f2fs_sit_block *sit_blk; + struct f2fs_sit_entry sit; + struct page *page; + + mutex_lock(&curseg->curseg_mutex); + for (i = 0; i < sits_in_cursum(sum); i++) { + if (le32_to_cpu(segno_in_journal(sum, i)) + == start) { + sit = sit_in_journal(sum, i); + mutex_unlock(&curseg->curseg_mutex); + goto got_it; + } + } + mutex_unlock(&curseg->curseg_mutex); + + page = get_current_sit_page(sbi, start); + sit_blk = (struct f2fs_sit_block *)page_address(page); + sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; + f2fs_put_page(page, 1); +got_it: + check_block_count(sbi, start, &sit); + seg_info_from_raw_sit(se, &sit); + if (sbi->segs_per_sec > 1) { + struct sec_entry *e = get_sec_entry(sbi, start); + e->valid_blocks += se->valid_blocks; + } + } + start_blk += readed; + } while (start_blk < sit_blk_cnt); +} + +static void init_free_segmap(struct f2fs_sb_info *sbi) +{ + unsigned int start; + int type; + + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + struct seg_entry *sentry = get_seg_entry(sbi, start); + if (!sentry->valid_blocks) + __set_free(sbi, start); + } + + /* set use the current segments */ + for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { + struct curseg_info *curseg_t = CURSEG_I(sbi, type); + __set_test_and_inuse(sbi, curseg_t->segno); + } +} + +static void init_dirty_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi); + unsigned short valid_blocks; + + while (1) { + /* find dirty segment based on free segmap */ + segno = find_next_inuse(free_i, total_segs, offset); + if (segno >= total_segs) + break; + offset = segno + 1; + valid_blocks = get_valid_blocks(sbi, segno, 0); + if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks) + continue; + mutex_lock(&dirty_i->seglist_lock); + __locate_dirty_segment(sbi, segno, DIRTY); + mutex_unlock(&dirty_i->seglist_lock); + } +} + +static int init_victim_secmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi)); + + dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!dirty_i->victim_secmap) + return -ENOMEM; + return 0; +} + +static int build_dirty_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i; + unsigned int bitmap_size, i; + + /* allocate memory for dirty segments list information */ + dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); + if (!dirty_i) + return -ENOMEM; + + SM_I(sbi)->dirty_info = dirty_i; + mutex_init(&dirty_i->seglist_lock); + + bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + + for (i = 0; i < NR_DIRTY_TYPE; i++) { + dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); + if (!dirty_i->dirty_segmap[i]) + return -ENOMEM; + } + + init_dirty_segmap(sbi); + return init_victim_secmap(sbi); +} + +/* + * Update min, max modified time for cost-benefit GC algorithm + */ +static void init_min_max_mtime(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int segno; + + mutex_lock(&sit_i->sentry_lock); + + sit_i->min_mtime = LLONG_MAX; + + for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) { + unsigned int i; + unsigned long long mtime = 0; 
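+			/* average the modification times of all segments in this section */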
+ + for (i = 0; i < sbi->segs_per_sec; i++) + mtime += get_seg_entry(sbi, segno + i)->mtime; + + mtime = div_u64(mtime, sbi->segs_per_sec); + + if (sit_i->min_mtime > mtime) + sit_i->min_mtime = mtime; + } + sit_i->max_mtime = get_mtime(sbi); + mutex_unlock(&sit_i->sentry_lock); +} + +int build_segment_manager(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct f2fs_sm_info *sm_info; + int err; + + sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); + if (!sm_info) + return -ENOMEM; + + /* init sm info */ + sbi->sm_info = sm_info; + INIT_LIST_HEAD(&sm_info->wblist_head); + spin_lock_init(&sm_info->wblist_lock); + sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); + sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); + sm_info->segment_count = le32_to_cpu(raw_super->segment_count); + sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); + sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); + sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); + sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); + sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS; + sm_info->ipu_policy = F2FS_IPU_DISABLE; + sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; + + INIT_LIST_HEAD(&sm_info->discard_list); + sm_info->nr_discards = 0; + sm_info->max_discards = 0; + + err = build_sit_info(sbi); + if (err) + return err; + err = build_free_segmap(sbi); + if (err) + return err; + err = build_curseg(sbi); + if (err) + return err; + + /* reinit free segmap based on SIT */ + build_sit_entries(sbi); + + init_free_segmap(sbi); + err = build_dirty_segmap(sbi); + if (err) + return err; + + init_min_max_mtime(sbi); + return 0; +} + +static void discard_dirty_segmap(struct f2fs_sb_info *sbi, + enum dirty_type dirty_type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + mutex_lock(&dirty_i->seglist_lock); + kfree(dirty_i->dirty_segmap[dirty_type]); + dirty_i->nr_dirty[dirty_type] = 0; + mutex_unlock(&dirty_i->seglist_lock); +} + +static void destroy_victim_secmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + kfree(dirty_i->victim_secmap); +} + +static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + int i; + + if (!dirty_i) + return; + + /* discard pre-free/dirty segments list */ + for (i = 0; i < NR_DIRTY_TYPE; i++) + discard_dirty_segmap(sbi, i); + + destroy_victim_secmap(sbi); + SM_I(sbi)->dirty_info = NULL; + kfree(dirty_i); +} + +static void destroy_curseg(struct f2fs_sb_info *sbi) +{ + struct curseg_info *array = SM_I(sbi)->curseg_array; + int i; + + if (!array) + return; + SM_I(sbi)->curseg_array = NULL; + for (i = 0; i < NR_CURSEG_TYPE; i++) + kfree(array[i].sum_blk); + kfree(array); +} + +static void destroy_free_segmap(struct f2fs_sb_info *sbi) +{ + struct free_segmap_info *free_i = SM_I(sbi)->free_info; + if (!free_i) + return; + SM_I(sbi)->free_info = NULL; + kfree(free_i->free_segmap); + kfree(free_i->free_secmap); + kfree(free_i); +} + +static void destroy_sit_info(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int start; + + if (!sit_i) + return; + + if (sit_i->sentries) { + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + kfree(sit_i->sentries[start].cur_valid_map); + kfree(sit_i->sentries[start].ckpt_valid_map); + } + } + vfree(sit_i->sentries); + vfree(sit_i->sec_entries); + 
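+	/* the entry arrays above were vzalloc'ed; the bitmaps below came from kzalloc/kmemdup */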
kfree(sit_i->dirty_sentries_bitmap); + + SM_I(sbi)->sit_info = NULL; + kfree(sit_i->sit_bitmap); + kfree(sit_i); +} + +void destroy_segment_manager(struct f2fs_sb_info *sbi) +{ + struct f2fs_sm_info *sm_info = SM_I(sbi); + if (!sm_info) + return; + destroy_dirty_segmap(sbi); + destroy_curseg(sbi); + destroy_free_segmap(sbi); + destroy_sit_info(sbi); + sbi->sm_info = NULL; + kfree(sm_info); +} + +int __init create_segment_manager_caches(void) +{ + discard_entry_slab = f2fs_kmem_cache_create("discard_entry", + sizeof(struct discard_entry), NULL); + if (!discard_entry_slab) + return -ENOMEM; + return 0; +} + +void destroy_segment_manager_caches(void) +{ + kmem_cache_destroy(discard_entry_slab); +} diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h new file mode 100644 index 0000000000000..5731682d7516a --- /dev/null +++ b/fs/f2fs/segment.h @@ -0,0 +1,684 @@ +/* + * fs/f2fs/segment.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +/* constant macro */ +#define NULL_SEGNO ((unsigned int)(~0)) +#define NULL_SECNO ((unsigned int)(~0)) + +#define DEF_RECLAIM_PREFREE_SEGMENTS 100 /* 200MB of prefree segments */ + +/* L: Logical segment # in volume, R: Relative segment # in main area */ +#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno) +#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno) + +#define IS_DATASEG(t) (t <= CURSEG_COLD_DATA) +#define IS_NODESEG(t) (t >= CURSEG_HOT_NODE) + +#define IS_CURSEG(sbi, seg) \ + ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \ + (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \ + (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \ + (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \ + (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \ + (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno)) + +#define IS_CURSEC(sbi, secno) \ + ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \ + sbi->segs_per_sec) || \ + (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \ + sbi->segs_per_sec) || \ + (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \ + sbi->segs_per_sec) || \ + (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \ + sbi->segs_per_sec) || \ + (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \ + sbi->segs_per_sec) || \ + (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \ + sbi->segs_per_sec)) \ + +#define START_BLOCK(sbi, segno) \ + (SM_I(sbi)->seg0_blkaddr + \ + (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg)) +#define NEXT_FREE_BLKADDR(sbi, curseg) \ + (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff) + +#define MAIN_BASE_BLOCK(sbi) (SM_I(sbi)->main_blkaddr) + +#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) \ + ((blk_addr) - SM_I(sbi)->seg0_blkaddr) +#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \ + (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg) +#define GET_SEGNO(sbi, blk_addr) \ + (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? 
\ + NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \ + GET_SEGNO_FROM_SEG0(sbi, blk_addr))) +#define GET_SECNO(sbi, segno) \ + ((segno) / sbi->segs_per_sec) +#define GET_ZONENO_FROM_SEGNO(sbi, segno) \ + ((segno / sbi->segs_per_sec) / sbi->secs_per_zone) + +#define GET_SUM_BLOCK(sbi, segno) \ + ((sbi->sm_info->ssa_blkaddr) + segno) + +#define GET_SUM_TYPE(footer) ((footer)->entry_type) +#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type) + +#define SIT_ENTRY_OFFSET(sit_i, segno) \ + (segno % sit_i->sents_per_block) +#define SIT_BLOCK_OFFSET(sit_i, segno) \ + (segno / SIT_ENTRY_PER_BLOCK) +#define START_SEGNO(sit_i, segno) \ + (SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK) +#define SIT_BLK_CNT(sbi) \ + ((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK) +#define f2fs_bitmap_size(nr) \ + (BITS_TO_LONGS(nr) * sizeof(unsigned long)) +#define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments) +#define TOTAL_SECS(sbi) (sbi->total_sections) + +#define SECTOR_FROM_BLOCK(sbi, blk_addr) \ + (((sector_t)blk_addr) << (sbi)->log_sectors_per_block) +#define SECTOR_TO_BLOCK(sbi, sectors) \ + (sectors >> (sbi)->log_sectors_per_block) +#define MAX_BIO_BLOCKS(max_hw_blocks) \ + (min((int)max_hw_blocks, BIO_MAX_PAGES)) + +/* + * indicate a block allocation direction: RIGHT and LEFT. + * RIGHT means allocating new sections towards the end of volume. + * LEFT means the opposite direction. + */ +enum { + ALLOC_RIGHT = 0, + ALLOC_LEFT +}; + +/* + * In the victim_sel_policy->alloc_mode, there are two block allocation modes. + * LFS writes data sequentially with cleaning operations. + * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations. + */ +enum { + LFS = 0, + SSR +}; + +/* + * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes. + * GC_CB is based on cost-benefit algorithm. + * GC_GREEDY is based on greedy algorithm. + */ +enum { + GC_CB = 0, + GC_GREEDY +}; + +/* + * BG_GC means the background cleaning job. + * FG_GC means the on-demand cleaning job. + */ +enum { + BG_GC = 0, + FG_GC +}; + +/* for a function parameter to select a victim segment */ +struct victim_sel_policy { + int alloc_mode; /* LFS or SSR */ + int gc_mode; /* GC_CB or GC_GREEDY */ + unsigned long *dirty_segmap; /* dirty segment bitmap */ + unsigned int max_search; /* maximum # of segments to search */ + unsigned int offset; /* last scanned bitmap offset */ + unsigned int ofs_unit; /* bitmap search unit */ + unsigned int min_cost; /* minimum cost */ + unsigned int min_segno; /* segment # having min. cost */ +}; + +struct seg_entry { + unsigned short valid_blocks; /* # of valid blocks */ + unsigned char *cur_valid_map; /* validity bitmap of blocks */ + /* + * # of valid blocks and the validity bitmap stored in the the last + * checkpoint pack. This information is used by the SSR mode. 
+ */ + unsigned short ckpt_valid_blocks; + unsigned char *ckpt_valid_map; + unsigned char type; /* segment type like CURSEG_XXX_TYPE */ + unsigned long long mtime; /* modification time of the segment */ +}; + +struct sec_entry { + unsigned int valid_blocks; /* # of valid blocks in a section */ +}; + +struct segment_allocation { + void (*allocate_segment)(struct f2fs_sb_info *, int, bool); +}; + +struct sit_info { + const struct segment_allocation *s_ops; + + block_t sit_base_addr; /* start block address of SIT area */ + block_t sit_blocks; /* # of blocks used by SIT area */ + block_t written_valid_blocks; /* # of valid blocks in main area */ + char *sit_bitmap; /* SIT bitmap pointer */ + unsigned int bitmap_size; /* SIT bitmap size */ + + unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */ + unsigned int dirty_sentries; /* # of dirty sentries */ + unsigned int sents_per_block; /* # of SIT entries per block */ + struct mutex sentry_lock; /* to protect SIT cache */ + struct seg_entry *sentries; /* SIT segment-level cache */ + struct sec_entry *sec_entries; /* SIT section-level cache */ + + /* for cost-benefit algorithm in cleaning procedure */ + unsigned long long elapsed_time; /* elapsed time after mount */ + unsigned long long mounted_time; /* mount time */ + unsigned long long min_mtime; /* min. modification time */ + unsigned long long max_mtime; /* max. modification time */ +}; + +struct free_segmap_info { + unsigned int start_segno; /* start segment number logically */ + unsigned int free_segments; /* # of free segments */ + unsigned int free_sections; /* # of free sections */ + rwlock_t segmap_lock; /* free segmap lock */ + unsigned long *free_segmap; /* free segment bitmap */ + unsigned long *free_secmap; /* free section bitmap */ +}; + +/* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */ +enum dirty_type { + DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */ + DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */ + DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */ + DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */ + DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */ + DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */ + DIRTY, /* to count # of dirty segments */ + PRE, /* to count # of entirely obsolete segments */ + NR_DIRTY_TYPE +}; + +struct dirty_seglist_info { + const struct victim_selection *v_ops; /* victim selction operation */ + unsigned long *dirty_segmap[NR_DIRTY_TYPE]; + struct mutex seglist_lock; /* lock for segment bitmaps */ + int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */ + unsigned long *victim_secmap; /* background GC victims */ +}; + +/* victim selection function for cleaning and SSR */ +struct victim_selection { + int (*get_victim)(struct f2fs_sb_info *, unsigned int *, + int, int, char); +}; + +/* for active log information */ +struct curseg_info { + struct mutex curseg_mutex; /* lock for consistency */ + struct f2fs_summary_block *sum_blk; /* cached summary block */ + unsigned char alloc_type; /* current allocation type */ + unsigned int segno; /* current segment number */ + unsigned short next_blkoff; /* next block offset to write */ + unsigned int zone; /* current zone number */ + unsigned int next_segno; /* preallocated segment */ +}; + +/* + * inline functions + */ +static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type) +{ + return (struct curseg_info *)(SM_I(sbi)->curseg_array + type); +} + +static inline 
struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + return &sit_i->sentries[segno]; +} + +static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + return &sit_i->sec_entries[GET_SECNO(sbi, segno)]; +} + +static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi, + unsigned int segno, int section) +{ + /* + * In order to get # of valid blocks in a section instantly from many + * segments, f2fs manages two counting structures separately. + */ + if (section > 1) + return get_sec_entry(sbi, segno)->valid_blocks; + else + return get_seg_entry(sbi, segno)->valid_blocks; +} + +static inline void seg_info_from_raw_sit(struct seg_entry *se, + struct f2fs_sit_entry *rs) +{ + se->valid_blocks = GET_SIT_VBLOCKS(rs); + se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs); + memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); + memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); + se->type = GET_SIT_TYPE(rs); + se->mtime = le64_to_cpu(rs->mtime); +} + +static inline void seg_info_to_raw_sit(struct seg_entry *se, + struct f2fs_sit_entry *rs) +{ + unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) | + se->valid_blocks; + rs->vblocks = cpu_to_le16(raw_vblocks); + memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); + memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); + se->ckpt_valid_blocks = se->valid_blocks; + rs->mtime = cpu_to_le64(se->mtime); +} + +static inline unsigned int find_next_inuse(struct free_segmap_info *free_i, + unsigned int max, unsigned int segno) +{ + unsigned int ret; + read_lock(&free_i->segmap_lock); + ret = find_next_bit(free_i->free_segmap, max, segno); + read_unlock(&free_i->segmap_lock); + return ret; +} + +static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int secno = segno / sbi->segs_per_sec; + unsigned int start_segno = secno * sbi->segs_per_sec; + unsigned int next; + + write_lock(&free_i->segmap_lock); + clear_bit(segno, free_i->free_segmap); + free_i->free_segments++; + + next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno); + if (next >= start_segno + sbi->segs_per_sec) { + clear_bit(secno, free_i->free_secmap); + free_i->free_sections++; + } + write_unlock(&free_i->segmap_lock); +} + +static inline void __set_inuse(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int secno = segno / sbi->segs_per_sec; + set_bit(segno, free_i->free_segmap); + free_i->free_segments--; + if (!test_and_set_bit(secno, free_i->free_secmap)) + free_i->free_sections--; +} + +static inline void __set_test_and_free(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int secno = segno / sbi->segs_per_sec; + unsigned int start_segno = secno * sbi->segs_per_sec; + unsigned int next; + + write_lock(&free_i->segmap_lock); + if (test_and_clear_bit(segno, free_i->free_segmap)) { + free_i->free_segments++; + + next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), + start_segno); + if (next >= start_segno + sbi->segs_per_sec) { + if (test_and_clear_bit(secno, free_i->free_secmap)) + free_i->free_sections++; + } + } + write_unlock(&free_i->segmap_lock); +} + +static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct 
free_segmap_info *free_i = FREE_I(sbi); + unsigned int secno = segno / sbi->segs_per_sec; + write_lock(&free_i->segmap_lock); + if (!test_and_set_bit(segno, free_i->free_segmap)) { + free_i->free_segments--; + if (!test_and_set_bit(secno, free_i->free_secmap)) + free_i->free_sections--; + } + write_unlock(&free_i->segmap_lock); +} + +static inline void get_sit_bitmap(struct f2fs_sb_info *sbi, + void *dst_addr) +{ + struct sit_info *sit_i = SIT_I(sbi); + memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size); +} + +static inline block_t written_block_count(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + block_t vblocks; + + mutex_lock(&sit_i->sentry_lock); + vblocks = sit_i->written_valid_blocks; + mutex_unlock(&sit_i->sentry_lock); + + return vblocks; +} + +static inline unsigned int free_segments(struct f2fs_sb_info *sbi) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int free_segs; + + read_lock(&free_i->segmap_lock); + free_segs = free_i->free_segments; + read_unlock(&free_i->segmap_lock); + + return free_segs; +} + +static inline int reserved_segments(struct f2fs_sb_info *sbi) +{ + return SM_I(sbi)->reserved_segments; +} + +static inline unsigned int free_sections(struct f2fs_sb_info *sbi) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int free_secs; + + read_lock(&free_i->segmap_lock); + free_secs = free_i->free_sections; + read_unlock(&free_i->segmap_lock); + + return free_secs; +} + +static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi) +{ + return DIRTY_I(sbi)->nr_dirty[PRE]; +} + +static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi) +{ + return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] + + DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] + + DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] + + DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] + + DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] + + DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE]; +} + +static inline int overprovision_segments(struct f2fs_sb_info *sbi) +{ + return SM_I(sbi)->ovp_segments; +} + +static inline int overprovision_sections(struct f2fs_sb_info *sbi) +{ + return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec; +} + +static inline int reserved_sections(struct f2fs_sb_info *sbi) +{ + return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec; +} + +static inline bool need_SSR(struct f2fs_sb_info *sbi) +{ + return (prefree_segments(sbi) / sbi->segs_per_sec) + + free_sections(sbi) < overprovision_sections(sbi); +} + +static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed) +{ + int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); + int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); + + if (unlikely(sbi->por_doing)) + return false; + + return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs + + reserved_sections(sbi)); +} + +static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi) +{ + return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments; +} + +static inline int utilization(struct f2fs_sb_info *sbi) +{ + return div_u64((u64)valid_user_blocks(sbi) * 100, + sbi->user_block_count); +} + +/* + * Sometimes f2fs may be better to drop out-of-place update policy. + * And, users can control the policy through sysfs entries. + * There are five policies with triggering conditions as follows. 
+ * F2FS_IPU_FORCE - all the time, + * F2FS_IPU_SSR - if SSR mode is activated, + * F2FS_IPU_UTIL - if FS utilization is over threshold, + * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over + * threshold, + * F2FS_IPU_DISABLE - disable IPU. (=default option) + */ +#define DEF_MIN_IPU_UTIL 70 + +enum { + F2FS_IPU_FORCE, + F2FS_IPU_SSR, + F2FS_IPU_UTIL, + F2FS_IPU_SSR_UTIL, + F2FS_IPU_DISABLE, +}; + +static inline bool need_inplace_update(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + + /* IPU can be done only for the user data */ + if (S_ISDIR(inode->i_mode)) + return false; + + switch (SM_I(sbi)->ipu_policy) { + case F2FS_IPU_FORCE: + return true; + case F2FS_IPU_SSR: + if (need_SSR(sbi)) + return true; + break; + case F2FS_IPU_UTIL: + if (utilization(sbi) > SM_I(sbi)->min_ipu_util) + return true; + break; + case F2FS_IPU_SSR_UTIL: + if (need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util) + return true; + break; + case F2FS_IPU_DISABLE: + break; + } + return false; +} + +static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi, + int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + return curseg->segno; +} + +static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi, + int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + return curseg->alloc_type; +} + +static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + return curseg->next_blkoff; +} + +#ifdef CONFIG_F2FS_CHECK_FS +static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno) +{ + unsigned int end_segno = SM_I(sbi)->segment_count - 1; + BUG_ON(segno > end_segno); +} + +static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr) +{ + struct f2fs_sm_info *sm_info = SM_I(sbi); + block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg; + block_t start_addr = sm_info->seg0_blkaddr; + block_t end_addr = start_addr + total_blks - 1; + BUG_ON(blk_addr < start_addr); + BUG_ON(blk_addr > end_addr); +} + +/* + * Summary block is always treated as invalid block + */ +static inline void check_block_count(struct f2fs_sb_info *sbi, + int segno, struct f2fs_sit_entry *raw_sit) +{ + struct f2fs_sm_info *sm_info = SM_I(sbi); + unsigned int end_segno = sm_info->segment_count - 1; + bool is_valid = test_bit_le(0, raw_sit->valid_map) ? 
true : false; + int valid_blocks = 0; + int cur_pos = 0, next_pos; + + /* check segment usage */ + BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg); + + /* check boundary of a given segment number */ + BUG_ON(segno > end_segno); + + /* check bitmap with valid block count */ + do { + if (is_valid) { + next_pos = find_next_zero_bit_le(&raw_sit->valid_map, + sbi->blocks_per_seg, + cur_pos); + valid_blocks += next_pos - cur_pos; + } else + next_pos = find_next_bit_le(&raw_sit->valid_map, + sbi->blocks_per_seg, + cur_pos); + cur_pos = next_pos; + is_valid = !is_valid; + } while (cur_pos < sbi->blocks_per_seg); + BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks); +} +#else +#define check_seg_range(sbi, segno) +#define verify_block_addr(sbi, blk_addr) +#define check_block_count(sbi, segno, raw_sit) +#endif + +static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi, + unsigned int start) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start); + block_t blk_addr = sit_i->sit_base_addr + offset; + + check_seg_range(sbi, start); + + /* calculate sit block address */ + if (f2fs_test_bit(offset, sit_i->sit_bitmap)) + blk_addr += sit_i->sit_blocks; + + return blk_addr; +} + +static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi, + pgoff_t block_addr) +{ + struct sit_info *sit_i = SIT_I(sbi); + block_addr -= sit_i->sit_base_addr; + if (block_addr < sit_i->sit_blocks) + block_addr += sit_i->sit_blocks; + else + block_addr -= sit_i->sit_blocks; + + return block_addr + sit_i->sit_base_addr; +} + +static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start) +{ + unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start); + + if (f2fs_test_bit(block_off, sit_i->sit_bitmap)) + f2fs_clear_bit(block_off, sit_i->sit_bitmap); + else + f2fs_set_bit(block_off, sit_i->sit_bitmap); +} + +static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec - + sit_i->mounted_time; +} + +static inline void set_summary(struct f2fs_summary *sum, nid_t nid, + unsigned int ofs_in_node, unsigned char version) +{ + sum->nid = cpu_to_le32(nid); + sum->ofs_in_node = cpu_to_le16(ofs_in_node); + sum->version = version; +} + +static inline block_t start_sum_block(struct f2fs_sb_info *sbi) +{ + return __start_cp_addr(sbi) + + le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); +} + +static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) +{ + return __start_cp_addr(sbi) + + le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count) + - (base + 1) + type; +} + +static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) +{ + if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno)) + return true; + return false; +} + +static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi) +{ + struct block_device *bdev = sbi->sb->s_bdev; + struct request_queue *q = bdev_get_queue(bdev); + return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q)); +} diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c new file mode 100644 index 0000000000000..3a3d3662021e1 --- /dev/null +++ b/fs/f2fs/super.c @@ -0,0 +1,1251 @@ +/* + * fs/f2fs/super.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "node.h" +#include "segment.h" +#include "xattr.h" +#include "gc.h" + +#define CREATE_TRACE_POINTS +#include + +static struct proc_dir_entry *f2fs_proc_root; +static struct kmem_cache *f2fs_inode_cachep; +static struct kset *f2fs_kset; + +enum { + Opt_gc_background, + Opt_disable_roll_forward, + Opt_discard, + Opt_noheap, + Opt_user_xattr, + Opt_nouser_xattr, + Opt_acl, + Opt_noacl, + Opt_active_logs, + Opt_disable_ext_identify, + Opt_inline_xattr, + Opt_android_emu, + Opt_err_continue, + Opt_err_panic, + Opt_err_recover, + Opt_inline_data, + Opt_err, +}; + +static match_table_t f2fs_tokens = { + {Opt_gc_background, "background_gc=%s"}, + {Opt_disable_roll_forward, "disable_roll_forward"}, + {Opt_discard, "discard"}, + {Opt_noheap, "no_heap"}, + {Opt_user_xattr, "user_xattr"}, + {Opt_nouser_xattr, "nouser_xattr"}, + {Opt_acl, "acl"}, + {Opt_noacl, "noacl"}, + {Opt_active_logs, "active_logs=%u"}, + {Opt_disable_ext_identify, "disable_ext_identify"}, + {Opt_inline_xattr, "inline_xattr"}, + {Opt_android_emu, "android_emu=%s"}, + {Opt_err_continue, "errors=continue"}, + {Opt_err_panic, "errors=panic"}, + {Opt_err_recover, "errors=recover"}, + {Opt_inline_data, "inline_data"}, + {Opt_err, NULL}, +}; + +/* Sysfs support for f2fs */ +enum { + GC_THREAD, /* struct f2fs_gc_thread */ + SM_INFO, /* struct f2fs_sm_info */ + F2FS_SBI, /* struct f2fs_sb_info */ +}; + +struct f2fs_attr { + struct attribute attr; + ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *); + ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *, + const char *, size_t); + int struct_type; + int offset; +}; + +static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) +{ + if (struct_type == GC_THREAD) + return (unsigned char *)sbi->gc_thread; + else if (struct_type == SM_INFO) + return (unsigned char *)SM_I(sbi); + else if (struct_type == F2FS_SBI) + return (unsigned char *)sbi; + return NULL; +} + +static ssize_t f2fs_sbi_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + unsigned char *ptr = NULL; + unsigned int *ui; + + ptr = __struct_ptr(sbi, a->struct_type); + if (!ptr) + return -EINVAL; + + ui = (unsigned int *)(ptr + a->offset); + + return snprintf(buf, PAGE_SIZE, "%u\n", *ui); +} + +static ssize_t f2fs_sbi_store(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, + const char *buf, size_t count) +{ + unsigned char *ptr; + unsigned long t; + unsigned int *ui; + ssize_t ret; + + ptr = __struct_ptr(sbi, a->struct_type); + if (!ptr) + return -EINVAL; + + ui = (unsigned int *)(ptr + a->offset); + + ret = kstrtoul(skip_spaces(buf), 0, &t); + if (ret < 0) + return ret; + *ui = t; + return count; +} + +static ssize_t f2fs_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, + s_kobj); + struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr); + + return a->show ? a->show(a, sbi, buf) : 0; +} + +static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t len) +{ + struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, + s_kobj); + struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr); + + return a->store ? 
a->store(a, sbi, buf, len) : 0; +} + +static void f2fs_sb_release(struct kobject *kobj) +{ + struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, + s_kobj); + complete(&sbi->s_kobj_unregister); +} + +#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \ +static struct f2fs_attr f2fs_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ + .struct_type = _struct_type, \ + .offset = _offset \ +} + +#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \ + F2FS_ATTR_OFFSET(struct_type, name, 0644, \ + f2fs_sbi_show, f2fs_sbi_store, \ + offsetof(struct struct_name, elname)) + +F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time); +F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time); +F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time); +F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search); + +#define ATTR_LIST(name) (&f2fs_attr_##name.attr) +static struct attribute *f2fs_attrs[] = { + ATTR_LIST(gc_min_sleep_time), + ATTR_LIST(gc_max_sleep_time), + ATTR_LIST(gc_no_gc_sleep_time), + ATTR_LIST(gc_idle), + ATTR_LIST(reclaim_segments), + ATTR_LIST(max_small_discards), + ATTR_LIST(ipu_policy), + ATTR_LIST(min_ipu_util), + ATTR_LIST(max_victim_search), + NULL, +}; + +static const struct sysfs_ops f2fs_attr_ops = { + .show = f2fs_attr_show, + .store = f2fs_attr_store, +}; + +static struct kobj_type f2fs_ktype = { + .default_attrs = f2fs_attrs, + .sysfs_ops = &f2fs_attr_ops, + .release = f2fs_sb_release, +}; + +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); + va_end(args); +} + +static void init_once(void *foo) +{ + struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; + + inode_init_once(&fi->vfs_inode); +} + +static int parse_android_emu(struct f2fs_sb_info *sbi, char *args) +{ + char *sep = args; + char *sepres; + int ret; + + if (!sep) + return -EINVAL; + + sepres = strsep(&sep, ":"); + if (!sep) + return -EINVAL; + ret = kstrtou32(sepres, 0, &sbi->android_emu_uid); + if (ret) + return ret; + + sepres = strsep(&sep, ":"); + if (!sep) + return -EINVAL; + ret = kstrtou32(sepres, 0, &sbi->android_emu_gid); + if (ret) + return ret; + + sepres = strsep(&sep, ":"); + ret = kstrtou16(sepres, 8, &sbi->android_emu_mode); + if (ret) + return ret; + + if (sep && strstr(sep, "nocase")) + sbi->android_emu_flags = F2FS_ANDROID_EMU_NOCASE; + + return 0; +} + +static int parse_options(struct super_block *sb, char *options) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + substring_t args[MAX_OPT_ARGS]; + char *p, *name; + int arg = 0; + + if (!options) + return 0; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + if (!*p) + continue; + /* + * Initialize args struct so we know whether arg was + * found; some options take optional arguments. 
+ */ + args[0].to = args[0].from = NULL; + token = match_token(p, f2fs_tokens, args); + + switch (token) { + case Opt_gc_background: + name = match_strdup(&args[0]); + + if (!name) + return -ENOMEM; + if (!strncmp(name, "on", 2)) + set_opt(sbi, BG_GC); + else if (!strncmp(name, "off", 3)) + clear_opt(sbi, BG_GC); + else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; + case Opt_disable_roll_forward: + set_opt(sbi, DISABLE_ROLL_FORWARD); + break; + case Opt_discard: + set_opt(sbi, DISCARD); + break; + case Opt_noheap: + set_opt(sbi, NOHEAP); + break; +#ifdef CONFIG_F2FS_FS_XATTR + case Opt_user_xattr: + set_opt(sbi, XATTR_USER); + break; + case Opt_nouser_xattr: + clear_opt(sbi, XATTR_USER); + break; + case Opt_inline_xattr: + set_opt(sbi, INLINE_XATTR); + break; +#else + case Opt_user_xattr: + f2fs_msg(sb, KERN_INFO, + "user_xattr options not supported"); + break; + case Opt_nouser_xattr: + f2fs_msg(sb, KERN_INFO, + "nouser_xattr options not supported"); + break; + case Opt_inline_xattr: + f2fs_msg(sb, KERN_INFO, + "inline_xattr options not supported"); + break; +#endif +#ifdef CONFIG_F2FS_FS_POSIX_ACL + case Opt_acl: + set_opt(sbi, POSIX_ACL); + break; + case Opt_noacl: + clear_opt(sbi, POSIX_ACL); + break; +#else + case Opt_acl: + f2fs_msg(sb, KERN_INFO, "acl options not supported"); + break; + case Opt_noacl: + f2fs_msg(sb, KERN_INFO, "noacl options not supported"); + break; +#endif + case Opt_active_logs: + if (args->from && match_int(args, &arg)) + return -EINVAL; + if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) + return -EINVAL; + sbi->active_logs = arg; + break; + case Opt_disable_ext_identify: + set_opt(sbi, DISABLE_EXT_IDENTIFY); + break; + case Opt_err_continue: + clear_opt(sbi, ERRORS_RECOVER); + clear_opt(sbi, ERRORS_PANIC); + break; + case Opt_err_panic: + set_opt(sbi, ERRORS_PANIC); + clear_opt(sbi, ERRORS_RECOVER); + break; + case Opt_err_recover: + set_opt(sbi, ERRORS_RECOVER); + clear_opt(sbi, ERRORS_PANIC); + break; + case Opt_android_emu: + if (args->from) { + int ret; + char *perms = match_strdup(args); + + ret = parse_android_emu(sbi, perms); + kfree(perms); + + if (ret) + return -EINVAL; + + set_opt(sbi, ANDROID_EMU); + } else + return -EINVAL; + break; + case Opt_inline_data: + set_opt(sbi, INLINE_DATA); + break; + default: + f2fs_msg(sb, KERN_ERR, + "Unrecognized mount option \"%s\" or missing value", + p); + return -EINVAL; + } + } + return 0; +} + +static struct inode *f2fs_alloc_inode(struct super_block *sb) +{ + struct f2fs_inode_info *fi; + + fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO); + if (!fi) + return NULL; + + init_once((void *) fi); + + /* Initialize f2fs-specific inode info */ + fi->vfs_inode.i_version = 1; + atomic_set(&fi->dirty_dents, 0); + fi->i_current_depth = 1; + fi->i_advise = 0; + rwlock_init(&fi->ext.ext_lock); + + set_inode_flag(fi, FI_NEW_INODE); + + if (test_opt(F2FS_SB(sb), INLINE_XATTR)) + set_inode_flag(fi, FI_INLINE_XATTR); + + return &fi->vfs_inode; +} + +static int f2fs_drop_inode(struct inode *inode) +{ + /* + * This is to avoid a deadlock condition like below. + * writeback_single_inode(inode) + * - f2fs_write_data_page + * - f2fs_gc -> iput -> evict + * - inode_wait_for_writeback(inode) + */ + if (!inode_unhashed(inode) && inode->i_state & I_SYNC) + return 0; + return generic_drop_inode(inode); +} + +/* + * f2fs_dirty_inode() is called from __mark_inode_dirty() + * + * We should call set_dirty_inode to write the dirty inode through write_inode. 
+ */ +static void f2fs_dirty_inode(struct inode *inode, int flags) +{ + set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); +} + +static void f2fs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode)); +} + +static void f2fs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, f2fs_i_callback); +} + +static void f2fs_put_super(struct super_block *sb) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + if (sbi->s_proc) { + remove_proc_entry("segment_info", sbi->s_proc); + remove_proc_entry(sb->s_id, f2fs_proc_root); + } + kobject_del(&sbi->s_kobj); + + f2fs_destroy_stats(sbi); + stop_gc_thread(sbi); + + /* We don't need to do checkpoint when it's clean */ + if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES)) + write_checkpoint(sbi, true); + + iput(sbi->node_inode); + iput(sbi->meta_inode); + + /* destroy f2fs internal modules */ + destroy_node_manager(sbi); + destroy_segment_manager(sbi); + + kfree(sbi->ckpt); + kobject_put(&sbi->s_kobj); + wait_for_completion(&sbi->s_kobj_unregister); + + sb->s_fs_info = NULL; + brelse(sbi->raw_super_buf); + kfree(sbi); +} + +int f2fs_sync_fs(struct super_block *sb, int sync) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + trace_f2fs_sync_fs(sb, sync); + + if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES)) + return 0; + + if (sync) { + mutex_lock(&sbi->gc_mutex); + write_checkpoint(sbi, false); + mutex_unlock(&sbi->gc_mutex); + } else { + f2fs_balance_fs(sbi); + } + + return 0; +} + +static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct super_block *sb = dentry->d_sb; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + block_t total_count, user_block_count, start_count, ovp_count; + + total_count = le64_to_cpu(sbi->raw_super->block_count); + user_block_count = sbi->user_block_count; + start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); + ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; + buf->f_type = F2FS_SUPER_MAGIC; + buf->f_bsize = sbi->blocksize; + + buf->f_blocks = total_count - start_count; + buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count; + buf->f_bavail = user_block_count - valid_user_blocks(sbi); + + buf->f_files = sbi->total_node_count; + buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi); + + buf->f_namelen = F2FS_NAME_LEN; + buf->f_fsid.val[0] = (u32)id; + buf->f_fsid.val[1] = (u32)(id >> 32); + + return 0; +} + +static int f2fs_show_options(struct seq_file *seq, struct dentry *root) +{ + struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); + + if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC)) + seq_printf(seq, ",background_gc=%s", "on"); + else + seq_printf(seq, ",background_gc=%s", "off"); + if (test_opt(sbi, DISABLE_ROLL_FORWARD)) + seq_puts(seq, ",disable_roll_forward"); + if (test_opt(sbi, DISCARD)) + seq_puts(seq, ",discard"); + if (test_opt(sbi, NOHEAP)) + seq_puts(seq, ",no_heap_alloc"); +#ifdef CONFIG_F2FS_FS_XATTR + if (test_opt(sbi, XATTR_USER)) + seq_puts(seq, ",user_xattr"); + else + seq_puts(seq, ",nouser_xattr"); + if (test_opt(sbi, INLINE_XATTR)) + seq_puts(seq, ",inline_xattr"); +#endif +#ifdef CONFIG_F2FS_FS_POSIX_ACL + if (test_opt(sbi, POSIX_ACL)) + seq_puts(seq, ",acl"); + else + seq_puts(seq, ",noacl"); +#endif + if (test_opt(sbi, ERRORS_PANIC)) + seq_puts(seq, ",errors=panic"); + else if (test_opt(sbi, ERRORS_RECOVER)) + seq_puts(seq, ",errors=recover"); + else + seq_puts(seq, 
",errors=continue"); + if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) + seq_puts(seq, ",disable_ext_identify"); + + if (test_opt(sbi, ANDROID_EMU)) + seq_printf(seq, ",android_emu=%u:%u:%ho%s", + sbi->android_emu_uid, + sbi->android_emu_gid, + sbi->android_emu_mode, + (sbi->android_emu_flags & + F2FS_ANDROID_EMU_NOCASE) ? + ":nocase" : ""); + + if (test_opt(sbi, INLINE_DATA)) + seq_puts(seq, ",inline_data"); + seq_printf(seq, ",active_logs=%u", sbi->active_logs); + + return 0; +} + +static int segment_info_seq_show(struct seq_file *seq, void *offset) +{ + struct super_block *sb = seq->private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + unsigned int total_segs = + le32_to_cpu(sbi->raw_super->segment_count_main); + int i; + + for (i = 0; i < total_segs; i++) { + seq_printf(seq, "%u", get_valid_blocks(sbi, i, 1)); + if (i != 0 && (i % 10) == 0) + seq_puts(seq, "\n"); + else + seq_puts(seq, " "); + } + return 0; +} + +static int segment_info_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, segment_info_seq_show, + PROC_I(inode)->pde->data); +} + +static const struct file_operations f2fs_seq_segment_info_fops = { + .owner = THIS_MODULE, + .open = segment_info_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int f2fs_remount(struct super_block *sb, int *flags, char *data) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct f2fs_mount_info org_mount_opt; + int err, active_logs; + + /* + * Save the old mount options in case we + * need to restore them. + */ + org_mount_opt = sbi->mount_opt; + active_logs = sbi->active_logs; + + /* parse mount options */ + err = parse_options(sb, data); + if (err) + goto restore_opts; + + /* + * Previous and new state of filesystem is RO, + * so no point in checking GC conditions. + */ + if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) + goto skip; + + /* + * We stop the GC thread if FS is mounted as RO + * or if background_gc = off is passed in mount + * option. Also sync the filesystem. + */ + if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) { + if (sbi->gc_thread) { + stop_gc_thread(sbi); + f2fs_sync_fs(sb, 1); + } + } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) { + err = start_gc_thread(sbi); + if (err) + goto restore_opts; + } +skip: + /* Update the POSIXACL Flag */ + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | + (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); + return 0; + +restore_opts: + sbi->mount_opt = org_mount_opt; + sbi->active_logs = active_logs; + return err; +} + +static struct super_operations f2fs_sops = { + .alloc_inode = f2fs_alloc_inode, + .drop_inode = f2fs_drop_inode, + .destroy_inode = f2fs_destroy_inode, + .write_inode = f2fs_write_inode, + .dirty_inode = f2fs_dirty_inode, + .show_options = f2fs_show_options, + .evict_inode = f2fs_evict_inode, + .put_super = f2fs_put_super, + .sync_fs = f2fs_sync_fs, + .statfs = f2fs_statfs, + .remount_fs = f2fs_remount, +}; + +static struct inode *f2fs_nfs_get_inode(struct super_block *sb, + u64 ino, u32 generation) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode; + + if (unlikely(ino < F2FS_ROOT_INO(sbi))) + return ERR_PTR(-ESTALE); + + /* + * f2fs_iget isn't quite right if the inode is currently unallocated! + * However f2fs_iget currently does appropriate checks to handle stale + * inodes so everything is OK. + */ + inode = f2fs_iget(sb, ino); + if (IS_ERR(inode)) + return ERR_CAST(inode); + if (unlikely(generation && inode->i_generation != generation)) { + /* we didn't find the right inode.. 
*/ + iput(inode); + return ERR_PTR(-ESTALE); + } + return inode; +} + +static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) +{ + return generic_fh_to_dentry(sb, fid, fh_len, fh_type, + f2fs_nfs_get_inode); +} + +static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) +{ + return generic_fh_to_parent(sb, fid, fh_len, fh_type, + f2fs_nfs_get_inode); +} + +static const struct export_operations f2fs_export_ops = { + .fh_to_dentry = f2fs_fh_to_dentry, + .fh_to_parent = f2fs_fh_to_parent, + .get_parent = f2fs_get_parent, +}; + +static loff_t max_file_size(unsigned bits) +{ + loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS); + loff_t leaf_count = ADDRS_PER_BLOCK; + + /* two direct node blocks */ + result += (leaf_count * 2); + + /* two indirect node blocks */ + leaf_count *= NIDS_PER_BLOCK; + result += (leaf_count * 2); + + /* one double indirect node block */ + leaf_count *= NIDS_PER_BLOCK; + result += leaf_count; + + result <<= bits; + return result; +} + +static int sanity_check_raw_super(struct super_block *sb, + struct f2fs_super_block *raw_super) +{ + unsigned int blocksize; + + if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { + f2fs_msg(sb, KERN_INFO, + "Magic Mismatch, valid(0x%x) - read(0x%x)", + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); + return 1; + } + + /* Currently, support only 4KB page cache size */ + if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { + f2fs_msg(sb, KERN_INFO, + "Invalid page_cache_size (%lu), supports only 4KB\n", + PAGE_CACHE_SIZE); + return 1; + } + + /* Currently, support only 4KB block size */ + blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); + if (blocksize != F2FS_BLKSIZE) { + f2fs_msg(sb, KERN_INFO, + "Invalid blocksize (%u), supports only 4KB\n", + blocksize); + return 1; + } + + if (le32_to_cpu(raw_super->log_sectorsize) != + F2FS_LOG_SECTOR_SIZE) { + f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize"); + return 1; + } + if (le32_to_cpu(raw_super->log_sectors_per_block) != + F2FS_LOG_SECTORS_PER_BLOCK) { + f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block"); + return 1; + } + return 0; +} + +static int sanity_check_ckpt(struct f2fs_sb_info *sbi) +{ + unsigned int total, fsmeta; + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + + total = le32_to_cpu(raw_super->segment_count); + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); + fsmeta += le32_to_cpu(raw_super->segment_count_sit); + fsmeta += le32_to_cpu(raw_super->segment_count_nat); + fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); + fsmeta += le32_to_cpu(raw_super->segment_count_ssa); + + if (unlikely(fsmeta >= total)) + return 1; + + if (unlikely(is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) { + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); + return 1; + } + return 0; +} + +static void init_sb_info(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *raw_super = sbi->raw_super; + int i; + + sbi->log_sectors_per_block = + le32_to_cpu(raw_super->log_sectors_per_block); + sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); + sbi->blocksize = 1 << sbi->log_blocksize; + sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); + sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg; + sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); + sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); + sbi->total_sections = le32_to_cpu(raw_super->section_count); + 
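/*
+ * Each NAT block exists in two copies on disk (the live copy is
+ * selected by the checkpoint's NAT version bitmap), so only half of
+ * the NAT segments carry distinct entries. With the default 4KB
+ * block size a 2MB segment holds 512 blocks and, at 9 bytes per
+ * f2fs_nat_entry, NAT_ENTRY_PER_BLOCK = 4096 / 9 = 455, so each
+ * pair of NAT segments can describe 512 * 455 = 232960 nodes.
+ */
+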
sbi->total_node_count = + (le32_to_cpu(raw_super->segment_count_nat) / 2) + * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; + sbi->root_ino_num = le32_to_cpu(raw_super->root_ino); + sbi->node_ino_num = le32_to_cpu(raw_super->node_ino); + sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino); + sbi->cur_victim_sec = NULL_SECNO; + sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; + + for (i = 0; i < NR_COUNT_TYPE; i++) + atomic_set(&sbi->nr_pages[i], 0); +} + +/* + * Read f2fs raw super block. + * Because we have two copies of super block, so read the first one at first, + * if the first one is invalid, move to read the second one. + */ +static int read_raw_super_block(struct super_block *sb, + struct f2fs_super_block **raw_super, + struct buffer_head **raw_super_buf) +{ + int block = 0; + +retry: + *raw_super_buf = sb_bread(sb, block); + if (!*raw_super_buf) { + f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock", + block + 1); + if (block == 0) { + block++; + goto retry; + } else { + return -EIO; + } + } + + *raw_super = (struct f2fs_super_block *) + ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET); + + /* sanity checking of raw super */ + if (sanity_check_raw_super(sb, *raw_super)) { + brelse(*raw_super_buf); + f2fs_msg(sb, KERN_ERR, + "Can't find valid F2FS filesystem in %dth superblock", + block + 1); + if (block == 0) { + block++; + goto retry; + } else { + return -EINVAL; + } + } + + return 0; +} + +static int f2fs_fill_super(struct super_block *sb, void *data, int silent) +{ + struct f2fs_sb_info *sbi; + struct f2fs_super_block *raw_super; + struct buffer_head *raw_super_buf; + struct inode *root; + long err = -EINVAL; + const char *descr = ""; + int i; + + f2fs_msg(sb, KERN_INFO, "mounting.."); + /* allocate memory for f2fs-specific super block info */ + sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); + if (!sbi) + return -ENOMEM; + + /* set a block size */ + if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { + f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); + goto free_sbi; + } + + err = read_raw_super_block(sb, &raw_super, &raw_super_buf); + if (err) + goto free_sbi; + + sb->s_fs_info = sbi; + /* init some FS parameters */ + sbi->active_logs = NR_CURSEG_TYPE; + + set_opt(sbi, BG_GC); + +#ifdef CONFIG_F2FS_FS_XATTR + set_opt(sbi, XATTR_USER); +#endif +#ifdef CONFIG_F2FS_FS_POSIX_ACL + set_opt(sbi, POSIX_ACL); +#endif + /* parse mount options */ + err = parse_options(sb, (char *)data); + if (err) + goto free_sb_buf; + + sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); + sb->s_max_links = F2FS_LINK_MAX; + get_random_bytes(&sbi->s_next_generation, sizeof(u32)); + + sb->s_op = &f2fs_sops; + sb->s_xattr = f2fs_xattr_handlers; + sb->s_export_op = &f2fs_export_ops; + sb->s_magic = F2FS_SUPER_MAGIC; + sb->s_time_gran = 1; + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | + (test_opt(sbi, POSIX_ACL) ? 
MS_POSIXACL : 0); + memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); + + /* init f2fs-specific super block info */ + sbi->sb = sb; + sbi->raw_super = raw_super; + sbi->raw_super_buf = raw_super_buf; + mutex_init(&sbi->gc_mutex); + mutex_init(&sbi->writepages); + mutex_init(&sbi->cp_mutex); + mutex_init(&sbi->node_write); + sbi->por_doing = false; + spin_lock_init(&sbi->stat_lock); + + mutex_init(&sbi->read_io.io_mutex); + sbi->read_io.sbi = sbi; + sbi->read_io.bio = NULL; + for (i = 0; i < NR_PAGE_TYPE; i++) { + mutex_init(&sbi->write_io[i].io_mutex); + sbi->write_io[i].sbi = sbi; + sbi->write_io[i].bio = NULL; + } + + init_rwsem(&sbi->cp_rwsem); + init_waitqueue_head(&sbi->cp_wait); + init_sb_info(sbi); + + /* get an inode for meta space */ + sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); + if (IS_ERR(sbi->meta_inode)) { + f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); + err = PTR_ERR(sbi->meta_inode); + goto free_sb_buf; + } + +get_cp: + err = get_valid_checkpoint(sbi); + if (err) { + f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); + goto free_meta_inode; + } + + /* sanity checking of checkpoint */ + err = -EINVAL; + if (sanity_check_ckpt(sbi)) { + f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint"); + goto free_cp; + } + + sbi->total_valid_node_count = + le32_to_cpu(sbi->ckpt->valid_node_count); + sbi->total_valid_inode_count = + le32_to_cpu(sbi->ckpt->valid_inode_count); + sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); + sbi->total_valid_block_count = + le64_to_cpu(sbi->ckpt->valid_block_count); + sbi->last_valid_block_count = sbi->total_valid_block_count; + sbi->alloc_valid_block_count = 0; + INIT_LIST_HEAD(&sbi->dir_inode_list); + spin_lock_init(&sbi->dir_inode_lock); + + init_orphan_info(sbi); + + /* setup f2fs internal modules */ + err = build_segment_manager(sbi); + if (err) { + f2fs_msg(sb, KERN_ERR, + "Failed to initialize F2FS segment manager"); + goto free_sm; + } + err = build_node_manager(sbi); + if (err) { + f2fs_msg(sb, KERN_ERR, + "Failed to initialize F2FS node manager"); + goto free_nm; + } + + build_gc_manager(sbi); + + /* get an inode for node space */ + sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); + if (IS_ERR(sbi->node_inode)) { + f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); + err = PTR_ERR(sbi->node_inode); + goto free_nm; + } + + /* if there are nt orphan nodes free them */ + recover_orphan_inodes(sbi); + + /* read root inode and dentry */ + root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); + if (IS_ERR(root)) { + f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); + err = PTR_ERR(root); + goto free_node_inode; + } + if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { + err = -EINVAL; + goto free_root_inode; + } + + sb->s_root = d_make_root(root); /* allocate root dentry */ + if (!sb->s_root) { + err = -ENOMEM; + goto free_root_inode; + } + + /* recover fsynced data */ + if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { + err = recover_fsync_data(sbi); + if (err) { + if (f2fs_handle_error(sbi)) { + set_opt(sbi, DISABLE_ROLL_FORWARD); + kfree(sbi->ckpt); + f2fs_msg(sb, KERN_ERR, + "reloading last checkpoint"); + goto get_cp; + } + f2fs_msg(sb, KERN_ERR, + "cannot recover all fsync data errno=%ld", err); + /* checkpoint what we have */ + write_checkpoint(sbi, false); + } + } + + /* + * If filesystem is not mounted as read-only then + * do start the gc_thread. 
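+ * A read-only mount has nothing for background GC to clean; if the
+ * filesystem is later remounted read-only (or with background_gc=off),
+ * f2fs_remount() above stops the thread again.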
+ */ + if (!(sb->s_flags & MS_RDONLY)) { + /* After POR, we can run background GC thread.*/ + err = start_gc_thread(sbi); + if (err) + goto free_gc; + } + + err = f2fs_build_stats(sbi); + if (err) + goto free_gc; + + if (f2fs_proc_root) + sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root); + + if (sbi->s_proc) + proc_create_data("segment_info", S_IRUGO, sbi->s_proc, + &f2fs_seq_segment_info_fops, sb); + + if (test_opt(sbi, DISCARD)) { + struct request_queue *q = bdev_get_queue(sb->s_bdev); + if (!blk_queue_discard(q)) + f2fs_msg(sb, KERN_WARNING, + "mounting with \"discard\" option, but " + "the device does not support discard"); + } + + if (test_opt(sbi, ANDROID_EMU)) + descr = " with android sdcard emulation"; + f2fs_msg(sb, KERN_INFO, "mounted filesystem%s", descr); + + sbi->s_kobj.kset = f2fs_kset; + init_completion(&sbi->s_kobj_unregister); + err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL, + "%s", sb->s_id); + if (err) + goto fail; + + return 0; +fail: + if (sbi->s_proc) { + remove_proc_entry("segment_info", sbi->s_proc); + remove_proc_entry(sb->s_id, f2fs_proc_root); + } + f2fs_destroy_stats(sbi); +free_gc: + stop_gc_thread(sbi); +free_root_inode: + dput(sb->s_root); + sb->s_root = NULL; +free_node_inode: + iput(sbi->node_inode); +free_nm: + destroy_node_manager(sbi); +free_sm: + destroy_segment_manager(sbi); +free_cp: + kfree(sbi->ckpt); +free_meta_inode: + make_bad_inode(sbi->meta_inode); + iput(sbi->meta_inode); +free_sb_buf: + brelse(raw_super_buf); +free_sbi: + kfree(sbi); + f2fs_msg(sb, KERN_ERR, "mount failed"); + return err; +} + +static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super); +} + +static struct file_system_type f2fs_fs_type = { + .owner = THIS_MODULE, + .name = "f2fs", + .mount = f2fs_mount, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; + +static int __init init_inodecache(void) +{ + f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", + sizeof(struct f2fs_inode_info), NULL); + if (!f2fs_inode_cachep) + return -ENOMEM; + return 0; +} + +static void destroy_inodecache(void) +{ + /* + * Make sure all delayed rcu free inodes are flushed before we + * destroy cache. 
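+ * f2fs_destroy_inode() frees inodes via call_rcu(), so rcu_barrier()
+ * waits for every pending f2fs_i_callback() to finish before
+ * kmem_cache_destroy(); a late callback would otherwise reference a
+ * cache that no longer exists.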
+ */ + rcu_barrier(); + kmem_cache_destroy(f2fs_inode_cachep); +} + +static int __init init_f2fs_fs(void) +{ + int err; + + err = init_inodecache(); + if (err) + goto fail; + err = create_node_manager_caches(); + if (err) + goto free_inodecache; + err = create_segment_manager_caches(); + if (err) + goto free_node_manager_caches; + err = create_gc_caches(); + if (err) + goto free_segment_manager_caches; + err = create_checkpoint_caches(); + if (err) + goto free_gc_caches; + f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj); + if (!f2fs_kset) { + err = -ENOMEM; + goto free_checkpoint_caches; + } + err = register_filesystem(&f2fs_fs_type); + if (err) + goto free_kset; + f2fs_create_root_stats(); + f2fs_proc_root = proc_mkdir("fs/f2fs", NULL); + return 0; + +free_kset: + kset_unregister(f2fs_kset); +free_checkpoint_caches: + destroy_checkpoint_caches(); +free_gc_caches: + destroy_gc_caches(); +free_segment_manager_caches: + destroy_segment_manager_caches(); +free_node_manager_caches: + destroy_node_manager_caches(); +free_inodecache: + destroy_inodecache(); +fail: + return err; +} + +static void __exit exit_f2fs_fs(void) +{ + remove_proc_entry("fs/f2fs", NULL); + f2fs_destroy_root_stats(); + unregister_filesystem(&f2fs_fs_type); + destroy_checkpoint_caches(); + destroy_gc_caches(); + destroy_segment_manager_caches(); + destroy_node_manager_caches(); + destroy_inodecache(); + kset_unregister(f2fs_kset); +} + +module_init(init_f2fs_fs) +module_exit(exit_f2fs_fs) + +MODULE_AUTHOR("Samsung Electronics's Praesto Team"); +MODULE_DESCRIPTION("Flash Friendly File System"); +MODULE_LICENSE("GPL"); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c new file mode 100644 index 0000000000000..4cd9c2fab68b0 --- /dev/null +++ b/fs/f2fs/xattr.c @@ -0,0 +1,597 @@ +/* + * fs/f2fs/xattr.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Portions of this code from linux/fs/ext2/xattr.c + * + * Copyright (C) 2001-2003 Andreas Gruenbacher + * + * Fix by Harrison Xing . + * Extended attributes for symlinks and special files added per + * suggestion of Luka Renko . + * xattr consolidation Copyright (c) 2004 James Morris , + * Red Hat Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include "f2fs.h" +#include "xattr.h" + +static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list, + size_t list_size, const char *name, size_t name_len, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + int total_len, prefix_len = 0; + const char *prefix = NULL; + + switch (type) { + case F2FS_XATTR_INDEX_USER: + if (!test_opt(sbi, XATTR_USER)) + return -EOPNOTSUPP; + prefix = XATTR_USER_PREFIX; + prefix_len = XATTR_USER_PREFIX_LEN; + break; + case F2FS_XATTR_INDEX_TRUSTED: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + prefix = XATTR_TRUSTED_PREFIX; + prefix_len = XATTR_TRUSTED_PREFIX_LEN; + break; + case F2FS_XATTR_INDEX_SECURITY: + prefix = XATTR_SECURITY_PREFIX; + prefix_len = XATTR_SECURITY_PREFIX_LEN; + break; + default: + return -EINVAL; + } + + total_len = prefix_len + name_len + 1; + if (list && total_len <= list_size) { + memcpy(list, prefix, prefix_len); + memcpy(list + prefix_len, name, name_len); + list[prefix_len + name_len] = '\0'; + } + return total_len; +} + +static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + + switch (type) { + case F2FS_XATTR_INDEX_USER: + if (!test_opt(sbi, XATTR_USER)) + return -EOPNOTSUPP; + break; + case F2FS_XATTR_INDEX_TRUSTED: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + break; + case F2FS_XATTR_INDEX_SECURITY: + break; + default: + return -EINVAL; + } + if (strcmp(name, "") == 0) + return -EINVAL; + return f2fs_getxattr(dentry->d_inode, type, name, buffer, size); +} + +static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, int type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + + switch (type) { + case F2FS_XATTR_INDEX_USER: + if (!test_opt(sbi, XATTR_USER)) + return -EOPNOTSUPP; + break; + case F2FS_XATTR_INDEX_TRUSTED: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + break; + case F2FS_XATTR_INDEX_SECURITY: + break; + default: + return -EINVAL; + } + if (strcmp(name, "") == 0) + return -EINVAL; + + return f2fs_setxattr(dentry->d_inode, type, name, value, size, NULL); +} + +static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list, + size_t list_size, const char *name, size_t name_len, int type) +{ + const char *xname = F2FS_SYSTEM_ADVISE_PREFIX; + size_t size; + + if (type != F2FS_XATTR_INDEX_ADVISE) + return 0; + + size = strlen(xname) + 1; + if (list && size <= list_size) + memcpy(list, xname, size); + return size; +} + +static int f2fs_xattr_advise_get(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + struct inode *inode = dentry->d_inode; + + if (!name || strcmp(name, "") != 0) + return -EINVAL; + + if (buffer) + *((char *)buffer) = F2FS_I(inode)->i_advise; + return sizeof(char); +} + +static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, int type) +{ + struct inode *inode = dentry->d_inode; + + if (!name || strcmp(name, "") != 0) + return -EINVAL; + if (!inode_owner_or_capable(inode)) + return -EPERM; + if (value == NULL) + return -EINVAL; + + F2FS_I(inode)->i_advise = *(char *)value; + return 0; +} + +#ifdef CONFIG_F2FS_FS_SECURITY +static int __f2fs_setxattr(struct inode *inode, int name_index, + const char *name, const void *value, size_t value_len, + struct page *ipage); +static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, + 
void *page) +{ + const struct xattr *xattr; + int err = 0; + + for (xattr = xattr_array; xattr->name != NULL; xattr++) { + err = __f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY, + xattr->name, xattr->value, + xattr->value_len, (struct page *)page); + if (err < 0) + break; + } + return err; +} + +int f2fs_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, struct page *ipage) +{ + return security_inode_init_security(inode, dir, qstr, + &f2fs_initxattrs, ipage); +} +#endif + +const struct xattr_handler f2fs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .flags = F2FS_XATTR_INDEX_USER, + .list = f2fs_xattr_generic_list, + .get = f2fs_xattr_generic_get, + .set = f2fs_xattr_generic_set, +}; + +const struct xattr_handler f2fs_xattr_trusted_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .flags = F2FS_XATTR_INDEX_TRUSTED, + .list = f2fs_xattr_generic_list, + .get = f2fs_xattr_generic_get, + .set = f2fs_xattr_generic_set, +}; + +const struct xattr_handler f2fs_xattr_advise_handler = { + .prefix = F2FS_SYSTEM_ADVISE_PREFIX, + .flags = F2FS_XATTR_INDEX_ADVISE, + .list = f2fs_xattr_advise_list, + .get = f2fs_xattr_advise_get, + .set = f2fs_xattr_advise_set, +}; + +const struct xattr_handler f2fs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .flags = F2FS_XATTR_INDEX_SECURITY, + .list = f2fs_xattr_generic_list, + .get = f2fs_xattr_generic_get, + .set = f2fs_xattr_generic_set, +}; + +static const struct xattr_handler *f2fs_xattr_handler_map[] = { + [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler, +#ifdef CONFIG_F2FS_FS_POSIX_ACL + [F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &f2fs_xattr_acl_access_handler, + [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler, +#endif + [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler, +#ifdef CONFIG_F2FS_FS_SECURITY + [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler, +#endif + [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler, +}; + +const struct xattr_handler *f2fs_xattr_handlers[] = { + &f2fs_xattr_user_handler, +#ifdef CONFIG_F2FS_FS_POSIX_ACL + &f2fs_xattr_acl_access_handler, + &f2fs_xattr_acl_default_handler, +#endif + &f2fs_xattr_trusted_handler, +#ifdef CONFIG_F2FS_FS_SECURITY + &f2fs_xattr_security_handler, +#endif + &f2fs_xattr_advise_handler, + NULL, +}; + +static inline const struct xattr_handler *f2fs_xattr_handler(int name_index) +{ + const struct xattr_handler *handler = NULL; + + if (name_index > 0 && name_index < ARRAY_SIZE(f2fs_xattr_handler_map)) + handler = f2fs_xattr_handler_map[name_index]; + return handler; +} + +static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int name_index, + size_t name_len, const char *name) +{ + struct f2fs_xattr_entry *entry; + + list_for_each_xattr(entry, base_addr) { + if (entry->e_name_index != name_index) + continue; + if (entry->e_name_len != name_len) + continue; + if (!memcmp(entry->e_name, name, name_len)) + break; + } + return entry; +} + +static void *read_all_xattrs(struct inode *inode, struct page *ipage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct f2fs_xattr_header *header; + size_t size = PAGE_SIZE, inline_size = 0; + void *txattr_addr; + + inline_size = inline_xattr_size(inode); + + txattr_addr = kzalloc(inline_size + size, GFP_KERNEL); + if (!txattr_addr) + return NULL; + + /* read from inline xattr */ + if (inline_size) { + struct page *page = NULL; + void *inline_addr; + + if (ipage) { + inline_addr = inline_xattr_addr(ipage); + } else { + page = get_node_page(sbi, inode->i_ino); 
+ if (IS_ERR(page)) + goto fail; + inline_addr = inline_xattr_addr(page); + } + memcpy(txattr_addr, inline_addr, inline_size); + f2fs_put_page(page, 1); + } + + /* read from xattr node block */ + if (F2FS_I(inode)->i_xattr_nid) { + struct page *xpage; + void *xattr_addr; + + /* The inode already has an extended attribute block. */ + xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); + if (IS_ERR(xpage)) + goto fail; + + xattr_addr = page_address(xpage); + memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE); + f2fs_put_page(xpage, 1); + } + + header = XATTR_HDR(txattr_addr); + + /* never been allocated xattrs */ + if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) { + header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC); + header->h_refcount = cpu_to_le32(1); + } + return txattr_addr; +fail: + kzfree(txattr_addr); + return NULL; +} + +static inline int write_all_xattrs(struct inode *inode, __u32 hsize, + void *txattr_addr, struct page *ipage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + size_t inline_size = 0; + void *xattr_addr; + struct page *xpage; + nid_t new_nid = 0; + int err; + + inline_size = inline_xattr_size(inode); + + if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid) + if (!alloc_nid(sbi, &new_nid)) + return -ENOSPC; + + /* write to inline xattr */ + if (inline_size) { + struct page *page = NULL; + void *inline_addr; + + if (ipage) { + inline_addr = inline_xattr_addr(ipage); + } else { + page = get_node_page(sbi, inode->i_ino); + if (IS_ERR(page)) { + alloc_nid_failed(sbi, new_nid); + return PTR_ERR(page); + } + inline_addr = inline_xattr_addr(page); + } + memcpy(inline_addr, txattr_addr, inline_size); + f2fs_put_page(page, 1); + + /* no need to use xattr node block */ + if (hsize <= inline_size) { + err = truncate_xattr_node(inode, ipage); + alloc_nid_failed(sbi, new_nid); + return err; + } + } + + /* write to xattr node block */ + if (F2FS_I(inode)->i_xattr_nid) { + xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); + if (IS_ERR(xpage)) { + alloc_nid_failed(sbi, new_nid); + return PTR_ERR(xpage); + } + f2fs_bug_on(new_nid); + } else { + struct dnode_of_data dn; + set_new_dnode(&dn, inode, NULL, NULL, new_nid); + xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage); + if (IS_ERR(xpage)) { + alloc_nid_failed(sbi, new_nid); + return PTR_ERR(xpage); + } + alloc_nid_done(sbi, new_nid); + } + + xattr_addr = page_address(xpage); + memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE - + sizeof(struct node_footer)); + set_page_dirty(xpage); + f2fs_put_page(xpage, 1); + + /* need to checkpoint during fsync */ + F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi)); + return 0; +} + +int f2fs_getxattr(struct inode *inode, int name_index, const char *name, + void *buffer, size_t buffer_size) +{ + struct f2fs_xattr_entry *entry; + void *base_addr; + int error = 0; + size_t value_len, name_len; + + if (name == NULL) + return -EINVAL; + name_len = strlen(name); + + base_addr = read_all_xattrs(inode, NULL); + if (!base_addr) + return -ENOMEM; + + entry = __find_xattr(base_addr, name_index, name_len, name); + if (IS_XATTR_LAST_ENTRY(entry)) { + error = -ENODATA; + goto cleanup; + } + + value_len = le16_to_cpu(entry->e_value_size); + + if (buffer && value_len > buffer_size) { + error = -ERANGE; + goto cleanup; + } + + if (buffer) { + char *pval = entry->e_name + entry->e_name_len; + memcpy(buffer, pval, value_len); + } + error = value_len; + +cleanup: + kzfree(base_addr); + return error; +} + +ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, 
size_t buffer_size) +{ + struct inode *inode = dentry->d_inode; + struct f2fs_xattr_entry *entry; + void *base_addr; + int error = 0; + size_t rest = buffer_size; + + base_addr = read_all_xattrs(inode, NULL); + if (!base_addr) + return -ENOMEM; + + list_for_each_xattr(entry, base_addr) { + const struct xattr_handler *handler = + f2fs_xattr_handler(entry->e_name_index); + size_t size; + + if (!handler) + continue; + + size = handler->list(dentry, buffer, rest, entry->e_name, + entry->e_name_len, handler->flags); + if (buffer && size > rest) { + error = -ERANGE; + goto cleanup; + } + + if (buffer) + buffer += size; + rest -= size; + } + error = buffer_size - rest; +cleanup: + kzfree(base_addr); + return error; +} + +static int __f2fs_setxattr(struct inode *inode, int name_index, + const char *name, const void *value, size_t value_len, + struct page *ipage) +{ + struct f2fs_inode_info *fi = F2FS_I(inode); + struct f2fs_xattr_entry *here, *last; + void *base_addr; + int found, newsize; + size_t name_len; + __u32 new_hsize; + int error = -ENOMEM; + + if (name == NULL) + return -EINVAL; + + if (value == NULL) + value_len = 0; + + name_len = strlen(name); + + if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode)) + return -ERANGE; + + base_addr = read_all_xattrs(inode, ipage); + if (!base_addr) + goto exit; + + /* find entry with wanted name. */ + here = __find_xattr(base_addr, name_index, name_len, name); + + found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1; + last = here; + + while (!IS_XATTR_LAST_ENTRY(last)) + last = XATTR_NEXT_ENTRY(last); + + newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + + name_len + value_len); + + /* 1. Check space */ + if (value) { + int free; + /* + * If value is NULL, it is remove operation. + * In case of update operation, we caculate free. + */ + free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr); + if (found) + free = free + ENTRY_SIZE(here); + + if (unlikely(free < newsize)) { + error = -ENOSPC; + goto exit; + } + } + + /* 2. Remove old entry */ + if (found) { + /* + * If entry is found, remove old entry. + * If not found, remove operation is not needed. + */ + struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here); + int oldsize = ENTRY_SIZE(here); + + memmove(here, next, (char *)last - (char *)next); + last = (struct f2fs_xattr_entry *)((char *)last - oldsize); + memset(last, 0, oldsize); + } + + new_hsize = (char *)last - (char *)base_addr; + + /* 3. Write new entry */ + if (value) { + char *pval; + /* + * Before we come here, old entry is removed. + * We just write new entry. 
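+ * "last" points just past the final packed entry, so the new entry is
+ * appended there (index, name length, name, then value) and new_hsize
+ * grows by the XATTR_ALIGN()ed newsize computed above.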
+ */ + memset(last, 0, newsize); + last->e_name_index = name_index; + last->e_name_len = name_len; + memcpy(last->e_name, name, name_len); + pval = last->e_name + name_len; + memcpy(pval, value, value_len); + last->e_value_size = cpu_to_le16(value_len); + new_hsize += newsize; + } + + error = write_all_xattrs(inode, new_hsize, base_addr, ipage); + if (error) + goto exit; + + if (is_inode_flag_set(fi, FI_ACL_MODE)) { + inode->i_mode = fi->i_acl_mode; + inode->i_ctime = CURRENT_TIME; + clear_inode_flag(fi, FI_ACL_MODE); + } + + if (ipage) + update_inode(inode, ipage); + else + update_inode_page(inode); +exit: + kzfree(base_addr); + return error; +} + +int f2fs_setxattr(struct inode *inode, int name_index, const char *name, + const void *value, size_t value_len, struct page *ipage) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + int err; + + f2fs_balance_fs(sbi); + + f2fs_lock_op(sbi); + err = __f2fs_setxattr(inode, name_index, name, value, value_len, ipage); + f2fs_unlock_op(sbi); + + return err; +} diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h new file mode 100644 index 0000000000000..02a08fb88a151 --- /dev/null +++ b/fs/f2fs/xattr.h @@ -0,0 +1,152 @@ +/* + * fs/f2fs/xattr.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Portions of this code from linux/fs/ext2/xattr.h + * + * On-disk format of extended attributes for the ext2 filesystem. + * + * (C) 2001 Andreas Gruenbacher, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __F2FS_XATTR_H__ +#define __F2FS_XATTR_H__ + +#include +#include + +/* Magic value in attribute blocks */ +#define F2FS_XATTR_MAGIC 0xF2F52011 + +/* Maximum number of references to one attribute block */ +#define F2FS_XATTR_REFCOUNT_MAX 1024 + +/* Name indexes */ +#define F2FS_SYSTEM_ADVISE_PREFIX "system.advise" +#define F2FS_XATTR_INDEX_USER 1 +#define F2FS_XATTR_INDEX_POSIX_ACL_ACCESS 2 +#define F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 +#define F2FS_XATTR_INDEX_TRUSTED 4 +#define F2FS_XATTR_INDEX_LUSTRE 5 +#define F2FS_XATTR_INDEX_SECURITY 6 +#define F2FS_XATTR_INDEX_ADVISE 7 + +struct f2fs_xattr_header { + __le32 h_magic; /* magic number for identification */ + __le32 h_refcount; /* reference count */ + __u32 h_reserved[4]; /* zero right now */ +}; + +struct f2fs_xattr_entry { + __u8 e_name_index; + __u8 e_name_len; + __le16 e_value_size; /* size of attribute value */ + char e_name[0]; /* attribute name */ +}; + +#define XATTR_HDR(ptr) ((struct f2fs_xattr_header *)(ptr)) +#define XATTR_ENTRY(ptr) ((struct f2fs_xattr_entry *)(ptr)) +#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr) + 1)) +#define XATTR_ROUND (3) + +#define XATTR_ALIGN(size) ((size + XATTR_ROUND) & ~XATTR_ROUND) + +#define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \ + entry->e_name_len + le16_to_cpu(entry->e_value_size))) + +#define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\ + ENTRY_SIZE(entry))) + +#define IS_XATTR_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0) + +#define list_for_each_xattr(entry, addr) \ + for (entry = XATTR_FIRST_ENTRY(addr);\ + !IS_XATTR_LAST_ENTRY(entry);\ + entry = XATTR_NEXT_ENTRY(entry)) + +#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + PAGE_SIZE - \ + sizeof(struct node_footer) - sizeof(__u32)) + +#define MAX_VALUE_LEN(i) (MIN_OFFSET(i) - \ + sizeof(struct f2fs_xattr_header) - \ + sizeof(struct 
f2fs_xattr_entry)) + +/* + * On-disk structure of f2fs_xattr + * We use inline xattrs space + 1 block for xattr. + * + * +--------------------+ + * | f2fs_xattr_header | + * | | + * +--------------------+ + * | f2fs_xattr_entry | + * | .e_name_index = 1 | + * | .e_name_len = 3 | + * | .e_value_size = 14 | + * | .e_name = "foo" | + * | "value_of_xattr" |<- value_offs = e_name + e_name_len + * +--------------------+ + * | f2fs_xattr_entry | + * | .e_name_index = 4 | + * | .e_name = "bar" | + * +--------------------+ + * | | + * | Free | + * | | + * +--------------------+<- MIN_OFFSET + * | node_footer | + * | (nid, ino, offset) | + * +--------------------+ + * + **/ + +#ifdef CONFIG_F2FS_FS_XATTR +extern const struct xattr_handler f2fs_xattr_user_handler; +extern const struct xattr_handler f2fs_xattr_trusted_handler; +extern const struct xattr_handler f2fs_xattr_acl_access_handler; +extern const struct xattr_handler f2fs_xattr_acl_default_handler; +extern const struct xattr_handler f2fs_xattr_advise_handler; +extern const struct xattr_handler f2fs_xattr_security_handler; + +extern const struct xattr_handler *f2fs_xattr_handlers[]; + +extern int f2fs_setxattr(struct inode *, int, const char *, + const void *, size_t, struct page *); +extern int f2fs_getxattr(struct inode *, int, const char *, void *, size_t); +extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t); +#else + +#define f2fs_xattr_handlers NULL +static inline int f2fs_setxattr(struct inode *inode, int name_index, + const char *name, const void *value, size_t value_len) +{ + return -EOPNOTSUPP; +} +static inline int f2fs_getxattr(struct inode *inode, int name_index, + const char *name, void *buffer, size_t buffer_size) +{ + return -EOPNOTSUPP; +} +static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, + size_t buffer_size) +{ + return -EOPNOTSUPP; +} +#endif + +#ifdef CONFIG_F2FS_FS_SECURITY +extern int f2fs_init_security(struct inode *, struct inode *, + const struct qstr *, struct page *); +#else +static inline int f2fs_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, struct page *ipage) +{ + return 0; +} +#endif +#endif /* __F2FS_XATTR_H__ */ diff --git a/include/linux/blx.h b/include/linux/blx.h new file mode 100644 index 0000000000000..7fa45fcff1698 --- /dev/null +++ b/include/linux/blx.h @@ -0,0 +1,10 @@ +/* include/linux/blx.h */ + +#ifndef _LINUX_BLX_H +#define _LINUX_BLX_H + +#define MAX_CHARGINGLIMIT 100 + +int get_charginglimit(void); + +#endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 59b8ca1a07497..7c71ca3b12954 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -359,12 +359,18 @@ extern struct cpufreq_governor cpufreq_gov_performance; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE) extern struct cpufreq_governor cpufreq_gov_powersave; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SELADANG) +extern struct cpufreq_governor cpufreq_gov_seladang; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_seladang) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE) extern struct cpufreq_governor cpufreq_gov_userspace; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND) extern struct cpufreq_governor cpufreq_gov_ondemand; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ELEMENTALX) +extern struct cpufreq_governor cpufreq_gov_elementalx; +#define 
CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_elementalx) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) extern struct cpufreq_governor cpufreq_gov_conservative; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h new file mode 100644 index 0000000000000..da74d878dc4f8 --- /dev/null +++ b/include/linux/f2fs_fs.h @@ -0,0 +1,431 @@ +/** + * include/linux/f2fs_fs.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_F2FS_FS_H +#define _LINUX_F2FS_FS_H + +#include +#include + +#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ +#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */ +#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ +#define F2FS_BLKSIZE 4096 /* support only 4KB block */ +#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ + +#define NULL_ADDR ((block_t)0) /* used as block_t addresses */ +#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ + +#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) +#define F2FS_NODE_INO(sbi) (sbi->node_ino_num) +#define F2FS_META_INO(sbi) (sbi->meta_ino_num) + +/* This flag is used by node and meta inodes, and by recovery */ +#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) + +/* + * For further optimization on multi-head logs, on-disk layout supports maximum + * 16 logs by default. The number, 16, is expected to cover all the cases + * enoughly. The implementaion currently uses no more than 6 logs. + * Half the logs are used for nodes, and the other half are used for data. 
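+ * The six logs in use are the hot/warm/cold temperature classes for
+ * data and for node blocks (CURSEG_HOT_DATA through CURSEG_COLD_NODE);
+ * the number of active logs is chosen at mount time via the
+ * "active_logs" option.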
+ */ +#define MAX_ACTIVE_LOGS 16 +#define MAX_ACTIVE_NODE_LOGS 8 +#define MAX_ACTIVE_DATA_LOGS 8 + +/* + * For superblock + */ +struct f2fs_super_block { + __le32 magic; /* Magic Number */ + __le16 major_ver; /* Major Version */ + __le16 minor_ver; /* Minor Version */ + __le32 log_sectorsize; /* log2 sector size in bytes */ + __le32 log_sectors_per_block; /* log2 # of sectors per block */ + __le32 log_blocksize; /* log2 block size in bytes */ + __le32 log_blocks_per_seg; /* log2 # of blocks per segment */ + __le32 segs_per_sec; /* # of segments per section */ + __le32 secs_per_zone; /* # of sections per zone */ + __le32 checksum_offset; /* checksum offset inside super block */ + __le64 block_count; /* total # of user blocks */ + __le32 section_count; /* total # of sections */ + __le32 segment_count; /* total # of segments */ + __le32 segment_count_ckpt; /* # of segments for checkpoint */ + __le32 segment_count_sit; /* # of segments for SIT */ + __le32 segment_count_nat; /* # of segments for NAT */ + __le32 segment_count_ssa; /* # of segments for SSA */ + __le32 segment_count_main; /* # of segments for main area */ + __le32 segment0_blkaddr; /* start block address of segment 0 */ + __le32 cp_blkaddr; /* start block address of checkpoint */ + __le32 sit_blkaddr; /* start block address of SIT */ + __le32 nat_blkaddr; /* start block address of NAT */ + __le32 ssa_blkaddr; /* start block address of SSA */ + __le32 main_blkaddr; /* start block address of main area */ + __le32 root_ino; /* root inode number */ + __le32 node_ino; /* node inode number */ + __le32 meta_ino; /* meta inode number */ + __u8 uuid[16]; /* 128-bit uuid for volume */ + __le16 volume_name[512]; /* volume name */ + __le32 extension_count; /* # of extensions below */ + __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ +} __packed; + +/* + * For checkpoint + */ +#define CP_ERROR_FLAG 0x00000008 +#define CP_COMPACT_SUM_FLAG 0x00000004 +#define CP_ORPHAN_PRESENT_FLAG 0x00000002 +#define CP_UMOUNT_FLAG 0x00000001 + +struct f2fs_checkpoint { + __le64 checkpoint_ver; /* checkpoint block version number */ + __le64 user_block_count; /* # of user blocks */ + __le64 valid_block_count; /* # of valid blocks in main area */ + __le32 rsvd_segment_count; /* # of reserved segments for gc */ + __le32 overprov_segment_count; /* # of overprovision segments */ + __le32 free_segment_count; /* # of free segments in main area */ + + /* information of current node segments */ + __le32 cur_node_segno[MAX_ACTIVE_NODE_LOGS]; + __le16 cur_node_blkoff[MAX_ACTIVE_NODE_LOGS]; + /* information of current data segments */ + __le32 cur_data_segno[MAX_ACTIVE_DATA_LOGS]; + __le16 cur_data_blkoff[MAX_ACTIVE_DATA_LOGS]; + __le32 ckpt_flags; /* Flags : umount and journal_present */ + __le32 cp_pack_total_block_count; /* total # of one cp pack */ + __le32 cp_pack_start_sum; /* start block number of data summary */ + __le32 valid_node_count; /* Total number of valid nodes */ + __le32 valid_inode_count; /* Total number of valid inodes */ + __le32 next_free_nid; /* Next free node number */ + __le32 sit_ver_bitmap_bytesize; /* Default value 64 */ + __le32 nat_ver_bitmap_bytesize; /* Default value 256 */ + __le32 checksum_offset; /* checksum offset inside cp block */ + __le64 elapsed_time; /* mounted time */ + /* allocation type of current segment */ + unsigned char alloc_type[MAX_ACTIVE_LOGS]; + + /* SIT and NAT version bitmap */ + unsigned char sit_nat_version_bitmap[1]; +} __packed; + +/* + * For orphan inode management + */ +#define 
F2FS_ORPHANS_PER_BLOCK 1020 + +struct f2fs_orphan_block { + __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ + __le32 reserved; /* reserved */ + __le16 blk_addr; /* block index in current CP */ + __le16 blk_count; /* Number of orphan inode blocks in CP */ + __le32 entry_count; /* Total number of orphan nodes in current CP */ + __le32 check_sum; /* CRC32 for orphan inode block */ +} __packed; + +/* + * For NODE structure + */ +struct f2fs_extent { + __le32 fofs; /* start file offset of the extent */ + __le32 blk_addr; /* start block address of the extent */ + __le32 len; /* lengh of the extent */ +} __packed; + +#define F2FS_NAME_LEN 255 +#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ +#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ +#define ADDRS_PER_INODE(fi) addrs_per_inode(fi) +#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ + +#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) +#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) +#define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3) +#define NODE_IND2_BLOCK (DEF_ADDRS_PER_INODE + 4) +#define NODE_DIND_BLOCK (DEF_ADDRS_PER_INODE + 5) + +#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ +#define F2FS_INLINE_DATA 0x02 /* file inline data flag */ + +#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ + F2FS_INLINE_XATTR_ADDRS - 1)) + +#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) \ + - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1)) + +struct f2fs_inode { + __le16 i_mode; /* file mode */ + __u8 i_advise; /* file hints */ + __u8 i_inline; /* file inline flags */ + __le32 i_uid; /* user ID */ + __le32 i_gid; /* group ID */ + __le32 i_links; /* links count */ + __le64 i_size; /* file size in bytes */ + __le64 i_blocks; /* file size in blocks */ + __le64 i_atime; /* access time */ + __le64 i_ctime; /* change time */ + __le64 i_mtime; /* modification time */ + __le32 i_atime_nsec; /* access time in nano scale */ + __le32 i_ctime_nsec; /* change time in nano scale */ + __le32 i_mtime_nsec; /* modification time in nano scale */ + __le32 i_generation; /* file version (for NFS) */ + __le32 i_current_depth; /* only for directory depth */ + __le32 i_xattr_nid; /* nid to save xattr */ + __le32 i_flags; /* file attributes */ + __le32 i_pino; /* parent inode number */ + __le32 i_namelen; /* file name length */ + __u8 i_name[F2FS_NAME_LEN]; /* file name for SPOR */ + __u8 i_reserved2; /* for backward compatibility */ + + struct f2fs_extent i_ext; /* caching a largest extent */ + + __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ + + __le32 i_nid[5]; /* direct(2), indirect(2), + double_indirect(1) node id */ +} __packed; + +struct direct_node { + __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */ +} __packed; + +struct indirect_node { + __le32 nid[NIDS_PER_BLOCK]; /* array of data block address */ +} __packed; + +enum { + COLD_BIT_SHIFT = 0, + FSYNC_BIT_SHIFT, + DENT_BIT_SHIFT, + OFFSET_BIT_SHIFT +}; + +struct node_footer { + __le32 nid; /* node id */ + __le32 ino; /* inode nunmber */ + __le32 flag; /* include cold/fsync/dentry marks and offset */ + __le64 cp_ver; /* checkpoint version */ + __le32 next_blkaddr; /* next node page block address */ +} __packed; + +struct f2fs_node { + /* can be one of three types: inode, direct, and indirect types */ + union { + struct f2fs_inode i; + struct direct_node dn; + struct indirect_node in; + }; + struct 
node_footer footer; +} __packed; + +/* + * For NAT entries + */ +#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry)) + +struct f2fs_nat_entry { + __u8 version; /* latest version of cached nat entry */ + __le32 ino; /* inode number */ + __le32 block_addr; /* block address */ +} __packed; + +struct f2fs_nat_block { + struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK]; +} __packed; + +/* + * For SIT entries + * + * Each segment is 2MB in size by default so that a bitmap for validity of + * there-in blocks should occupy 64 bytes, 512 bits. + * Not allow to change this. + */ +#define SIT_VBLOCK_MAP_SIZE 64 +#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry)) + +/* + * Note that f2fs_sit_entry->vblocks has the following bit-field information. + * [15:10] : allocation type such as CURSEG_XXXX_TYPE + * [9:0] : valid block count + */ +#define SIT_VBLOCKS_SHIFT 10 +#define SIT_VBLOCKS_MASK ((1 << SIT_VBLOCKS_SHIFT) - 1) +#define GET_SIT_VBLOCKS(raw_sit) \ + (le16_to_cpu((raw_sit)->vblocks) & SIT_VBLOCKS_MASK) +#define GET_SIT_TYPE(raw_sit) \ + ((le16_to_cpu((raw_sit)->vblocks) & ~SIT_VBLOCKS_MASK) \ + >> SIT_VBLOCKS_SHIFT) + +struct f2fs_sit_entry { + __le16 vblocks; /* reference above */ + __u8 valid_map[SIT_VBLOCK_MAP_SIZE]; /* bitmap for valid blocks */ + __le64 mtime; /* segment age for cleaning */ +} __packed; + +struct f2fs_sit_block { + struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK]; +} __packed; + +/* + * For segment summary + * + * One summary block contains exactly 512 summary entries, which represents + * exactly 2MB segment by default. Not allow to change the basic units. + * + * NOTE: For initializing fields, you must use set_summary + * + * - If data page, nid represents dnode's nid + * - If node page, nid represents the node page's nid. + * + * The ofs_in_node is used by only data page. It represents offset + * from node's page's beginning to get a data block address. 
+ * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) + */ +#define ENTRIES_IN_SUM 512 +#define SUMMARY_SIZE (7) /* sizeof(struct summary) */ +#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */ +#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) + +/* a summary entry for a 4KB-sized block in a segment */ +struct f2fs_summary { + __le32 nid; /* parent node id */ + union { + __u8 reserved[3]; + struct { + __u8 version; /* node version number */ + __le16 ofs_in_node; /* block index in parent node */ + } __packed; + }; +} __packed; + +/* summary block type, node or data, is stored to the summary_footer */ +#define SUM_TYPE_NODE (1) +#define SUM_TYPE_DATA (0) + +struct summary_footer { + unsigned char entry_type; /* SUM_TYPE_XXX */ + __u32 check_sum; /* summary checksum */ +} __packed; + +#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\ + SUM_ENTRY_SIZE) +#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ + sizeof(struct nat_journal_entry)) +#define NAT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ + sizeof(struct nat_journal_entry)) +#define SIT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ + sizeof(struct sit_journal_entry)) +#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ + sizeof(struct sit_journal_entry)) +/* + * frequently updated NAT/SIT entries can be stored in the spare area in + * summary blocks + */ +enum { + NAT_JOURNAL = 0, + SIT_JOURNAL +}; + +struct nat_journal_entry { + __le32 nid; + struct f2fs_nat_entry ne; +} __packed; + +struct nat_journal { + struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES]; + __u8 reserved[NAT_JOURNAL_RESERVED]; +} __packed; + +struct sit_journal_entry { + __le32 segno; + struct f2fs_sit_entry se; +} __packed; + +struct sit_journal { + struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES]; + __u8 reserved[SIT_JOURNAL_RESERVED]; +} __packed; + +/* 4KB-sized summary block structure */ +struct f2fs_summary_block { + struct f2fs_summary entries[ENTRIES_IN_SUM]; + union { + __le16 n_nats; + __le16 n_sits; + }; + /* spare area is used by NAT or SIT journals */ + union { + struct nat_journal nat_j; + struct sit_journal sit_j; + }; + struct summary_footer footer; +} __packed; + +/* + * For directory operations + */ +#define F2FS_DOT_HASH 0 +#define F2FS_DDOT_HASH F2FS_DOT_HASH +#define F2FS_MAX_HASH (~((0x3ULL) << 62)) +#define F2FS_HASH_COL_BIT ((0x1ULL) << 63) + +typedef __le32 f2fs_hash_t; + +/* One directory entry slot covers 8bytes-long file name */ +#define F2FS_SLOT_LEN 8 +#define F2FS_SLOT_LEN_BITS 3 + +#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) + +/* the number of dentry in a block */ +#define NR_DENTRY_IN_BLOCK 214 + +/* MAX level for dir lookup */ +#define MAX_DIR_HASH_DEPTH 63 + +#define SIZE_OF_DIR_ENTRY 11 /* by byte */ +#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ + BITS_PER_BYTE) +#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ + F2FS_SLOT_LEN) * \ + NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) + +/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ +struct f2fs_dir_entry { + __le32 hash_code; /* hash code of file name */ + __le32 ino; /* inode number */ + __le16 name_len; /* lengh of file name */ + __u8 file_type; /* file type */ +} __packed; + +/* 4KB-sized directory entry block */ +struct f2fs_dentry_block { + /* validity bitmap for directory entries in each block */ + __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP]; + __u8 reserved[SIZE_OF_RESERVED]; + struct f2fs_dir_entry 
dentry[NR_DENTRY_IN_BLOCK]; + __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; +} __packed; + +/* file types used in inode_info->flags */ +enum { + F2FS_FT_UNKNOWN, + F2FS_FT_REG_FILE, + F2FS_FT_DIR, + F2FS_FT_CHRDEV, + F2FS_FT_BLKDEV, + F2FS_FT_FIFO, + F2FS_FT_SOCK, + F2FS_FT_SYMLINK, + F2FS_FT_MAX +}; + +#endif /* _LINUX_F2FS_FS_H */ diff --git a/include/linux/fastchg.h b/include/linux/fastchg.h new file mode 100644 index 0000000000000..0fe3092c305f7 --- /dev/null +++ b/include/linux/fastchg.h @@ -0,0 +1,37 @@ +/* + * based on work from: + * Chad Froebel & + * Jean-Pierre Rasquin + * for backwards compatibility + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_FASTCHG_H +#define _LINUX_FASTCHG_H + +extern int force_fast_charge; +extern int fast_charge_level; + +#define FAST_CHARGE_DISABLED 0 /* default */ +#define FAST_CHARGE_FORCE_AC 1 +#define FAST_CHARGE_FORCE_CUSTOM_MA 2 + +#define FAST_CHARGE_500 500 +#define FAST_CHARGE_700 700 +#define FAST_CHARGE_900 900 +#define FAST_CHARGE_1100 1100 +#define FAST_CHARGE_1300 1300 +#define FAST_CHARGE_1500 1500 + +#define FAST_CHARGE_LEVELS "500 700 900 1100 1300 1500" + +#endif diff --git a/include/linux/kexec.h b/include/linux/kexec.h index af84a25ef6b00..a4509adc86af8 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -111,6 +111,10 @@ struct kimage { #define KEXEC_TYPE_CRASH 1 unsigned int preserve_context : 1; +#ifdef CONFIG_KEXEC_HARDBOOT + unsigned int hardboot : 1; +#endif + #ifdef ARCH_HAS_KIMAGE_ARCH struct kimage_arch arch; #endif @@ -178,6 +182,11 @@ extern struct kimage *kexec_crash_image; #define KEXEC_ON_CRASH 0x00000001 #define KEXEC_PRESERVE_CONTEXT 0x00000002 + +#ifdef CONFIG_KEXEC_HARDBOOT +#define KEXEC_HARDBOOT 0x00000004 +#endif + #define KEXEC_ARCH_MASK 0xffff0000 /* These values match the ELF architecture values. 
@@ -196,10 +205,14 @@ extern struct kimage *kexec_crash_image; #define KEXEC_ARCH_MIPS ( 8 << 16) /* List of defined/legal kexec flags */ -#ifndef CONFIG_KEXEC_JUMP -#define KEXEC_FLAGS KEXEC_ON_CRASH -#else +#if defined(CONFIG_KEXEC_JUMP) && defined(CONFIG_KEXEC_HARDBOOT) +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_HARDBOOT) +#elif defined(CONFIG_KEXEC_JUMP) #define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#elif defined(CONFIG_KEXEC_HARDBOOT) +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_HARDBOOT) +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH) #endif #define VMCOREINFO_BYTES (4096) diff --git a/include/linux/magic.h b/include/linux/magic.h index e15192cb9cf40..66353ffd06a7b 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -23,6 +23,7 @@ #define EXT4_SUPER_MAGIC 0xEF53 #define BTRFS_SUPER_MAGIC 0x9123683E #define NILFS_SUPER_MAGIC 0x3434 +#define F2FS_SUPER_MAGIC 0xF2F52010 #define HPFS_SUPER_MAGIC 0xf995e849 #define ISOFS_SUPER_MAGIC 0x9660 #define JFFS2_SUPER_MAGIC 0x72b6 diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h index 641f4239f006a..693485129f11c 100644 --- a/include/linux/msm_mdp.h +++ b/include/linux/msm_mdp.h @@ -94,6 +94,7 @@ enum { MDP_ARGB_8888, /* ARGB 888 */ MDP_RGB_888, /* RGB 888 planer */ MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planer w/ Cr is in MSB */ + MDP_YCBYCR_H2V1, /* YCbYCr interleave */ MDP_YCRYCB_H2V1, /* YCrYCb interleave */ MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */ MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */ diff --git a/include/sound/tpa2028d.h b/include/sound/tpa2028d.h index 43d99411a65d3..b0d883df155ff 100644 --- a/include/sound/tpa2028d.h +++ b/include/sound/tpa2028d.h @@ -43,6 +43,13 @@ struct audio_amp_platform_data { char agc_compression_rate; char agc_output_limiter_disable; char agc_fixed_gain; +//-- optional + char ATK_time; + char REL_time; + char Hold_time; + char Output_limit_level; + char Noise_Gate_Threshold; + char AGC_Max_Gain; }; /* SPK FUNCTION */ diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h index 951e6ca12da81..767c0b69a7bfa 100644 --- a/include/trace/events/cpufreq_interactive.h +++ b/include/trace/events/cpufreq_interactive.h @@ -34,6 +34,20 @@ DEFINE_EVENT(set, cpufreq_interactive_setspeed, TP_ARGS(cpu_id, targfreq, actualfreq) ); +#ifdef CONFIG_CPU_FREQ_GOV_ELEMENTALX +DEFINE_EVENT(set, cpufreq_interactive_up, + TP_PROTO(u32 cpu_id, unsigned long targfreq, + unsigned long actualfreq), + TP_ARGS(cpu_id, targfreq, actualfreq) +); + +DEFINE_EVENT(set, cpufreq_interactive_down, + TP_PROTO(u32 cpu_id, unsigned long targfreq, + unsigned long actualfreq), + TP_ARGS(cpu_id, targfreq, actualfreq) +); +#endif + DECLARE_EVENT_CLASS(loadeval, TP_PROTO(unsigned long cpu_id, unsigned long load, unsigned long curtarg, unsigned long curactual, diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h new file mode 100644 index 0000000000000..3b9f28dfc8492 --- /dev/null +++ b/include/trace/events/f2fs.h @@ -0,0 +1,790 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM f2fs + +#if !defined(_TRACE_F2FS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_F2FS_H + +#include + +#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev) +#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino + +#define show_block_type(type) \ + __print_symbolic(type, \ + { NODE, "NODE" }, \ + { DATA, "DATA" }, \ + { META, "META" }, \ + { META_FLUSH, "META_FLUSH" }) + +#define 
F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) +#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) + +#define show_bio_type(type) show_bio_base(type), show_bio_extra(type) + +#define show_bio_base(type) \ + __print_symbolic(F2FS_BIO_MASK(type), \ + { READ, "READ" }, \ + { READA, "READAHEAD" }, \ + { READ_SYNC, "READ_SYNC" }, \ + { WRITE, "WRITE" }, \ + { WRITE_SYNC, "WRITE_SYNC" }, \ + { WRITE_FLUSH, "WRITE_FLUSH" }, \ + { WRITE_FUA, "WRITE_FUA" }, \ + { WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" }) + +#define show_bio_extra(type) \ + __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ + { REQ_META, "(M)" }, \ + { REQ_PRIO, "(P)" }, \ + { REQ_META | REQ_PRIO, "(MP)" }, \ + { 0, " \b" }) + +#define show_data_type(type) \ + __print_symbolic(type, \ + { CURSEG_HOT_DATA, "Hot DATA" }, \ + { CURSEG_WARM_DATA, "Warm DATA" }, \ + { CURSEG_COLD_DATA, "Cold DATA" }, \ + { CURSEG_HOT_NODE, "Hot NODE" }, \ + { CURSEG_WARM_NODE, "Warm NODE" }, \ + { CURSEG_COLD_NODE, "Cold NODE" }, \ + { NO_CHECK_TYPE, "No TYPE" }) + +#define show_file_type(type) \ + __print_symbolic(type, \ + { 0, "FILE" }, \ + { 1, "DIR" }) + +#define show_gc_type(type) \ + __print_symbolic(type, \ + { FG_GC, "Foreground GC" }, \ + { BG_GC, "Background GC" }) + +#define show_alloc_mode(type) \ + __print_symbolic(type, \ + { LFS, "LFS-mode" }, \ + { SSR, "SSR-mode" }) + +#define show_victim_policy(type) \ + __print_symbolic(type, \ + { GC_GREEDY, "Greedy" }, \ + { GC_CB, "Cost-Benefit" }) + +struct victim_sel_policy; + +DECLARE_EVENT_CLASS(f2fs__inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(ino_t, pino) + __field(umode_t, mode) + __field(loff_t, size) + __field(unsigned int, nlink) + __field(blkcnt_t, blocks) + __field(__u8, advise) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pino = F2FS_I(inode)->i_pino; + __entry->mode = inode->i_mode; + __entry->nlink = inode->i_nlink; + __entry->size = inode->i_size; + __entry->blocks = inode->i_blocks; + __entry->advise = F2FS_I(inode)->i_advise; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, " + "i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x", + show_dev_ino(__entry), + (unsigned long)__entry->pino, + __entry->mode, + __entry->size, + (unsigned int)__entry->nlink, + (unsigned long long)__entry->blocks, + (unsigned char)__entry->advise) +); + +DECLARE_EVENT_CLASS(f2fs__inode_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, ret = %d", + show_dev_ino(__entry), + __entry->ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(f2fs_sync_file_exit, + + TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret), + + TP_ARGS(inode, need_cp, datasync, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(bool, need_cp) + __field(int, datasync) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->need_cp = need_cp; + __entry->datasync = datasync; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, " + "datasync = %d, ret = %d", + 
show_dev_ino(__entry), + __entry->need_cp ? "needed" : "not needed", + __entry->datasync, + __entry->ret) +); + +TRACE_EVENT(f2fs_sync_fs, + + TP_PROTO(struct super_block *sb, int wait), + + TP_ARGS(sb, wait), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, dirty) + __field(int, wait) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->dirty = F2FS_SB(sb)->s_dirty; + __entry->wait = wait; + ), + + TP_printk("dev = (%d,%d), superblock is %s, wait = %d", + show_dev(__entry), + __entry->dirty ? "dirty" : "not dirty", + __entry->wait) +); + +DEFINE_EVENT(f2fs__inode, f2fs_iget, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_iget_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_evict_inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_new_inode, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +TRACE_EVENT(f2fs_unlink_enter, + + TP_PROTO(struct inode *dir, struct dentry *dentry), + + TP_ARGS(dir, dentry), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, size) + __field(blkcnt_t, blocks) + __field(const char *, name) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->ino = dir->i_ino; + __entry->size = dir->i_size; + __entry->blocks = dir->i_blocks; + __entry->name = dentry->d_name.name; + ), + + TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " + "i_blocks = %llu, name = %s", + show_dev_ino(__entry), + __entry->size, + (unsigned long long)__entry->blocks, + __entry->name) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__inode, f2fs_truncate, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(f2fs_truncate_data_blocks_range, + + TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs, int free), + + TP_ARGS(inode, nid, ofs, free), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(nid_t, nid) + __field(unsigned int, ofs) + __field(int, free) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nid = nid; + __entry->ofs = ofs; + __entry->free = free; + ), + + TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d", + show_dev_ino(__entry), + (unsigned int)__entry->nid, + __entry->ofs, + __entry->free) +); + +DECLARE_EVENT_CLASS(f2fs__truncate_op, + + TP_PROTO(struct inode *inode, u64 from), + + TP_ARGS(inode, from), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(loff_t, size) + __field(blkcnt_t, blocks) + __field(u64, from) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->size = inode->i_size; + __entry->blocks = inode->i_blocks; + __entry->from = from; + ), + + TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, " + "start file offset = %llu", + show_dev_ino(__entry), + __entry->size, + (unsigned long long)__entry->blocks, + (unsigned long long)__entry->from) +); + +DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_blocks_enter, + + TP_PROTO(struct inode *inode, u64 from), + + TP_ARGS(inode, from) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_blocks_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_inode_blocks_enter, + + 
TP_PROTO(struct inode *inode, u64 from), + + TP_ARGS(inode, from) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_inode_blocks_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DECLARE_EVENT_CLASS(f2fs__truncate_node, + + TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + + TP_ARGS(inode, nid, blk_addr), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(nid_t, nid) + __field(block_t, blk_addr) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nid = nid; + __entry->blk_addr = blk_addr; + ), + + TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx", + show_dev_ino(__entry), + (unsigned int)__entry->nid, + (unsigned long long)__entry->blk_addr) +); + +DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_nodes_enter, + + TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + + TP_ARGS(inode, nid, blk_addr) +); + +DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_nodes_exit, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + +DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node, + + TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), + + TP_ARGS(inode, nid, blk_addr) +); + +TRACE_EVENT(f2fs_truncate_partial_nodes, + + TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err), + + TP_ARGS(inode, nid, depth, err), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(nid_t, nid[3]) + __field(int, depth) + __field(int, err) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->nid[0] = nid[0]; + __entry->nid[1] = nid[1]; + __entry->nid[2] = nid[2]; + __entry->depth = depth; + __entry->err = err; + ), + + TP_printk("dev = (%d,%d), ino = %lu, " + "nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d", + show_dev_ino(__entry), + (unsigned int)__entry->nid[0], + (unsigned int)__entry->nid[1], + (unsigned int)__entry->nid[2], + __entry->depth, + __entry->err) +); + +TRACE_EVENT_CONDITION(f2fs_submit_page_bio, + + TP_PROTO(struct page *page, sector_t blkaddr, int type), + + TP_ARGS(page, blkaddr, type), + + TP_CONDITION(page->mapping), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(pgoff_t, index) + __field(sector_t, blkaddr) + __field(int, type) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->index = page->index; + __entry->blkaddr = blkaddr; + __entry->type = type; + ), + + TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " + "blkaddr = 0x%llx, bio_type = %s%s", + show_dev_ino(__entry), + (unsigned long)__entry->index, + (unsigned long long)__entry->blkaddr, + show_bio_type(__entry->type)) +); + +TRACE_EVENT(f2fs_get_data_block, + TP_PROTO(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int ret), + + TP_ARGS(inode, iblock, bh, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(sector_t, iblock) + __field(sector_t, bh_start) + __field(size_t, bh_size) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->iblock = iblock; + __entry->bh_start = bh->b_blocknr; + __entry->bh_size = bh->b_size; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, " + "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d", + show_dev_ino(__entry), + (unsigned long long)__entry->iblock, + (unsigned long 
long)__entry->bh_start, + (unsigned long long)__entry->bh_size, + __entry->ret) +); + +TRACE_EVENT(f2fs_get_victim, + + TP_PROTO(struct super_block *sb, int type, int gc_type, + struct victim_sel_policy *p, unsigned int pre_victim, + unsigned int prefree, unsigned int free), + + TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, type) + __field(int, gc_type) + __field(int, alloc_mode) + __field(int, gc_mode) + __field(unsigned int, victim) + __field(unsigned int, ofs_unit) + __field(unsigned int, pre_victim) + __field(unsigned int, prefree) + __field(unsigned int, free) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->type = type; + __entry->gc_type = gc_type; + __entry->alloc_mode = p->alloc_mode; + __entry->gc_mode = p->gc_mode; + __entry->victim = p->min_segno; + __entry->ofs_unit = p->ofs_unit; + __entry->pre_victim = pre_victim; + __entry->prefree = prefree; + __entry->free = free; + ), + + TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u " + "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u", + show_dev(__entry), + show_data_type(__entry->type), + show_gc_type(__entry->gc_type), + show_alloc_mode(__entry->alloc_mode), + show_victim_policy(__entry->gc_mode), + __entry->victim, + __entry->ofs_unit, + (int)__entry->pre_victim, + __entry->prefree, + __entry->free) +); + +TRACE_EVENT(f2fs_fallocate, + + TP_PROTO(struct inode *inode, int mode, + loff_t offset, loff_t len, int ret), + + TP_ARGS(inode, mode, offset, len, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, mode) + __field(loff_t, offset) + __field(loff_t, len) + __field(loff_t, size) + __field(blkcnt_t, blocks) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = mode; + __entry->offset = offset; + __entry->len = len; + __entry->size = inode->i_size; + __entry->blocks = inode->i_blocks; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, " + "len = %lld, i_size = %lld, i_blocks = %llu, ret = %d", + show_dev_ino(__entry), + __entry->mode, + (unsigned long long)__entry->offset, + (unsigned long long)__entry->len, + (unsigned long long)__entry->size, + (unsigned long long)__entry->blocks, + __entry->ret) +); + +TRACE_EVENT(f2fs_reserve_new_block, + + TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node), + + TP_ARGS(inode, nid, ofs_in_node), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(nid_t, nid) + __field(unsigned int, ofs_in_node) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = nid; + __entry->ofs_in_node = ofs_in_node; + ), + + TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u", + show_dev(__entry), + (unsigned int)__entry->nid, + __entry->ofs_in_node) +); + +DECLARE_EVENT_CLASS(f2fs__submit_bio, + + TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + + TP_ARGS(sb, rw, type, bio), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, rw) + __field(int, type) + __field(sector_t, sector) + __field(unsigned int, size) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->rw = rw; + __entry->type = type; + __entry->sector = bio->bi_sector; + __entry->size = bio->bi_size; + ), + + TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u", + show_dev(__entry), + show_bio_type(__entry->rw), + show_block_type(__entry->type), + (unsigned long long)__entry->sector, + 
__entry->size) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, + + TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + + TP_ARGS(sb, rw, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, + + TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + + TP_ARGS(sb, rw, type, bio), + + TP_CONDITION(bio) +); + +DECLARE_EVENT_CLASS(f2fs__page, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, type) + __field(int, dir) + __field(pgoff_t, index) + __field(int, dirty) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->type = type; + __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->index = page->index; + __entry->dirty = PageDirty(page); + ), + + TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, dirty = %d", + show_dev_ino(__entry), + show_block_type(__entry->type), + show_file_type(__entry->dir), + (unsigned long)__entry->index, + __entry->dirty) +); + +DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite, + + TP_PROTO(struct page *page, int type), + + TP_ARGS(page, type) +); + +TRACE_EVENT(f2fs_submit_page_mbio, + + TP_PROTO(struct page *page, int rw, int type, block_t blk_addr), + + TP_ARGS(page, rw, type, blk_addr), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, rw) + __field(int, type) + __field(pgoff_t, index) + __field(block_t, block) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->rw = rw; + __entry->type = type; + __entry->index = page->index; + __entry->block = blk_addr; + ), + + TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx", + show_dev_ino(__entry), + show_bio_type(__entry->rw), + show_block_type(__entry->type), + (unsigned long)__entry->index, + (unsigned long long)__entry->block) +); + +TRACE_EVENT(f2fs_write_checkpoint, + + TP_PROTO(struct super_block *sb, bool is_umount, char *msg), + + TP_ARGS(sb, is_umount, msg), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(bool, is_umount) + __field(char *, msg) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->is_umount = is_umount; + __entry->msg = msg; + ), + + TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", + show_dev(__entry), + __entry->is_umount ? 
"clean umount" : "consistency", + __entry->msg) +); + +TRACE_EVENT(f2fs_issue_discard, + + TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen), + + TP_ARGS(sb, blkstart, blklen), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(block_t, blkstart) + __field(block_t, blklen) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->blkstart = blkstart; + __entry->blklen = blklen; + ), + + TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", + show_dev(__entry), + (unsigned long long)__entry->blkstart, + (unsigned long long)__entry->blklen) +); +#endif /* _TRACE_F2FS_H */ + + /* This part must be outside protection */ +#include diff --git a/kernel/kexec.c b/kernel/kexec.c index 4e2e472f6aeb3..aef789344c214 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1004,6 +1004,10 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, if (flags & KEXEC_PRESERVE_CONTEXT) image->preserve_context = 1; +#ifdef CONFIG_KEXEC_HARDBOOT + if (flags & KEXEC_HARDBOOT) + image->hardboot = 1; +#endif result = machine_kexec_prepare(image); if (result) goto out; diff --git a/sem-config b/sem-config new file mode 100644 index 0000000000000..bc37e91c9a478 --- /dev/null +++ b/sem-config @@ -0,0 +1,3439 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm 3.4.75 Kernel Configuration +# +CONFIG_ARM=y +CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_KTIME_SCALAR=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_ARM_TICKET_LOCKS=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +# CONFIG_ARM_PATCH_PHYS_VIRT is not set +CONFIG_NEED_MACH_IO_H=y +CONFIG_NEED_MACH_MEMORY_H=y +CONFIG_PHYS_OFFSET=0x80200000 +CONFIG_GENERIC_BUG=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_HAVE_IRQ_WORK=y +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_FHANDLE is not set +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y +# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_IRQ_DOMAIN=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set + +# +# RCU Subsystem +# +CONFIG_TREE_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_RCU_FAST_NO_HZ is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_RCU_BOOST is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y 
+CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +# CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_PERF_COUNTERS is not set +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +CONFIG_COMPAT_BRK=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +# CONFIG_JUMP_LABEL is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_USE_GENERIC_SMP_HELPERS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BRKPT_RESERVED_RW_ACCESS=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_STOP_MACHINE=y +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +# CONFIG_BLK_DEV_BSGLIB is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_TEST is not set +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_SIO=y +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +# 
CONFIG_DEFAULT_SIO is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +CONFIG_UNINLINE_SPIN_UNLOCK=y +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_HIGHBANK is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_PRIMA2 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PICOXCELL is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C24XX is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_ARCH_VT8500 is not set +# CONFIG_ARCH_ZYNQ is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set + +# +# MSM SoC Type +# +# CONFIG_ARCH_MSM7X01A is not set +# CONFIG_ARCH_MSM7X25 is not set +# CONFIG_ARCH_MSM7X27 is not set +# CONFIG_ARCH_MSM7X30 is not set +# CONFIG_ARCH_QSD8X50 is not set +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM8960=y +# CONFIG_ARCH_MSM8930 is not set 
+CONFIG_ARCH_APQ8064=y +# CONFIG_ARCH_MSM8974 is not set +# CONFIG_ARCH_MPQ8092 is not set +# CONFIG_ARCH_MSM8226 is not set +# CONFIG_ARCH_FSM9XXX is not set +# CONFIG_ARCH_MSM9615 is not set +# CONFIG_ARCH_MSM8625 is not set +# CONFIG_ARCH_MSM9625 is not set +CONFIG_MSM_SOC_REV_NONE=y +# CONFIG_MSM_SOC_REV_A is not set +CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER=y +CONFIG_ARCH_MSM_KRAIT=y +CONFIG_MSM_SMP=y +CONFIG_ARCH_MSM_KRAITMP=y +CONFIG_MSM_KRAIT_WFE_FIXUP=y +CONFIG_MSM_RPM=y +# CONFIG_MSM_RPM_SMD is not set +CONFIG_MSM_MPM=y +CONFIG_MSM_XO=y +CONFIG_MSM_REMOTE_SPINLOCK_SFPB=y + +# +# MSM Board Selection +# +# CONFIG_MACH_MSM8960_CDP is not set +# CONFIG_MACH_MSM8960_MTP is not set +# CONFIG_MACH_MSM8960_FLUID is not set +# CONFIG_MACH_MSM8960_LIQUID is not set +# CONFIG_MACH_APQ8064_CDP is not set +# CONFIG_MACH_APQ8064_MTP is not set +# CONFIG_MACH_APQ8064_LIQUID is not set +# CONFIG_MACH_MPQ8064_CDP is not set +# CONFIG_MACH_MPQ8064_HRD is not set +# CONFIG_MACH_MPQ8064_DTV is not set +CONFIG_MACH_MSM_DUMMY=y +CONFIG_MACH_LGE=y + +# +# LGE Board Selection +# +CONFIG_BOARD_HEADER_FILE="mach/lge/board_mako.h" +CONFIG_MACH_APQ8064_MAKO=y +# CONFIG_MACH_LGE_DUMMY is not set + +# +# LGE Specific Patches +# +CONFIG_LGE_QFPROM_INTERFACE=y +CONFIG_UPDATE_LCDC_LUT=y +CONFIG_LCD_KCAL=y +CONFIG_EARJACK_DEBUGGER=y +CONFIG_LGE_CRASH_HANDLER=y +# CONFIG_MSM_STACKED_MEMORY is not set +CONFIG_KERNEL_MSM_CONTIG_MEM_REGION=y +CONFIG_MSM_AMSS_VERSION=6225 +# CONFIG_MSM_AMSS_VERSION_6210 is not set +# CONFIG_MSM_AMSS_VERSION_6220 is not set +CONFIG_MSM_AMSS_VERSION_6225=y +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=20000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET=1000000000 +CONFIG_CPU_HAS_L2_PMU=y +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_MSM_FIQ_SUPPORT is not set +# CONFIG_MSM_SERIAL_DEBUGGER is not set +# CONFIG_MSM_PROC_COMM is not set +CONFIG_MSM_SMD=y +# CONFIG_MSM_SMD_PKG3 is not set +CONFIG_MSM_SMD_PKG4=y +# CONFIG_MSM_SMD_DEBUG is not set +CONFIG_MSM_BAM_DMUX=y +CONFIG_MSM_N_WAY_SMD=y +CONFIG_MSM_N_WAY_SMSM=y +# CONFIG_MSM_RESET_MODEM is not set +CONFIG_MSM_SMD_LOGGING=y +# CONFIG_MSM_IPC_LOGGING is not set +CONFIG_MSM_SMD_NMEA=y +# CONFIG_MSM_HSIC_TTY is not set +CONFIG_MSM_SMD_TTY=y +CONFIG_MSM_SMD_QMI=y +CONFIG_MSM_SMD_PKT=y +CONFIG_MSM_DSPS=y +# CONFIG_MSM_ONCRPCROUTER is not set +CONFIG_MSM_IPC_ROUTER=y +CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y +# CONFIG_MSM_DALRPC is not set +# CONFIG_MSM_CPU_FREQ_SET_MIN_MAX is not set +# CONFIG_MSM_AVS_HW is not set +# CONFIG_MSM_HW3D is not set +CONFIG_AMSS_7X25_VERSION_2009=y +# CONFIG_AMSS_7X25_VERSION_2008 is not set 
+CONFIG_RTAC=y +# CONFIG_MSM_VREG_SWITCH_INVERTED is not set +# CONFIG_MSM_DMA_TEST is not set +# CONFIG_WIFI_CONTROL_FUNC is not set +CONFIG_MSM_SLEEP_TIME_OVERRIDE=y +# CONFIG_MSM_MEMORY_LOW_POWER_MODE is not set +CONFIG_MSM_PM_TIMEOUT_HALT=y +# CONFIG_MSM_PM_TIMEOUT_RESET_MODEM is not set +# CONFIG_MSM_PM_TIMEOUT_RESET_CHIP is not set +CONFIG_MSM_IDLE_WAIT_ON_MODEM=0 +CONFIG_MSM_RPM_REGULATOR=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +# CONFIG_MSM_SYSMON_COMM is not set +CONFIG_MSM_PIL=y +# CONFIG_MSM_PIL_MODEM is not set +# CONFIG_MSM_PIL_QDSP6V3 is not set +CONFIG_MSM_PIL_QDSP6V4=y +# CONFIG_MSM_PIL_LPASS_QDSP6V5 is not set +# CONFIG_MSM_PIL_MSS_QDSP6V5 is not set +CONFIG_MSM_PIL_RIVA=y +CONFIG_MSM_PIL_TZAPPS=y +CONFIG_MSM_PIL_DSPS=y +CONFIG_MSM_PIL_VIDC=y +# CONFIG_MSM_PIL_VENUS is not set +CONFIG_MSM_PIL_GSS=y +# CONFIG_MSM_PIL_PRONTO is not set +CONFIG_MSM_SCM=y +CONFIG_MSM_MODEM_8960=y +CONFIG_MSM_LPASS_8960=y +CONFIG_MSM_WCNSS_SSR_8960=y +CONFIG_MSM_GSS_SSR_8064=y +# CONFIG_MSM_BUSPM_DEV is not set +CONFIG_MSM_TZ_LOG=y +CONFIG_MSM_RPM_LOG=y +CONFIG_MSM_RPM_STATS_LOG=y +# CONFIG_MSM_RPM_RBCPR_STATS_LOG is not set +CONFIG_MSM_DIRECT_SCLK_ACCESS=y +CONFIG_IOMMU_API=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_NATIVE_RESTART=y +CONFIG_MSM_PM8X60=y +# CONFIG_MSM_EVENT_TIMER is not set +CONFIG_MSM_BUS_SCALING=y +CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y +CONFIG_MSM_WATCHDOG=y +# CONFIG_MSM_WATCHDOG_V2 is not set +# CONFIG_MSM_MEMORY_DUMP is not set +CONFIG_MSM_DLOAD_MODE=y +# CONFIG_MSM_JTAG is not set +# CONFIG_MSM_JTAG_MM is not set +# CONFIG_MSM_SLEEP_STATS_DEVICE is not set +# CONFIG_MSM_RUN_QUEUE_STATS is not set +# CONFIG_MSM_STANDALONE_POWER_COLLAPSE is not set +# CONFIG_MSM_GSBI9_UART is not set +CONFIG_MSM_SHOW_RESUME_IRQ=y +# CONFIG_MSM_FAKE_BATTERY is not set +CONFIG_MSM_QDSP6_APR=y +# CONFIG_MSM_QDSP6_APRV2 is not set +CONFIG_MSM_QDSP6_CODECS=y +# CONFIG_MSM_QDSP6V2_CODECS is not set +CONFIG_MSM_AUDIO_QDSP6=y +# CONFIG_MSM_AUDIO_QDSP6V2 is not set +CONFIG_MSM_ADSP_LOADER=y +CONFIG_MSM_ULTRASOUND=y +# CONFIG_MSM_SPM_V1 is not set +CONFIG_MSM_SPM_V2=y +CONFIG_MSM_L2_SPM=y +CONFIG_MSM_MULTIMEDIA_USE_ION=y +# CONFIG_MSM_OCMEM is not set +# CONFIG_MSM_RTB is not set +# CONFIG_MSM_EBI_ERP is not set +CONFIG_MSM_CACHE_ERP=y +CONFIG_MSM_L1_ERR_PANIC=y +# CONFIG_MSM_L1_ERR_LOG is not set +# CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS is not set +# CONFIG_MSM_L2_ERP_1BIT_PANIC is not set +CONFIG_MSM_L2_ERP_2BIT_PANIC=y +CONFIG_MSM_DCVS=y +# CONFIG_MSM_CPR is not set +CONFIG_HAVE_ARCH_HAS_CURRENT_TIMER=y +CONFIG_MSM_CACHE_DUMP=y +CONFIG_MSM_CACHE_DUMP_ON_PANIC=y +CONFIG_MSM_HSIC_SYSMON=y +# CONFIG_MSM_HSIC_SYSMON_TEST is not set +CONFIG_MSM_CPU_PWRCTL=y + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_V7=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +# CONFIG_ARM_LPAE is not set +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_SWP_EMULATE=y +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +# CONFIG_CACHE_L2X0 is not set +CONFIG_ARM_L1_CACHE_SHIFT_6=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_STRICT_MEMORY_RWX=y +CONFIG_ARM_NR_BANKS=8 +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +CONFIG_MULTI_IRQ_HANDLER=y 
+# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_742230 is not set +# CONFIG_ARM_ERRATA_742231 is not set +# CONFIG_ARM_ERRATA_720789 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_ARM_ERRATA_751472 is not set +# CONFIG_ARM_ERRATA_754322 is not set +# CONFIG_ARM_ERRATA_754327 is not set +# CONFIG_ARM_ERRATA_764369 is not set +# CONFIG_KSAPI is not set +# CONFIG_ARM_ERRATA_775420 is not set +CONFIG_ARM_GIC=y +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI is not set +# CONFIG_PCI_SYSCALL is not set +CONFIG_ARCH_SUPPORTS_MSI=y +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_HAVE_SMP=y +CONFIG_SMP=y +# CONFIG_SMP_ON_UP is not set +CONFIG_ARM_CPU_TOPOLOGY=y +# CONFIG_SCHED_MC is not set +# CONFIG_SCHED_SMT is not set +CONFIG_HAVE_ARM_SCU=y +# CONFIG_ARM_ARCH_TIMER is not set +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_NR_CPUS=4 +CONFIG_HOTPLUG_CPU=y +CONFIG_LOCAL_TIMERS=y +CONFIG_ARCH_NR_GPIO=0 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +CONFIG_OABI_COMPAT=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HIGHMEM=y +# CONFIG_HIGHPTE is not set +CONFIG_HW_PERF_EVENTS=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_CLEANCACHE=y +# CONFIG_ARCH_MEMORY_PROBE is not set +# CONFIG_ARCH_MEMORY_REMOVE is not set +# CONFIG_ENABLE_DMM is not set +# CONFIG_FIX_MOVABLE_ZONE is not set +CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0=y +# CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG is not set +# CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE is not set +CONFIG_HOLES_IN_ZONE=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +CONFIG_CC_STACKPROTECTOR=y +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART is not set +CONFIG_CP_ACCESS=y + +# +# Boot options +# +# CONFIG_USE_OF is not set +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_CMDLINE="" +# CONFIG_XIP_KERNEL is not set +CONFIG_KEXEC=y +CONFIG_ATAGS_PROC=y +CONFIG_KEXEC_HARDBOOT=y +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_STAT_DETAILS is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y 
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y + +# +# ARM CPU frequency scaling drivers +# +# CONFIG_ARM_EXYNOS4210_CPUFREQ is not set +# CONFIG_ARM_EXYNOS4X12_CPUFREQ is not set +# CONFIG_ARM_EXYNOS5250_CPUFREQ is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +# CONFIG_FPE_NWFPE is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +# CONFIG_CONSOLE_EARLYSUSPEND is not set +CONFIG_FB_EARLYSUSPEND=y +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_PM_RUNTIME=y +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_APM_EMULATION is not set +CONFIG_PM_CLK=y +CONFIG_CPU_PM=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARM_CPU_SUSPEND=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=y +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +# 
CONFIG_IPV6_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +CONFIG_NETWORK_SECMARK=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +# CONFIG_NF_CONNTRACK_SNMP is not set +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +# CONFIG_NF_CONNTRACK_SIP is not set +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +# CONFIG_NF_CT_NETLINK_TIMEOUT is not set +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +# CONFIG_NETFILTER_XT_TARGET_HL is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# CONFIG_NETFILTER_XT_TARGET_LED is not set +# CONFIG_NETFILTER_XT_TARGET_LOG is not set +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_ECN=y +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y 
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +# CONFIG_IP_NF_MATCH_RPFILTER is not set +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_REJECT_SKERR=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +# CONFIG_NF_NAT_SIP is not set +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +# CONFIG_IP6_NF_MATCH_RPFILTER is not set +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_TARGET_REJECT_SKERR=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=y +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +CONFIG_NET_SCH_PRIO=y +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR 
is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_INGRESS is not set +# CONFIG_NET_SCH_PLUG is not set + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +CONFIG_CLS_U32_MARK=y +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +CONFIG_NET_CLS_FLOW=y +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +# CONFIG_NET_ACT_POLICE is not set +# CONFIG_NET_ACT_GACT is not set +# CONFIG_NET_ACT_MIRRED is not set +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_NETPRIO_CGROUP is not set +CONFIG_BQL=y +CONFIG_HAVE_BPF_JIT=y +# CONFIG_BPF_JIT is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCISMD is not set +# CONFIG_BT_HCIBTUSB is not set +# CONFIG_BT_HCIBTSDIO is not set +# CONFIG_BT_HCIUART is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_MSM_SLEEP is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_MSM_BT_POWER is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WEXT_PRIV=y +CONFIG_CFG80211=y +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_WEXT is not set +# CONFIG_WIRELESS_EXT_SYSFS is not set +# CONFIG_LIB80211 is not set +# CONFIG_CFG80211_ALLOW_RECONNECT is not set +# CONFIG_MAC80211 is not set +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +CONFIG_RFKILL_PM=y +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_RFKILL_REGULATOR is not set +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +CONFIG_BCM2079X=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_DMA_SHARED_BUFFER=y +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y 
+CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CMA is not set +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_MTD is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ATMEL_PWM is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_TSIF is not set +# CONFIG_TSPP is not set +# CONFIG_HAPTIC_ISA1200 is not set +# CONFIG_PMIC8XXX_VIBRATOR is not set +CONFIG_ANDROID_VIBRATOR=y +# CONFIG_TOUCHSENSE_VIBRATOR is not set +# CONFIG_PMIC8XXX_NFC is not set +# CONFIG_PMIC8XXX_UPL is not set +CONFIG_QSEECOM=y +# CONFIG_QFP_FUSE is not set +CONFIG_USB_HSIC_SMSC_HUB=y +# CONFIG_BU52031NVX is not set +CONFIG_TOUCH_WAKE=y +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set +CONFIG_SLIMPORT_ANX7808=y + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_TGT=y +# CONFIG_SCSI_NETLINK is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_WAIT_SCAN=y + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_LIBFC is not set +# CONFIG_LIBFCOE is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# 
CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_TARGET_CORE is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +CONFIG_DUMMY=y +# CONFIG_EQUALIZER is not set +CONFIG_MII=y +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set + +# +# CAIF transport drivers +# +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_CALXEDA_XGMAC is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_DM9000 is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_MSM_RMNET is not set +CONFIG_MSM_RMNET_BAM=y +CONFIG_MSM_RMNET_SMUX=y +# CONFIG_QFEC is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_PHYLIB is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_SLIP=y +CONFIG_SLHC=y +CONFIG_SLIP_COMPRESSED=y +# CONFIG_SLIP_SMART is not set +CONFIG_SLIP_MODE_SLIP6=y + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +CONFIG_USB_USBNET=y +CONFIG_USB_NET_AX8817X=y +CONFIG_USB_NET_CDCETHER=y +# CONFIG_USB_NET_CDC_EEM is not set +CONFIG_USB_NET_CDC_NCM=y +# CONFIG_USB_NET_DM9601 is not set +# CONFIG_USB_NET_SMSC75XX is not set +# CONFIG_USB_NET_SMSC95XX is not set +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=y +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +CONFIG_USB_NET_CDC_SUBSET=y +# CONFIG_USB_ALI_M5632 is not set +# CONFIG_USB_AN2720 is not set +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +# CONFIG_USB_EPSON2888 is not set +# CONFIG_USB_KC2190 is not set +CONFIG_USB_NET_ZAURUS=y +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_SIERRA_NET is not set +# CONFIG_USB_VL600 is not set +CONFIG_MSM_RMNET_USB=y +CONFIG_WLAN=y +# CONFIG_USB_ZD1201 is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set +# CONFIG_LIBRA_SDIOIF is not set +# CONFIG_ATH6K_LEGACY_EXT is not set +CONFIG_WCNSS_CORE=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCMDHD is not set +# CONFIG_BRCMFMAC is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set +# CONFIG_MWIFIEX is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# 
CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +CONFIG_KEYBOARD_PMIC8XXX=y +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_QCIKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_RMI4_I2C is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_MSM_LEGACY is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_CY8C_TS is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C_QC is not set +# CONFIG_TOUCHSCREEN_FT5X06 is not set +CONFIG_TOUCHSCREEN_LGE_COMMON=y +CONFIG_TOUCHSCREEN_LGE_SYNAPTICS=y +CONFIG_TOUCH_REG_MAP_TM2000=y +CONFIG_TOUCHSCREEN_CHARGER_NOTIFY=y +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_PM8XXX_VIBRATOR is not set +CONFIG_INPUT_PMIC8XXX_PWRKEY=y +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_MPU3050 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_TILT_POLLED is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# 
CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_GPIO is not set +# CONFIG_INPUT_ISA1200_FF_MEMLESS is not set +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_BOSCH_BMA150 is not set +# CONFIG_STM_LIS3DH is not set +# CONFIG_BMP18X is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set +CONFIG_N_SMUX=y +CONFIG_N_SMUX_LOOPBACK=y +CONFIG_SMUX_CTL=y +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_MSM is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_MSM_HSL=y +CONFIG_SERIAL_MSM_HSL_CONSOLE=y +# CONFIG_SERIAL_BCM_BT_LPM is not set +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_MSM_SMD is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set + +# +# Diag Support +# +CONFIG_DIAG_CHAR=y + +# +# DIAG traffic over USB +# +CONFIG_DIAG_OVER_USB=y + +# +# SDIO support for DIAG +# + +# +# HSIC/SMUX support for DIAG +# +CONFIG_DIAGFWD_BRIDGE_CODE=y +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_MSM=y +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_MSM_ROTATOR=y +CONFIG_MSM_ADSPRPC=y +# CONFIG_MMC_GENERIC_CSDIO is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_MSM is not set +CONFIG_I2C_QUP=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_QUP=y +# 
CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPMI is not set +CONFIG_SLIMBUS=y +CONFIG_SLIMBUS_MSM_CTRL=y +# CONFIG_HSI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# + +# +# PTP clock support +# + +# +# Enable Device Drivers -> PPS to see the PTP clock options. +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +CONFIG_DEBUG_GPIO=y +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers: +# +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_MSM_V1 is not set +CONFIG_GPIO_MSM_V2=y +# CONFIG_GPIO_MSM_V3 is not set +# CONFIG_GPIO_FSM9XXX is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_GPIO_PM8XXX=y +CONFIG_GPIO_PM8XXX_MPP=y +# CONFIG_GPIO_PM8XXX_RPC is not set +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_BATTERY_MSM is not set +# CONFIG_BATTERY_MSM8X60 is not set +CONFIG_ISL9519_CHARGER=y +# CONFIG_SMB137B_CHARGER is not set +# CONFIG_SMB349_CHARGER is not set +# CONFIG_BATTERY_BQ27520 is not set +# CONFIG_BATTERY_BQ27541 is not set +CONFIG_PM8921_CHARGER=y +CONFIG_PM8XXX_CCADC=y +# CONFIG_LTC4088_CHARGER is not set +CONFIG_PM8921_BMS=y +# CONFIG_CHARGER_SMB347 is not set +CONFIG_WIRELESS_CHARGER=y +CONFIG_BQ51051B_CHARGER=y +CONFIG_BATTERY_TEMP_CONTROL=y +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# 
CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +CONFIG_SENSORS_PM8XXX_ADC=y +# CONFIG_SENSORS_EPM_ADC is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_THERMAL=y +CONFIG_THERMAL_HWMON=y +# CONFIG_THERMAL_MSM_POPMEM is not set +# CONFIG_THERMAL_TSENS is not set +CONFIG_THERMAL_TSENS8960=y +# CONFIG_THERMAL_TSENS8974 is not set +CONFIG_THERMAL_PM8XXX=y +CONFIG_THERMAL_MONITOR=y +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_PMIC8058 is not set +# CONFIG_PMIC8901 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# 
CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_S5M_CORE is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_MFD_PM8XXX=y +CONFIG_MFD_PM8921_CORE=y +CONFIG_MFD_PM8821_CORE=y +# CONFIG_MFD_PM8018_CORE is not set +CONFIG_MFD_PM8038_CORE=y +CONFIG_MFD_PM8XXX_IRQ=y +CONFIG_MFD_PM8821_IRQ=y +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_AAT2870_CORE is not set +CONFIG_MFD_PM8XXX_DEBUG=y +CONFIG_MFD_PM8XXX_PWM=y +CONFIG_MFD_PM8XXX_MISC=y +CONFIG_MFD_PM8XXX_SPK=y +CONFIG_MFD_PM8XXX_BATT_ALARM=y +CONFIG_WCD9304_CODEC=y +CONFIG_WCD9310_CODEC=y +# CONFIG_WCD9320_CODEC is not set +# CONFIG_MFD_RC5T583 is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_REGULATOR_PM8XXX=y +CONFIG_REGULATOR_MSM_GPIO=y +# CONFIG_REGULATOR_STUB is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=y +CONFIG_VIDEO_V4L2_COMMON=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_DVB_CORE is not set +CONFIG_VIDEO_MEDIA=y + +# +# Multimedia drivers +# +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_ATTACH is not set +CONFIG_MEDIA_TUNER=y +# CONFIG_MEDIA_TUNER_CUSTOMISE is not set +CONFIG_MEDIA_TUNER_SIMPLE=y +CONFIG_MEDIA_TUNER_TDA8290=y +CONFIG_MEDIA_TUNER_TDA827X=y +CONFIG_MEDIA_TUNER_TDA18271=y +CONFIG_MEDIA_TUNER_TDA9887=y +CONFIG_MEDIA_TUNER_TEA5761=y +CONFIG_MEDIA_TUNER_TEA5767=y +CONFIG_MEDIA_TUNER_MT20XX=y +CONFIG_MEDIA_TUNER_XC2028=y +CONFIG_MEDIA_TUNER_XC5000=y +CONFIG_MEDIA_TUNER_XC4000=y +CONFIG_MEDIA_TUNER_MC44S803=y +CONFIG_VIDEO_V4L2=y +CONFIG_VIDEOBUF2_CORE=y +CONFIG_VIDEOBUF2_MEMOPS=y +CONFIG_VIDEOBUF2_DMA_CONTIG=y +CONFIG_VIDEOBUF2_VMALLOC=y +CONFIG_VIDEOBUF2_DMA_SG=y +CONFIG_VIDEOBUF2_MSM_MEM=y +CONFIG_VIDEO_CAPTURE_DRIVERS=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_HELPER_CHIPS_AUTO=y + +# +# Audio decoders, processors and mixers +# + +# +# RDS decoders +# + +# +# Video decoders +# + +# +# Video and audio decoders +# + +# +# MPEG video encoders +# + +# +# Video encoders +# + +# +# Camera sensor devices +# + +# +# Flash devices +# + +# +# Video improvement chips +# + +# +# Miscelaneous helper chips +# +# CONFIG_MSM_VCAP is not set +CONFIG_V4L_USB_DRIVERS=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +# CONFIG_USB_GSPCA is not set +# CONFIG_VIDEO_PVRUSB2 is not set +# CONFIG_VIDEO_HDPVR is 
not set +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_VIDEO_USBVISION is not set +# CONFIG_USB_ET61X251 is not set +# CONFIG_USB_SN9C102 is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_ZR364XX is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +# CONFIG_SOC_CAMERA is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_V4L2=y + +# +# Camera Sensor Selection +# +# CONFIG_IMX074 is not set +# CONFIG_OV5647 is not set +# CONFIG_MT9M114 is not set +CONFIG_IMX111=y +# CONFIG_IMX111_ACT is not set +CONFIG_SEKONIX_LENS_ACT=y +# CONFIG_IMX091 is not set +# CONFIG_IMX091_ACT is not set +CONFIG_IMX119=y +CONFIG_MSM_CAMERA_FLASH_LM3559=y +# CONFIG_IMX074_ACT is not set +# CONFIG_S5K4E1 is not set +# CONFIG_MSM_CAMERA_FLASH_SC628A is not set +# CONFIG_MSM_CAMERA_FLASH_TPS61310 is not set +# CONFIG_IMX072 is not set +# CONFIG_OV2720 is not set +# CONFIG_OV8825 is not set +CONFIG_MSM_CAMERA_FLASH=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_ACTUATOR=y +CONFIG_MSM_EEPROM=y +# CONFIG_IMX074_EEPROM is not set +# CONFIG_IMX091_EEPROM is not set +CONFIG_MSM_GEMINI=y +# CONFIG_MSM_MERCURY is not set +# CONFIG_MSM_CAM_IRQ_ROUTER is not set +# CONFIG_MSM_CPP is not set +# CONFIG_MSM_CCI is not set +# CONFIG_QUP_EXCLUSIVE_TO_CAMERA is not set +CONFIG_MSM_CSI20_HEADER=y +# CONFIG_MSM_CSI30_HEADER is not set +# CONFIG_MSM_CSIPHY is not set +# CONFIG_MSM_CSID is not set +# CONFIG_MSM_CSI2_REGISTER is not set +# CONFIG_MSM_ISPIF is not set +# CONFIG_S5K3L1YX is not set +# CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE is not set +# CONFIG_OV7692 is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_MSM_WFD is not set +# CONFIG_RADIO_ADAPTERS is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_2D=y +CONFIG_KGSL_PER_PROCESS_PAGE_TABLE=y +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_PAGE_TABLE_COUNT=24 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_UVESA is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_TMIO is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +CONFIG_FB_VIRTUAL=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_MSM_VIDC=y +CONFIG_MSM_VIDC_1080P=y +CONFIG_MSM_VIDC_VENC=y +CONFIG_MSM_VIDC_VDEC=y +# CONFIG_MSM_VIDC_CONTENT_PROTECTION is not set +CONFIG_FB_MSM=y +# CONFIG_FB_MSM_BACKLIGHT is not set +# CONFIG_FB_MSM_LOGO is not set +CONFIG_FB_MSM_LCDC_HW=y 
+CONFIG_FB_MSM_TRIPLE_BUFFER=y +CONFIG_FB_MSM_MDP_HW=y +# CONFIG_FB_MSM_MDP22 is not set +# CONFIG_FB_MSM_MDP30 is not set +# CONFIG_FB_MSM_MDP31 is not set +CONFIG_FB_MSM_MDP40=y +# CONFIG_FB_MSM_MDSS is not set +# CONFIG_FB_MSM_MDP_NONE is not set +# CONFIG_FB_MSM_EBI2 is not set +# CONFIG_FB_MSM_MDDI is not set +CONFIG_FB_MSM_MIPI_DSI=y +# CONFIG_FB_MSM_LCDC is not set +# CONFIG_FB_MSM_LVDS is not set +CONFIG_FB_MSM_OVERLAY=y +CONFIG_FB_MSM_DTV=y +# CONFIG_FB_MSM_EXTMDDI is not set +# CONFIG_FB_MSM_TVOUT is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON_VGA is not set +# CONFIG_FB_MSM_MDDI_ORISE is not set +# CONFIG_FB_MSM_MDDI_QUICKVX is not set +# CONFIG_FB_MSM_MDDI_AUTO_DETECT is not set +# CONFIG_FB_MSM_LCDC_AUTO_DETECT is not set +# CONFIG_FB_MSM_LCDC_PANEL is not set +# CONFIG_FB_MSM_MIPI_DSI_TOSHIBA is not set +CONFIG_FB_MSM_MIPI_DSI_LGIT=y +# CONFIG_FB_MSM_MIPI_DSI_RENESAS is not set +# CONFIG_FB_MSM_MIPI_DSI_SIMULATOR is not set +# CONFIG_FB_MSM_MIPI_DSI_NOVATEK is not set +# CONFIG_FB_MSM_MIPI_DSI_ORISE is not set +# CONFIG_FB_MSM_LCDC_ST15_WXGA is not set +# CONFIG_FB_MSM_LCDC_ST15_PANEL is not set +# CONFIG_FB_MSM_LCDC_PRISM_WVGA is not set +# CONFIG_FB_MSM_LCDC_SAMSUNG_WSVGA is not set +# CONFIG_FB_MSM_LCDC_CHIMEI_WXGA is not set +# CONFIG_FB_MSM_LCDC_GORDON_VGA is not set +# CONFIG_FB_MSM_LCDC_TOSHIBA_WVGA_PT is not set +# CONFIG_FB_MSM_LCDC_TOSHIBA_FWVGA_PT is not set +# CONFIG_FB_MSM_LCDC_SHARP_WVGA_PT is not set +# CONFIG_FB_MSM_LCDC_AUO_WVGA is not set +# CONFIG_FB_MSM_LCDC_TRULY_HVGA_IPS3P2335 is not set +# CONFIG_FB_MSM_LCDC_TRULY_HVGA_IPS3P2335_PT_PANEL is not set +# CONFIG_FB_MSM_LCDC_SAMSUNG_OLED_PT is not set +# CONFIG_FB_MSM_LCDC_NT35582_WVGA is not set +# CONFIG_FB_MSM_LCDC_WXGA is not set +CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT=y +# CONFIG_FB_MSM_LVDS_CHIMEI_WXGA is not set +# CONFIG_FB_MSM_LVDS_FRC_FHD is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WUXGA is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_CMD_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_ORISE_VIDEO_720P_PT is not set +# CONFIG_FB_MSM_MIPI_ORISE_CMD_720P_PT is not set +# CONFIG_FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT is not set +# CONFIG_FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT is not set +# CONFIG_FB_MSM_MIPI_NT35510_VIDEO_WVGA_PT is not set +# CONFIG_FB_MSM_MIPI_NT35510_CMD_WVGA_PT is not set +# CONFIG_FB_MSM_MIPI_NT35516_VIDEO_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_NT35516_CMD_QHD_PT is not set +# CONFIG_FB_MSM_MIPI_CHIMEI_WXGA is not set +# CONFIG_FB_MSM_MIPI_CHIMEI_WUXGA is not set +# CONFIG_FB_MSM_MIPI_SIMULATOR_VIDEO is not set +CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y +CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y +CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y +CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y +# CONFIG_FB_MSM_LCDC_PRISM_WVGA_PANEL is not set +# CONFIG_FB_MSM_LCDC_SAMSUNG_WSVGA_PANEL is not set +# CONFIG_FB_MSM_LCDC_CHIMEI_WXGA_PANEL is not set +# CONFIG_FB_MSM_LCDC_GORDON_VGA_PANEL is not set +# CONFIG_FB_MSM_LCDC_TOSHIBA_WVGA_PT_PANEL is not set +# CONFIG_FB_MSM_LCDC_TOSHIBA_FWVGA_PT_PANEL is not set +# CONFIG_FB_MSM_LCDC_SHARP_WVGA_PT_PANEL is not set +# CONFIG_FB_MSM_LCDC_AUO_WVGA_PANEL is not set +# CONFIG_FB_MSM_LCDC_NT35582_PANEL is not set +# CONFIG_FB_MSM_LCDC_SAMSUNG_OLED_PT_PANEL is not set +# CONFIG_FB_MSM_LVDS_CHIMEI_WXGA_PANEL is not set +# CONFIG_FB_MSM_LVDS_FRC_FHD_PANEL is not set +# 
CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM is not set +# CONFIG_FB_MSM_MIPI_PANEL_DETECT is not set +# CONFIG_FB_MSM_MDDI_PANEL_AUTO_DETECT is not set +# CONFIG_FB_MSM_LCDC_PANEL_AUTO_DETECT is not set +# CONFIG_FB_MSM_LCDC_MIPI_PANEL_AUTO_DETECT is not set +# CONFIG_FB_MSM_LVDS_MIPI_PANEL_DETECT is not set +# CONFIG_FB_MSM_MDDI_PRISM_WVGA is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA_PORTRAIT is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_VGA is not set +# CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA is not set +# CONFIG_FB_MSM_MDDI_SHARP_QVGA_128x128 is not set +CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT_PANEL=y +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WSVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_TOSHIBA_VIDEO_WUXGA_PANEL is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_VIDEO_QHD_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_NOVATEK_CMD_QHD_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_ORISE_VIDEO_720P_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_ORISE_CMD_720P_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_RENESAS_VIDEO_FWVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_RENESAS_CMD_FWVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_CHIMEI_WXGA_PANEL is not set +# CONFIG_FB_MSM_MIPI_CHIMEI_WUXGA_PANEL is not set +# CONFIG_FB_MSM_MIPI_TRULY_VIDEO_WVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_NT35510_VIDEO_WVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_NT35510_CMD_WVGA_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_NT35516_VIDEO_QHD_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_NT35516_CMD_QHD_PT_PANEL is not set +# CONFIG_FB_MSM_MIPI_SIMULATOR_VIDEO_PANEL is not set +# CONFIG_FB_MSM_EBI2_TMD_QVGA_EPSON_QCIF is not set +# CONFIG_FB_MSM_HDMI_AS_PRIMARY is not set +# CONFIG_FB_MSM_PANEL_NONE is not set +CONFIG_LGIT_VIDEO_WXGA_CABC=y +CONFIG_FB_MSM_EXT_INTERFACE_COMMON=y +CONFIG_FB_MSM_HDMI_COMMON=y +CONFIG_FB_MSM_HDMI_3D=y +# CONFIG_FB_MSM_HDMI_ADV7520_PANEL is not set +CONFIG_FB_MSM_HDMI_MSM_PANEL=y +# CONFIG_FB_MSM_HDMI_MSM_PANEL_DVI_SUPPORT is not set +# CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT is not set +# CONFIG_FB_MSM_HDMI_MHL_9244 is not set +# CONFIG_FB_MSM_HDMI_MHL_8334 is not set +# CONFIG_FB_MSM_TVOUT_NTSC_M is not set +# CONFIG_FB_MSM_TVOUT_NTSC_J is not set +# CONFIG_FB_MSM_TVOUT_PAL_BDGHIN is not set +# CONFIG_FB_MSM_TVOUT_PAL_M is not set +# CONFIG_FB_MSM_TVOUT_PAL_N is not set +CONFIG_FB_MSM_TVOUT_NONE=y +# CONFIG_FB_MSM_DEFAULT_DEPTH_RGB565 is not set +# CONFIG_FB_MSM_DEFAULT_DEPTH_ARGB8888 is not set +CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888=y +# CONFIG_FB_MSM_EBI2_EPSON_S1D_QVGA_PANEL is not set +# CONFIG_FB_MSM_EBI2_PANEL_DETECT is not set +# CONFIG_EXYNOS_VIDEO is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LP855X is not set +CONFIG_BACKLIGHT_LM3530=y +# CONFIG_BACKLIGHT_LM3533 is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set +CONFIG_SOUND=y +# CONFIG_SOUND_OSS_CORE is not set +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +CONFIG_SND_HWDEP=y +CONFIG_SND_RAWMIDI=y +CONFIG_SND_COMPRESS_OFFLOAD=y +CONFIG_SND_JACK=y +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +# CONFIG_SND_HRTIMER is not set +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG 
is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_ALOOP is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +# CONFIG_SND_ARM is not set +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=y +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_6FIRE is not set +CONFIG_SND_SOC=y + +# +# MSM SoC Audio support +# +CONFIG_SND_SOC_MSM_HOSTLESS_PCM=y +CONFIG_SND_SOC_MSM_QDSP6_HDMI_AUDIO=y +CONFIG_SND_SOC_MSM_QDSP6_INTF=y +# CONFIG_SND_SOC_MSM_QDSP6V2_INTF is not set +CONFIG_SND_SOC_VOICE=y +CONFIG_SND_SOC_QDSP6=y +# CONFIG_SND_SOC_QDSP6V2 is not set +CONFIG_SND_SOC_MSM8960=y +CONFIG_SND_SOC_DUAL_AMIC=y +CONFIG_SND_SOC_I2C_AND_SPI=y +# CONFIG_SND_SOC_ALL_CODECS is not set +CONFIG_SND_SOC_WCD9304=y +CONFIG_SND_SOC_WCD9310=y +CONFIG_SND_SOC_MSM_STUB=y +CONFIG_SND_SOC_TPA2028D=y +CONFIG_SOUND_CONTROL_HAX_GPL=y +CONFIG_SOUND_CONTROL_HAX_3_GPL=y +# CONFIG_SOUND_PRIME is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +CONFIG_UHID=y + +# +# USB Input Devices +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +CONFIG_USB_HIDDEV=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LOGITECH is not set +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB_ARCH_HAS_XHCI is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +# CONFIG_USB_DEVICEFS is not set +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_SUSPEND=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_DWC3 is not set +# 
CONFIG_USB_MON is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_EHSET=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_EHCI_MSM_HSIC=y +# CONFIG_USB_EHCI_MSM_HOST4 is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_PEHCI_HCD is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_RENESAS_USBHS is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=y +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DEBUG=y +# CONFIG_USB_STORAGE_REALTEK is not set +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +# CONFIG_USB_EZUSB is not set +# CONFIG_USB_SERIAL_GENERIC is not set +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# CONFIG_USB_SERIAL_FTDI_SIO is not set +# CONFIG_USB_SERIAL_FUNSOFT is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_F81232 is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_METRO is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MOTOROLA is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +# CONFIG_USB_SERIAL_PL2303 is not set +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_HP4X is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIEMENS_MPI is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# 
CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set +# CONFIG_USB_SERIAL_ZIO is not set +# CONFIG_USB_SERIAL_SSU100 is not set +CONFIG_USB_SERIAL_CSVT=y +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +CONFIG_USB_EHSET_TEST_FIXTURE=y +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +CONFIG_USB_QCOM_DIAG_BRIDGE=y +# CONFIG_USB_QCOM_DIAG_BRIDGE_TEST is not set +CONFIG_USB_QCOM_MDM_BRIDGE=y +CONFIG_USB_QCOM_KS_BRIDGE=y +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +CONFIG_USB_GADGET_DEBUG_FILES=y +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FUSB300 is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_NET2272 is not set +CONFIG_USB_CI13XXX_MSM=y +# CONFIG_USB_CI13XXX_MSM_HSIC is not set +# CONFIG_USB_DWC3_MSM is not set +# CONFIG_USB_MSM_72K is not set +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_GADGET_SUPERSPEED is not set +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_G_ANDROID=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_MSC_PROFILING is not set +CONFIG_MODEM_SUPPORT=y +CONFIG_RMNET_SMD_CTL_CHANNEL="" +CONFIG_RMNET_SMD_DATA_CHANNEL="" +# CONFIG_USB_ANDROID_CDC_ECM is not set + +# +# OTG and related infrastructure +# +CONFIG_USB_OTG_UTILS=y +# CONFIG_USB_OTG_WAKELOCK is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_USB_MSM_OTG_72K is not set +# CONFIG_NOP_USB_XCEIV is not set +CONFIG_USB_MSM_OTG=y +# CONFIG_USB_MSM_ACA is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_CLKGATE=y +# CONFIG_MMC_EMBEDDED_SDIO is not set +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=32 +# CONFIG_MMC_BLOCK_BOUNCE is not set +# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_SDHCI_PXAV3 is not set +# CONFIG_MMC_SDHCI_PXAV2 is not set +CONFIG_MMC_MSM=y +CONFIG_MMC_MSM_SDC1_SUPPORT=y +CONFIG_MMC_MSM_SDC1_8_BIT_SUPPORT=y +# CONFIG_MMC_MSM_SDC2_SUPPORT is not set +# 
CONFIG_MMC_MSM_SDC3_SUPPORT is not set +# CONFIG_MMC_MSM_SDC3_POLLING is not set +# CONFIG_MMC_MSM_SDC4_SUPPORT is not set +# CONFIG_MMC_MSM_SDC5_SUPPORT is not set +CONFIG_MMC_MSM_SPS_SUPPORT=y +# CONFIG_MMC_DW is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_MSM_PDM is not set +# CONFIG_LEDS_PMIC_MPP is not set +# CONFIG_LEDS_MSM_TRICOLOR is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_CPLD is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_PCA955X is not set +CONFIG_LEDS_PM8XXX=y +# CONFIG_LEDS_PCA9633 is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_MSM_PMIC is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_RENESAS_TPU is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_OT200 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +# CONFIG_LEDS_TRIGGER_TIMER is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +# CONFIG_LEDS_TRIGGER_SLEEP is not set +CONFIG_LEDS_TRIGGER_THERMAL=y + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_SWITCH=y +# CONFIG_SWITCH_GPIO is not set +CONFIG_SWITCH_FSA8008=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# 
CONFIG_RTC_DRV_MSM is not set +# CONFIG_RTC_DRV_MSM7X00A is not set +CONFIG_RTC_DRV_PM8XXX=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# Virtio drivers +# +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_STAGING=y +# CONFIG_USBIP_CORE is not set +# CONFIG_PRISM2_USB is not set +# CONFIG_ECHO is not set +# CONFIG_ASUS_OLED is not set +# CONFIG_RTLLIB is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5139 is not set +# CONFIG_TRANZPORT is not set +# CONFIG_LINE6_USB is not set +# CONFIG_USB_SERIAL_QUATECH2 is not set +# CONFIG_USB_SERIAL_QUATECH_USB2 is not set +# CONFIG_VT6656 is not set +# CONFIG_IIO is not set +CONFIG_QCACHE=y +# CONFIG_FB_SM7XX is not set +# CONFIG_USB_ENESTORAGE is not set +# CONFIG_BCM_WIMAX is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOGGER=m +CONFIG_ANDROID_PERSISTENT_RAM=y +CONFIG_ANDROID_RAM_CONSOLE=y +# CONFIG_PERSISTENT_TRACER is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y +# CONFIG_ANDROID_SWITCH is not set +# CONFIG_ANDROID_INTF_ALARM_DEV is not set +# CONFIG_PHONE is not set +# CONFIG_USB_WPAN_HCD is not set + +# +# Qualcomm Atheros Prima WLAN module +# +CONFIG_PRIMA_WLAN=y +# CONFIG_PRIMA_WLAN_BTAMP is not set +CONFIG_PRIMA_WLAN_LFR=y +CONFIG_PRIMA_WLAN_OKC=y +# CONFIG_PRIMA_WLAN_11AC_HIGH_TP is not set +# CONFIG_QCOM_VOWIFI_11R is not set + +# +# Qualcomm MSM specific device drivers +# +CONFIG_MSM_SSBI=y +CONFIG_SPS=y +# CONFIG_USB_BAM is not set +CONFIG_SPS_SUPPORT_BAMDMA=y +# CONFIG_SPS_SUPPORT_NDP_BAM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y + +# +# Hardware Spinlock drivers +# +CONFIG_IOMMU_SUPPORT=y +CONFIG_MSM_IOMMU=y +CONFIG_MSM_IOMMU_GPU_SYNC=y +CONFIG_IOMMU_PGTABLES_L2=y + +# +# Remoteproc drivers (EXPERIMENTAL) +# + +# +# Rpmsg drivers (EXPERIMENTAL) +# +# CONFIG_VIRT_DRIVERS is not set +# CONFIG_PM_DEVFREQ is not set +# CONFIG_MOBICORE_SUPPORT is not set +# CONFIG_CORESIGHT is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# 
Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_TMPFS_XATTR is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1048 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set 
+# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_HIGHMEM is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_CPU_STALL_VERBOSE is not set +# CONFIG_RCU_CPU_STALL_INFO is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_LKDTM is not set +# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_CPU_FREQ_SWITCH_PROFILER is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_ARM_UNWIND=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_RODATA is not set +# CONFIG_DEBUG_LL is not set +CONFIG_PID_IN_CONTEXTIDR=y + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_LSM_MMAP_MIN_ADDR=4096 +CONFIG_SECURITY_SELINUX=y +# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set +# CONFIG_SECURITY_SELINUX_DISABLE is not set +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_IMA is not set +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_DEFAULT_SECURITY="selinux" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y 
+CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_HW is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_AUDIT_GENERIC=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_NLATTR=y +# CONFIG_AVERAGE is not set +# CONFIG_CORDIC is not set diff --git a/semaphore-build.sh b/semaphore-build.sh new file mode 100755 index 0000000000000..f17c284370f80 --- /dev/null +++ b/semaphore-build.sh @@ -0,0 +1,21 @@ +export ARCH=arm +unset CROSS_COMPILE +#export CROSS_COMPILE=/opt/gcc-4.8-linaro/bin/arm-cortex_a15-linux-gnueabi- +#export 
CROSS_COMPILE=/opt/gcc-4.7-linaro/bin/arm-cortex_a15-linux-gnueabi-
+export CROSS_COMPILE=/opt/sabermod-gcc/bin/arm-eabi-
+
+#echo "Cleaning old craps..."
+#make distclean
+
+export KBUILD_BUILD_USER=najmi
+export KBUILD_BUILD_HOST="kampung-pandan"
+export LOCALVERSION="-Semaphore-Pandan-kernel-"
+
+#echo "Copy backup config..."
+#cp najmi-mako-config .config
+make semaphore_mako_defconfig
+make menuconfig
+#echo "Begin compile..."
+#make -j8
+
+
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 0e99137db86a6..aec054a739fff 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -454,3 +454,16 @@ config SND_SOC_TPA2028D
 	default n
 	help
 	  Texas Instruments 3W Mono Class-D Audio Amplifier
+
+config SOUND_CONTROL_HAX_GPL
+	tristate "wcd93xx sound control hax"
+	default y
+	help
+	  FauxSound WCD93xx chipset sound control hacks
+
+config SOUND_CONTROL_HAX_3_GPL
+	tristate "new wcd93xx sound control hax"
+	default y
+	help
+	  FauxSound WCD93xx chipset sound control hacks 3.0 for deeper hax
+
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index ec05d3c644da6..4286da06f394e 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -213,3 +213,7 @@ obj-$(CONFIG_SND_SOC_MSM_STUB) += snd-soc-msm-stub.o
 obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
 obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
 obj-$(CONFIG_SND_SOC_TPA2028D) += tpa2028d.o
+
+obj-$(CONFIG_SOUND_CONTROL_HAX_GPL) += sound_control_gpl.o
+obj-$(CONFIG_SOUND_CONTROL_HAX_3_GPL) += sound_control_3_gpl.o
+
diff --git a/sound/soc/codecs/sound_control_3_gpl.c b/sound/soc/codecs/sound_control_3_gpl.c
new file mode 100644
index 0000000000000..5376620462cbd
--- /dev/null
+++ b/sound/soc/codecs/sound_control_3_gpl.c
@@ -0,0 +1,433 @@
+/*
+ * Author: Paul Reioux aka Faux123
+ *
+ * WCD93xx sound control module
+ * Copyright 2013 Paul Reioux
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ + +#include +#include +#include +#include +#include + +#define SOUND_CONTROL_MAJOR_VERSION 3 +#define SOUND_CONTROL_MINOR_VERSION 2 + +#define REG_SZ 21 + +extern struct snd_soc_codec *fauxsound_codec_ptr; + +static int snd_ctrl_locked = 0; + +unsigned int tabla_read(struct snd_soc_codec *codec, unsigned int reg); +int tabla_write(struct snd_soc_codec *codec, unsigned int reg, + unsigned int value); + + +static unsigned int cached_regs[] = {6, 6, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0 }; + +static unsigned int *cache_select(unsigned int reg) +{ + unsigned int *out = NULL; + + switch (reg) { + case TABLA_A_RX_HPH_L_GAIN: + out = &cached_regs[0]; + break; + case TABLA_A_RX_HPH_R_GAIN: + out = &cached_regs[1]; + break; + case TABLA_A_CDC_RX1_VOL_CTL_B2_CTL: + out = &cached_regs[4]; + break; + case TABLA_A_CDC_RX2_VOL_CTL_B2_CTL: + out = &cached_regs[5]; + break; + case TABLA_A_CDC_RX3_VOL_CTL_B2_CTL: + out = &cached_regs[6]; + break; + case TABLA_A_CDC_RX4_VOL_CTL_B2_CTL: + out = &cached_regs[7]; + break; + case TABLA_A_CDC_RX5_VOL_CTL_B2_CTL: + out = &cached_regs[8]; + break; + case TABLA_A_CDC_RX6_VOL_CTL_B2_CTL: + out = &cached_regs[9]; + break; + case TABLA_A_CDC_RX7_VOL_CTL_B2_CTL: + out = &cached_regs[10]; + break; + case TABLA_A_CDC_TX1_VOL_CTL_GAIN: + out = &cached_regs[11]; + break; + case TABLA_A_CDC_TX2_VOL_CTL_GAIN: + out = &cached_regs[12]; + break; + case TABLA_A_CDC_TX3_VOL_CTL_GAIN: + out = &cached_regs[13]; + break; + case TABLA_A_CDC_TX4_VOL_CTL_GAIN: + out = &cached_regs[14]; + break; + case TABLA_A_CDC_TX5_VOL_CTL_GAIN: + out = &cached_regs[15]; + break; + case TABLA_A_CDC_TX6_VOL_CTL_GAIN: + out = &cached_regs[16]; + break; + case TABLA_A_CDC_TX7_VOL_CTL_GAIN: + out = &cached_regs[17]; + break; + case TABLA_A_CDC_TX8_VOL_CTL_GAIN: + out = &cached_regs[18]; + break; + case TABLA_A_CDC_TX9_VOL_CTL_GAIN: + out = &cached_regs[19]; + break; + case TABLA_A_CDC_TX10_VOL_CTL_GAIN: + out = &cached_regs[20]; + break; + } + return out; +} + +void snd_hax_cache_write(unsigned int reg, unsigned int value) +{ + unsigned int *tmp = cache_select(reg); + + if (tmp != NULL) + *tmp = value; +} +EXPORT_SYMBOL(snd_hax_cache_write); + +unsigned int snd_hax_cache_read(unsigned int reg) +{ + if (cache_select(reg) != NULL) + return *cache_select(reg); + else + return -1; +} +EXPORT_SYMBOL(snd_hax_cache_read); + +int snd_hax_reg_access(unsigned int reg) +{ + int ret = 1; + + switch (reg) { + case TABLA_A_RX_HPH_L_GAIN: + case TABLA_A_RX_HPH_R_GAIN: + case TABLA_A_RX_HPH_L_STATUS: + case TABLA_A_RX_HPH_R_STATUS: + case TABLA_A_CDC_RX1_VOL_CTL_B2_CTL: + case TABLA_A_CDC_RX2_VOL_CTL_B2_CTL: + case TABLA_A_CDC_RX3_VOL_CTL_B2_CTL: + case TABLA_A_CDC_RX4_VOL_CTL_B2_CTL: + case TABLA_A_CDC_RX5_VOL_CTL_B2_CTL: + case TABLA_A_CDC_RX6_VOL_CTL_B2_CTL: + case TABLA_A_CDC_RX7_VOL_CTL_B2_CTL: + case TABLA_A_CDC_TX1_VOL_CTL_GAIN: + case TABLA_A_CDC_TX2_VOL_CTL_GAIN: + case TABLA_A_CDC_TX3_VOL_CTL_GAIN: + case TABLA_A_CDC_TX4_VOL_CTL_GAIN: + case TABLA_A_CDC_TX5_VOL_CTL_GAIN: + case TABLA_A_CDC_TX6_VOL_CTL_GAIN: + case TABLA_A_CDC_TX7_VOL_CTL_GAIN: + case TABLA_A_CDC_TX8_VOL_CTL_GAIN: + case TABLA_A_CDC_TX9_VOL_CTL_GAIN: + case TABLA_A_CDC_TX10_VOL_CTL_GAIN: + if (snd_ctrl_locked) + ret = 0; + break; + default: + break; + } + return ret; +} +EXPORT_SYMBOL(snd_hax_reg_access); + +static bool calc_checksum(unsigned int a, unsigned int b, unsigned int c) +{ + unsigned char chksum = 0; + + chksum = ~((a & 0xff) + (b & 0xff)); + + if (chksum == (c & 0xff)) { + return true; + } else { + 
return false; + } +} + +static ssize_t cam_mic_gain_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", + tabla_read(fauxsound_codec_ptr, + TABLA_A_CDC_TX6_VOL_CTL_GAIN)); + +} + +static ssize_t cam_mic_gain_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int lval, chksum; + + sscanf(buf, "%u %u", &lval, &chksum); + + if (calc_checksum(lval, 0, chksum)) { + tabla_write(fauxsound_codec_ptr, + TABLA_A_CDC_TX6_VOL_CTL_GAIN, lval); + } + return count; +} + +static ssize_t mic_gain_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", + tabla_read(fauxsound_codec_ptr, + TABLA_A_CDC_TX7_VOL_CTL_GAIN)); +} + +static ssize_t mic_gain_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int lval, chksum; + + sscanf(buf, "%u %u", &lval, &chksum); + + if (calc_checksum(lval, 0, chksum)) { + tabla_write(fauxsound_codec_ptr, + TABLA_A_CDC_TX7_VOL_CTL_GAIN, lval); + } + return count; + +} + +static ssize_t speaker_gain_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u %u\n", + tabla_read(fauxsound_codec_ptr, + TABLA_A_CDC_RX3_VOL_CTL_B2_CTL), + tabla_read(fauxsound_codec_ptr, + TABLA_A_CDC_RX4_VOL_CTL_B2_CTL)); + +} + +static ssize_t speaker_gain_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int lval, rval, chksum; + + sscanf(buf, "%u %u %u", &lval, &rval, &chksum); + + if (calc_checksum(lval, rval, chksum)) { + tabla_write(fauxsound_codec_ptr, + TABLA_A_CDC_RX3_VOL_CTL_B2_CTL, lval); + tabla_write(fauxsound_codec_ptr, + TABLA_A_CDC_RX4_VOL_CTL_B2_CTL, rval); + } + return count; +} + +static ssize_t headphone_gain_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u %u\n", + tabla_read(fauxsound_codec_ptr, + TABLA_A_CDC_RX1_VOL_CTL_B2_CTL), + tabla_read(fauxsound_codec_ptr, + TABLA_A_CDC_RX2_VOL_CTL_B2_CTL)); +} + +static ssize_t headphone_gain_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int lval, rval, chksum; + + sscanf(buf, "%u %u %u", &lval, &rval, &chksum); + + if (calc_checksum(lval, rval, chksum)) { + tabla_write(fauxsound_codec_ptr, + TABLA_A_CDC_RX1_VOL_CTL_B2_CTL, lval); + tabla_write(fauxsound_codec_ptr, + TABLA_A_CDC_RX2_VOL_CTL_B2_CTL, rval); + } + return count; +} + +static ssize_t headphone_pa_gain_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u %u\n", + tabla_read(fauxsound_codec_ptr, TABLA_A_RX_HPH_L_GAIN), + tabla_read(fauxsound_codec_ptr, TABLA_A_RX_HPH_R_GAIN)); +} + +static ssize_t headphone_pa_gain_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int lval, rval, chksum; + unsigned int gain, status; + unsigned int out; + + sscanf(buf, "%u %u %u", &lval, &rval, &chksum); + + if (calc_checksum(lval, rval, chksum)) { + gain = tabla_read(fauxsound_codec_ptr, TABLA_A_RX_HPH_L_GAIN); + out = (gain & 0xf0) | lval; + tabla_write(fauxsound_codec_ptr, TABLA_A_RX_HPH_L_GAIN, out); + + status = tabla_read(fauxsound_codec_ptr, TABLA_A_RX_HPH_L_STATUS); + out = (status & 0x0f) | (lval << 4); + tabla_write(fauxsound_codec_ptr, TABLA_A_RX_HPH_L_STATUS, out); + + gain = tabla_read(fauxsound_codec_ptr, TABLA_A_RX_HPH_R_GAIN); + out = (gain & 0xf0) | rval; + tabla_write(fauxsound_codec_ptr, 
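/*
 * The HPH PA gain value occupies the low nibble of the *_GAIN registers,
 * so these read-modify-writes keep bits 7:4 and assume the supplied
 * values are in 0..15; the *_STATUS writes mirror the same value into the
 * upper nibble. All of the files defined below land under
 * /sys/kernel/sound_control_3/ via the kobject created in
 * sound_control_init().
 */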
TABLA_A_RX_HPH_R_GAIN, out); + + status = tabla_read(fauxsound_codec_ptr, TABLA_A_RX_HPH_R_STATUS); + out = (status & 0x0f) | (rval << 4); + tabla_write(fauxsound_codec_ptr, TABLA_A_RX_HPH_R_STATUS, out); + } + return count; +} + +static ssize_t sound_control_version_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "version: %u.%u\n", + SOUND_CONTROL_MAJOR_VERSION, + SOUND_CONTROL_MINOR_VERSION); +} + +static ssize_t sound_control_locked_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + int inp; + + sscanf(buf, "%d", &inp); + + if (inp == 0) + snd_ctrl_locked = 0; + else + snd_ctrl_locked = 1; + + return count; +} + +static ssize_t sound_control_locked_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", snd_ctrl_locked); +} + +static struct kobj_attribute cam_mic_gain_attribute = + __ATTR(gpl_cam_mic_gain, + 0666, + cam_mic_gain_show, + cam_mic_gain_store); + +static struct kobj_attribute mic_gain_attribute = + __ATTR(gpl_mic_gain, + 0666, + mic_gain_show, + mic_gain_store); + +static struct kobj_attribute speaker_gain_attribute = + __ATTR(gpl_speaker_gain, + 0666, + speaker_gain_show, + speaker_gain_store); + +static struct kobj_attribute headphone_gain_attribute = + __ATTR(gpl_headphone_gain, + 0666, + headphone_gain_show, + headphone_gain_store); + +static struct kobj_attribute headphone_pa_gain_attribute = + __ATTR(gpl_headphone_pa_gain, + 0666, + headphone_pa_gain_show, + headphone_pa_gain_store); + +static struct kobj_attribute sound_control_locked_attribute = + __ATTR(gpl_sound_control_locked, + 0666, + sound_control_locked_show, + sound_control_locked_store); + +static struct kobj_attribute sound_control_version_attribute = + __ATTR(gpl_sound_control_version, + 0444, + sound_control_version_show, NULL); + +static struct attribute *sound_control_attrs[] = + { + &cam_mic_gain_attribute.attr, + &mic_gain_attribute.attr, + &speaker_gain_attribute.attr, + &headphone_gain_attribute.attr, + &headphone_pa_gain_attribute.attr, + &sound_control_locked_attribute.attr, + &sound_control_version_attribute.attr, + NULL, + }; + +static struct attribute_group sound_control_attr_group = + { + .attrs = sound_control_attrs, + }; + +static struct kobject *sound_control_kobj; + +static int sound_control_init(void) +{ + int sysfs_result; + + sound_control_kobj = + kobject_create_and_add("sound_control_3", kernel_kobj); + + if (!sound_control_kobj) { + pr_err("%s sound_control_kobj create failed!\n", + __FUNCTION__); + return -ENOMEM; + } + + sysfs_result = sysfs_create_group(sound_control_kobj, + &sound_control_attr_group); + + if (sysfs_result) { + pr_info("%s sysfs create failed!\n", __FUNCTION__); + kobject_put(sound_control_kobj); + } + return sysfs_result; +} + +static void sound_control_exit(void) +{ + if (sound_control_kobj != NULL) + kobject_put(sound_control_kobj); +} + +module_init(sound_control_init); +module_exit(sound_control_exit); +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Paul Reioux "); +MODULE_DESCRIPTION("Sound Control Module 3.x"); + diff --git a/sound/soc/codecs/sound_control_gpl.c b/sound/soc/codecs/sound_control_gpl.c new file mode 100644 index 0000000000000..db50d0d752cb6 --- /dev/null +++ b/sound/soc/codecs/sound_control_gpl.c @@ -0,0 +1,325 @@ +/* + * Author: Paul Reioux aka Faux123 + * + * WCD93xx sound control module + * Copyright 2013 Paul Reioux + * + * This software is licensed under the terms of the GNU General Public 
+ * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include +#include + +extern struct snd_kcontrol_new *gpl_faux_snd_controls_ptr; + +#define SOUND_CONTROL_MAJOR_VERSION 2 +#define SOUND_CONTROL_MINOR_VERSION 1 + +#define CAMCORDER_MIC_OFFSET 20 +#define HANDSET_MIC_OFFSET 21 +#define SPEAKER_OFFSET 10 +#define HEADPHONE_L_OFFSET 8 +#define HEADPHONE_R_OFFSET 9 + +#define HEADPHONE_PA_L_OFFSET 6 +#define HEADPHONE_PA_R_OFFSET 7 + +static ssize_t cam_mic_gain_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct soc_mixer_control *l_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[CAMCORDER_MIC_OFFSET]. + private_value; + + return sprintf(buf, "%d", l_mixer_ptr->max); +} + +static ssize_t cam_mic_gain_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + int l_max; + int l_delta; + struct soc_mixer_control *l_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[CAMCORDER_MIC_OFFSET]. + private_value; + + sscanf(buf, "%d", &l_max); + + // limit the max gain + l_delta = l_max - l_mixer_ptr->platform_max; + l_mixer_ptr->platform_max = l_max; + l_mixer_ptr->max = l_max; + l_mixer_ptr->min += l_delta; + + return (count); +} + +static ssize_t mic_gain_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct soc_mixer_control *l_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[HANDSET_MIC_OFFSET]. + private_value; + + return sprintf(buf, "%d", l_mixer_ptr->max); +} + +static ssize_t mic_gain_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + int l_max; + int l_delta; + struct soc_mixer_control *l_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[HANDSET_MIC_OFFSET]. + private_value; + + sscanf(buf, "%d", &l_max); + + l_delta = l_max - l_mixer_ptr->platform_max; + l_mixer_ptr->platform_max = l_max; + l_mixer_ptr->max = l_max; + l_mixer_ptr->min += l_delta; + + return (count); +} + +static ssize_t speaker_gain_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct soc_mixer_control *l_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[SPEAKER_OFFSET]. + private_value; + + return sprintf(buf, "%d", l_mixer_ptr->max); +} + +static ssize_t speaker_gain_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + int l_max; + int l_delta; + struct soc_mixer_control *l_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[SPEAKER_OFFSET]. + private_value; + + sscanf(buf, "%d", &l_max); + + l_delta = l_max - l_mixer_ptr->platform_max; + l_mixer_ptr->platform_max = l_max; + l_mixer_ptr->max = l_max; + l_mixer_ptr->min += l_delta; + + return (count); +} + +static ssize_t headphone_gain_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct soc_mixer_control *l_mixer_ptr, *r_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[HEADPHONE_L_OFFSET]. 
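/*
 * Unlike the v3 module, nothing here touches codec registers: each store
 * rescales the limits of an existing ALSA mixer control, reached through
 * the exported tabla_snd_controls table, keeping the control's span
 * constant while moving its ceiling, i.e. roughly:
 *
 *   delta = new_max - mc->platform_max;
 *   mc->platform_max = mc->max = new_max;
 *   mc->min += delta;
 *
 * The *_OFFSET constants above are positional indexes into that table and
 * are specific to this wcd9310 driver's control ordering.
 */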
+ private_value; + r_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[HEADPHONE_R_OFFSET]. + private_value; + + return sprintf(buf, "%d %d", + l_mixer_ptr->max, + r_mixer_ptr->max); +} + +static ssize_t headphone_gain_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + int l_max, r_max; + int l_delta, r_delta; + struct soc_mixer_control *l_mixer_ptr, *r_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[HEADPHONE_L_OFFSET]. + private_value; + r_mixer_ptr = + (struct soc_mixer_control *)gpl_faux_snd_controls_ptr[HEADPHONE_R_OFFSET]. + private_value; + + sscanf(buf, "%d %d", &l_max, &r_max); + + l_delta = l_max - l_mixer_ptr->platform_max; + l_mixer_ptr->platform_max = l_max; + l_mixer_ptr->max = l_max; + l_mixer_ptr->min += l_delta; + + r_delta = r_max - r_mixer_ptr->platform_max; + r_mixer_ptr->platform_max = r_max; + r_mixer_ptr->max = r_max; + r_mixer_ptr->min += r_delta; + + return count; +} + +static ssize_t headphone_pa_gain_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + struct soc_mixer_control *l_mixer_ptr, *r_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *) + gpl_faux_snd_controls_ptr[HEADPHONE_PA_L_OFFSET]. + private_value; + r_mixer_ptr = + (struct soc_mixer_control *) + gpl_faux_snd_controls_ptr[HEADPHONE_PA_R_OFFSET]. + private_value; + + return sprintf(buf, "%d %d", + l_mixer_ptr->max, + r_mixer_ptr->max); +} + +static ssize_t headphone_pa_gain_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + int l_max, r_max; + int l_delta, r_delta; + struct soc_mixer_control *l_mixer_ptr, *r_mixer_ptr; + + l_mixer_ptr = + (struct soc_mixer_control *) + gpl_faux_snd_controls_ptr[HEADPHONE_PA_L_OFFSET]. + private_value; + r_mixer_ptr = + (struct soc_mixer_control *) + gpl_faux_snd_controls_ptr[HEADPHONE_PA_R_OFFSET]. 
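/*
 * Usage sketch (paths follow from the kobject name registered below; the
 * numbers are only illustrative): these v2 files take plain decimal
 * values with no trailing checksum, e.g.
 *
 *   echo "-5 -5" > /sys/kernel/sound_control/gpl_headphone_gain
 *   echo "-5"    > /sys/kernel/sound_control/gpl_speaker_gain
 */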
+ private_value; + + sscanf(buf, "%d %d", &l_max, &r_max); + + l_delta = l_max - l_mixer_ptr->platform_max; + l_mixer_ptr->platform_max = l_max; + l_mixer_ptr->max = l_max; + l_mixer_ptr->min += l_delta; + + r_delta = r_max - r_mixer_ptr->platform_max; + r_mixer_ptr->platform_max = r_max; + r_mixer_ptr->max = r_max; + r_mixer_ptr->min += r_delta; + + return count; +} + +static ssize_t sound_control_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "version: %u.%u\n", + SOUND_CONTROL_MAJOR_VERSION, + SOUND_CONTROL_MINOR_VERSION); +} + +static struct kobj_attribute cam_mic_gain_attribute = + __ATTR(gpl_cam_mic_gain, + 0666, + cam_mic_gain_show, + cam_mic_gain_store); + +static struct kobj_attribute mic_gain_attribute = + __ATTR(gpl_mic_gain, + 0666, + mic_gain_show, + mic_gain_store); + +static struct kobj_attribute speaker_gain_attribute = + __ATTR(gpl_speaker_gain, + 0666, + speaker_gain_show, + speaker_gain_store); + +static struct kobj_attribute headphone_gain_attribute = + __ATTR(gpl_headphone_gain, + 0666, + headphone_gain_show, + headphone_gain_store); + +static struct kobj_attribute headphone_pa_gain_attribute = + __ATTR(gpl_headphone_pa_gain, + 0666, + headphone_pa_gain_show, + headphone_pa_gain_store); + +static struct kobj_attribute sound_control_version_attribute = + __ATTR(gpl_sound_control_version, + 0444, + sound_control_version_show, NULL); + +static struct attribute *sound_control_attrs[] = + { + &cam_mic_gain_attribute.attr, + &mic_gain_attribute.attr, + &speaker_gain_attribute.attr, + &headphone_gain_attribute.attr, + &headphone_pa_gain_attribute.attr, + &sound_control_version_attribute.attr, + NULL, + }; + +static struct attribute_group sound_control_attr_group = + { + .attrs = sound_control_attrs, + }; + +static struct kobject *sound_control_kobj; + +static int sound_control_init(void) +{ + int sysfs_result; + + if (gpl_faux_snd_controls_ptr == NULL) { + pr_err("%s sound_controls_ptr is NULL!\n", __FUNCTION__); + return -1; + } + + sound_control_kobj = + kobject_create_and_add("sound_control", kernel_kobj); + + if (!sound_control_kobj) { + pr_err("%s sound_control_kobj create failed!\n", + __FUNCTION__); + return -ENOMEM; + } + + sysfs_result = sysfs_create_group(sound_control_kobj, + &sound_control_attr_group); + + if (sysfs_result) { + pr_info("%s sysfs create failed!\n", __FUNCTION__); + kobject_put(sound_control_kobj); + } + return sysfs_result; +} + +static void sound_control_exit(void) +{ + if (sound_control_kobj != NULL) + kobject_put(sound_control_kobj); +} + +module_init(sound_control_init); +module_exit(sound_control_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Paul Reioux "); +MODULE_DESCRIPTION("Sound Control Module GPL Edition"); + diff --git a/sound/soc/codecs/tpa2028d.c b/sound/soc/codecs/tpa2028d.c index 38c8c962c37f8..f32b33aafc7a5 100644 --- a/sound/soc/codecs/tpa2028d.c +++ b/sound/soc/codecs/tpa2028d.c @@ -100,17 +100,27 @@ int tpa2028d_poweron(void) int fail = 0; char agc_compression_rate = amp_data->pdata->agc_compression_rate; char agc_output_limiter_disable = amp_data->pdata->agc_output_limiter_disable; - char agc_fixed_gain = amp_data->pdata->agc_fixed_gain; - - agc_output_limiter_disable = (agc_output_limiter_disable<<7); + char agc_fixed_gain = amp_data->pdata->agc_fixed_gain & 0x3F; + char ATK_time = amp_data->pdata->ATK_time & 0x3F; + char REL_time = amp_data->pdata->REL_time & 0x3F; + char Hold_time = amp_data->pdata->Hold_time & 0x3F; + char Output_limit_level = 
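/*
 * Register packing used below (as implied by the masks in this function):
 *   AGC1_CONTROL: bit 7 = output limiter disable, bits 6:5 = noise gate
 *                 threshold, bits 4:0 = output limit level
 *   AGC2_CONTROL: bits 7:4 = max gain, low bits = compression rate
 * Attack/release/hold times and the fixed gain are 6-bit fields, hence
 * the 0x3F masks.
 */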
amp_data->pdata->Output_limit_level & 0x1F; + char Noise_Gate_Threshold + = (amp_data->pdata->Noise_Gate_Threshold & 0x03) << 5; + char AGC_Max_Gain = (amp_data->pdata->AGC_Max_Gain & 0x0F) << 4; + agc_output_limiter_disable = (agc_output_limiter_disable << 7); fail |= WriteI2C(IC_CONTROL, 0xE3); /*Tuen On*/ - fail |= WriteI2C(AGC_ATTACK_CONTROL, 0x05); /*Tuen On*/ - fail |= WriteI2C(AGC_RELEASE_CONTROL, 0x0B); /*Tuen On*/ - fail |= WriteI2C(AGC_HOLD_TIME_CONTROL, 0x00); /*Tuen On*/ + fail |= WriteI2C(AGC_ATTACK_CONTROL, ATK_time); /*Tuen On*/ + fail |= WriteI2C(AGC_RELEASE_CONTROL, REL_time); /*Tuen On*/ + fail |= WriteI2C(AGC_HOLD_TIME_CONTROL, Hold_time); /*Tuen On*/ fail |= WriteI2C(AGC_FIXED_GAIN_CONTROL, agc_fixed_gain); /*Tuen On*/ - fail |= WriteI2C(AGC1_CONTROL, 0x3A|agc_output_limiter_disable); /*Tuen On*/ - fail |= WriteI2C(AGC2_CONTROL, 0xC0|agc_compression_rate); /*Tuen On*/ + fail |= WriteI2C(AGC1_CONTROL, + Noise_Gate_Threshold| + Output_limit_level| + agc_output_limiter_disable); /*Tuen On*/ + fail |= WriteI2C(AGC2_CONTROL, + AGC_Max_Gain|agc_compression_rate); /*Tuen On*/ fail |= WriteI2C(IC_CONTROL, 0xC3); /*Tuen On*/ return fail; @@ -235,10 +245,172 @@ tpa2028d_fixed_gain_show(struct device *dev, struct device_attribute *attr, ch return sprintf(buf, "fixed_gain : %x, pdata->agc_fixed_gain : %d\n", val, pdata->agc_fixed_gain); } +static ssize_t +tpa2028d_ATK_time_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + int val; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + pdata->ATK_time = val; + return count; +} + +static ssize_t +tpa2028d_ATK_time_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + char val=0; + + ReadI2C(AGC_ATTACK_CONTROL, &val); + + D("[tpa2028d_ATK_time_show] val : %x \n",val); + + return sprintf(buf, "ATK_time : %x, pdata->ATK_time : %d\n", val, pdata->ATK_time); +} + +static ssize_t +tpa2028d_REL_time_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + int val; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + pdata->REL_time = val; + return count; +} + +static ssize_t +tpa2028d_REL_time_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + char val=0; + + ReadI2C(AGC_RELEASE_CONTROL, &val); + + D("[tpa2028d_REL_time_show] val : %x \n",val); + + return sprintf(buf, "REL_time : %x, pdata->REL_time : %d\n", val, pdata->REL_time); +} + +static ssize_t +tpa2028d_Hold_time_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + int val; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + pdata->Hold_time = val; + return count; +} + +static ssize_t +tpa2028d_Hold_time_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + char val=0; + + ReadI2C(AGC_HOLD_TIME_CONTROL, &val); + + D("[tpa2028d_Hold_time_show] val : %x \n",val); + + return sprintf(buf, "Hold_time : %x, pdata->Hold_time : %d\n", val, pdata->Hold_time); +} + +static ssize_t +tpa2028d_Output_limit_level_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct audio_amp_platform_data *pdata = 
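/*
 * As with the attack/release/hold handlers above, the store paths only
 * update the cached platform data; the amplifier itself is reprogrammed
 * the next time tpa2028d_poweron() runs, while each _show reads the live
 * register back over I2C. The new entries (ATK_time, REL_time, Hold_time,
 * Output_limit_level, Noise_Gate_Threshold, AGC_Max_Gain) sit alongside
 * the existing comp_rate/out_lim/fixed_gain device attributes.
 */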
amp_data->pdata; + int val; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + pdata->Output_limit_level = val; + return count; +} + +static ssize_t +tpa2028d_Output_limit_level_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + char val=0; + + ReadI2C(AGC1_CONTROL, &val); + + D("[tpa2028d_Output_limit_level_show] val : %x \n",val); + + return sprintf(buf, "Output_limit_level : %x, pdata->Output_limit_level : %d\n", val, pdata->Output_limit_level); +} + +static ssize_t +tpa2028d_Noise_Gate_Threshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + int val; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + pdata->Noise_Gate_Threshold = val; + return count; +} + +static ssize_t +tpa2028d_Noise_Gate_Threshold_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + char val=0; + + ReadI2C(AGC1_CONTROL, &val); + + D("[tpa2028d_Noise_Gate_Threshold_show] val : %x \n",val); + + return sprintf(buf, "Noise_Gate_Threshold : %x, pdata->Noise_Gate_Threshold : %d\n", val, pdata->Noise_Gate_Threshold); +} + +static ssize_t +tpa2028d_AGC_Max_Gain_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + int val; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + pdata->AGC_Max_Gain = val; + return count; +} + +static ssize_t +tpa2028d_AGC_Max_Gain_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct audio_amp_platform_data *pdata = amp_data->pdata; + char val=0; + + ReadI2C(AGC2_CONTROL, &val); + + D("[tpa2028d_AGC_Max_Gain_show] val : %x \n",val); + + return sprintf(buf, "AGC_Max_Gain : %x, pdata->AGC_Max_Gain : %d\n", val, pdata->AGC_Max_Gain); +} + static struct device_attribute tpa2028d_device_attrs[] = { __ATTR(comp_rate, S_IRUGO | S_IWUSR, tpa2028d_comp_rate_show, tpa2028d_comp_rate_store), __ATTR(out_lim, S_IRUGO | S_IWUSR, tpa2028d_out_lim_show, tpa2028d_out_lim_store), __ATTR(fixed_gain, S_IRUGO | S_IWUSR, tpa2028d_fixed_gain_show, tpa2028d_fixed_gain_store), + __ATTR(ATK_time, S_IRUGO | S_IWUSR, tpa2028d_ATK_time_show, tpa2028d_ATK_time_store), + __ATTR(REL_time, S_IRUGO | S_IWUSR, tpa2028d_REL_time_show, tpa2028d_REL_time_store), + __ATTR(Hold_time, S_IRUGO | S_IWUSR, tpa2028d_Hold_time_show, tpa2028d_Hold_time_store), + __ATTR(Output_limit_level, S_IRUGO | S_IWUSR, tpa2028d_Output_limit_level_show, tpa2028d_Output_limit_level_store), + __ATTR(Noise_Gate_Threshold, S_IRUGO | S_IWUSR, tpa2028d_Noise_Gate_Threshold_show, tpa2028d_Noise_Gate_Threshold_store), + __ATTR(AGC_Max_Gain, S_IRUGO | S_IWUSR, tpa2028d_AGC_Max_Gain_show, tpa2028d_AGC_Max_Gain_store), }; static int tpa2028d_amp_probe(struct i2c_client *client, diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c index cb403711c6448..01bcef8517c0a 100644 --- a/sound/soc/codecs/wcd9310.c +++ b/sound/soc/codecs/wcd9310.c @@ -3836,6 +3836,7 @@ static int tabla_readable(struct snd_soc_codec *ssc, unsigned int reg) return tabla_reg_readable[reg]; } + static bool tabla_is_digital_gain_register(unsigned int reg) { bool rtn = false; @@ -3904,7 +3905,10 @@ static int tabla_volatile(struct snd_soc_codec *ssc, unsigned int reg) } #define TABLA_FORMATS (SNDRV_PCM_FMTBIT_S16_LE) -static int tabla_write(struct snd_soc_codec *codec, unsigned 
int reg, +#ifndef CONFIG_SOUND_CONTROL_HAX_GPL +static +#endif +int tabla_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { int ret; @@ -3919,7 +3923,14 @@ static int tabla_write(struct snd_soc_codec *codec, unsigned int reg, return wcd9xxx_reg_write(codec->control_data, reg, value); } -static unsigned int tabla_read(struct snd_soc_codec *codec, +#ifdef CONFIG_SOUND_CONTROL_HAX_GPL +EXPORT_SYMBOL(tabla_write); +#endif + +#ifndef CONFIG_SOUND_CONTROL_HAX_GPL +static +#endif +unsigned int tabla_read(struct snd_soc_codec *codec, unsigned int reg) { unsigned int val; @@ -3940,6 +3951,9 @@ static unsigned int tabla_read(struct snd_soc_codec *codec, val = wcd9xxx_reg_read(codec->control_data, reg); return val; } +#ifdef CONFIG_SOUND_CONTROL_HAX_GPL +EXPORT_SYMBOL(tabla_read); +#endif static s16 tabla_get_current_v_ins(struct tabla_priv *tabla, bool hu) { @@ -8361,6 +8375,13 @@ static const struct file_operations codec_mbhc_debug_ops = { }; #endif +#ifdef CONFIG_SOUND_CONTROL_HAX_GPL +struct snd_kcontrol_new *gpl_faux_snd_controls_ptr = + (struct snd_kcontrol_new *)tabla_snd_controls; +struct snd_soc_codec *fauxsound_codec_ptr; +EXPORT_SYMBOL(fauxsound_codec_ptr); +#endif + static int tabla_codec_probe(struct snd_soc_codec *codec) { struct wcd9xxx *control; @@ -8370,10 +8391,16 @@ static int tabla_codec_probe(struct snd_soc_codec *codec) int i; int ch_cnt; +#ifdef CONFIG_SOUND_CONTROL_HAX_GPL + pr_info("tabla codec probe...\n"); + fauxsound_codec_ptr = codec; +#endif + codec->control_data = dev_get_drvdata(codec->dev->parent); control = codec->control_data; tabla = kzalloc(sizeof(struct tabla_priv), GFP_KERNEL); + if (!tabla) { dev_err(codec->dev, "Failed to allocate private data\n"); return -ENOMEM;
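/*
 * Linkage note: the #ifndef CONFIG_SOUND_CONTROL_HAX_GPL blocks above drop
 * the 'static' qualifier and add EXPORT_SYMBOL() for tabla_read()/
 * tabla_write() only when sound control is enabled, which is what lets
 * sound_control_3_gpl.c reference them as plain externs:
 *
 *   extern struct snd_soc_codec *fauxsound_codec_ptr;
 *   unsigned int tabla_read(struct snd_soc_codec *codec, unsigned int reg);
 *   int tabla_write(struct snd_soc_codec *codec, unsigned int reg,
 *                   unsigned int value);
 *
 * Note the gate is CONFIG_SOUND_CONTROL_HAX_GPL, so the 3.x module
 * effectively depends on that option being enabled as well (both default
 * to y in the Kconfig hunk earlier in this patch).
 */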